From noreply at buildbot.pypy.org Tue Apr 1 05:39:23 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Tue, 1 Apr 2014 05:39:23 +0200 (CEST) Subject: [pypy-commit] pypy ast-issue1673: solved issue 1713 and issue 1673 Message-ID: <20140401033923.F08871C0161@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: ast-issue1673 Changeset: r70364:7f42f2f479e4 Date: 2014-03-30 03:30 -0400 http://bitbucket.org/pypy/pypy/changeset/7f42f2f479e4/ Log: solved issue 1713 and issue 1673 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -49,13 +49,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) @@ -2913,7 +2919,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'body') + w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state |= 1 def Expression_del_body(space, w_self): @@ -3011,7 +3017,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def stmt_del_lineno(space, w_self): @@ -3038,7 +3044,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state 
|= 2 def stmt_del_col_offset(space, w_self): @@ -3074,7 +3080,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def FunctionDef_del_name(space, w_self): @@ -3101,7 +3107,7 @@ w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'args') + w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state |= 8 def FunctionDef_del_args(space, w_self): @@ -3201,7 +3207,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def ClassDef_del_name(space, w_self): @@ -3326,7 +3332,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Return_del_value(space, w_self): @@ -3448,7 +3454,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 8 def Assign_del_value(space, w_self): @@ -3503,7 +3509,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'target') + w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state |= 4 def AugAssign_del_target(space, w_self): @@ -3561,7 +3567,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 16 def AugAssign_del_value(space, w_self): @@ -3616,7 +3622,7 @@ 
w_self.setdictvalue(space, 'dest', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'dest') + w_self.setdictvalue(space, 'dest', w_new_value) w_self.initialization_state |= 4 def Print_del_dest(space, w_self): @@ -3665,7 +3671,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'nl') + w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state |= 16 def Print_del_nl(space, w_self): @@ -3721,7 +3727,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'target') + w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state |= 4 def For_del_target(space, w_self): @@ -3750,7 +3756,7 @@ w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'iter') + w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state |= 8 def For_del_iter(space, w_self): @@ -3852,7 +3858,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'test') + w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state |= 4 def While_del_test(space, w_self): @@ -3953,7 +3959,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'test') + w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state |= 4 def If_del_test(space, w_self): @@ -4054,7 +4060,7 @@ w_self.setdictvalue(space, 'context_expr', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'context_expr') + w_self.setdictvalue(space, 'context_expr', w_new_value) w_self.initialization_state |= 4 def With_del_context_expr(space, w_self): @@ -4083,7 +4089,7 @@ w_self.setdictvalue(space, 'optional_vars', w_new_value) w_self.initialization_state &= ~8 return - 
w_self.deldictvalue(space, 'optional_vars') + w_self.setdictvalue(space, 'optional_vars', w_new_value) w_self.initialization_state |= 8 def With_del_optional_vars(space, w_self): @@ -4161,7 +4167,7 @@ w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'type') + w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state |= 4 def Raise_del_type(space, w_self): @@ -4190,7 +4196,7 @@ w_self.setdictvalue(space, 'inst', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'inst') + w_self.setdictvalue(space, 'inst', w_new_value) w_self.initialization_state |= 8 def Raise_del_inst(space, w_self): @@ -4219,7 +4225,7 @@ w_self.setdictvalue(space, 'tback', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'tback') + w_self.setdictvalue(space, 'tback', w_new_value) w_self.initialization_state |= 16 def Raise_del_tback(space, w_self): @@ -4440,7 +4446,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'test') + w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state |= 4 def Assert_del_test(space, w_self): @@ -4469,7 +4475,7 @@ w_self.setdictvalue(space, 'msg', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'msg') + w_self.setdictvalue(space, 'msg', w_new_value) w_self.initialization_state |= 8 def Assert_del_msg(space, w_self): @@ -4571,7 +4577,7 @@ w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'module') + w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state |= 4 def ImportFrom_del_module(space, w_self): @@ -4620,7 +4626,7 @@ w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'level') + w_self.setdictvalue(space, 'level', w_new_value) 
w_self.initialization_state |= 16 def ImportFrom_del_level(space, w_self): @@ -4676,7 +4682,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'body') + w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state |= 4 def Exec_del_body(space, w_self): @@ -4705,7 +4711,7 @@ w_self.setdictvalue(space, 'globals', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'globals') + w_self.setdictvalue(space, 'globals', w_new_value) w_self.initialization_state |= 8 def Exec_del_globals(space, w_self): @@ -4734,7 +4740,7 @@ w_self.setdictvalue(space, 'locals', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'locals') + w_self.setdictvalue(space, 'locals', w_new_value) w_self.initialization_state |= 16 def Exec_del_locals(space, w_self): @@ -4836,7 +4842,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Expr_del_value(space, w_self): @@ -4938,7 +4944,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def expr_del_lineno(space, w_self): @@ -4965,7 +4971,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def expr_del_col_offset(space, w_self): @@ -5080,7 +5086,7 @@ w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'left') + w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state |= 4 def BinOp_del_left(space, w_self): @@ -5138,7 
+5144,7 @@ w_self.setdictvalue(space, 'right', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'right') + w_self.setdictvalue(space, 'right', w_new_value) w_self.initialization_state |= 16 def BinOp_del_right(space, w_self): @@ -5222,7 +5228,7 @@ w_self.setdictvalue(space, 'operand', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'operand') + w_self.setdictvalue(space, 'operand', w_new_value) w_self.initialization_state |= 8 def UnaryOp_del_operand(space, w_self): @@ -5274,7 +5280,7 @@ w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'args') + w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state |= 4 def Lambda_del_args(space, w_self): @@ -5303,7 +5309,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'body') + w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state |= 8 def Lambda_del_body(space, w_self): @@ -5357,7 +5363,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'test') + w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state |= 4 def IfExp_del_test(space, w_self): @@ -5386,7 +5392,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'body') + w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state |= 8 def IfExp_del_body(space, w_self): @@ -5415,7 +5421,7 @@ w_self.setdictvalue(space, 'orelse', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'orelse') + w_self.setdictvalue(space, 'orelse', w_new_value) w_self.initialization_state |= 16 def IfExp_del_orelse(space, w_self): @@ -5588,7 +5594,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - 
w_self.deldictvalue(space, 'elt') + w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state |= 4 def ListComp_del_elt(space, w_self): @@ -5665,7 +5671,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'elt') + w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state |= 4 def SetComp_del_elt(space, w_self): @@ -5742,7 +5748,7 @@ w_self.setdictvalue(space, 'key', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'key') + w_self.setdictvalue(space, 'key', w_new_value) w_self.initialization_state |= 4 def DictComp_del_key(space, w_self): @@ -5771,7 +5777,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 8 def DictComp_del_value(space, w_self): @@ -5849,7 +5855,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'elt') + w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state |= 4 def GeneratorExp_del_elt(space, w_self): @@ -5926,7 +5932,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Yield_del_value(space, w_self): @@ -5979,7 +5985,7 @@ w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'left') + w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state |= 4 def Compare_del_left(space, w_self): @@ -6080,7 +6086,7 @@ w_self.setdictvalue(space, 'func', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'func') + w_self.setdictvalue(space, 'func', w_new_value) w_self.initialization_state |= 4 def 
Call_del_func(space, w_self): @@ -6153,7 +6159,7 @@ w_self.setdictvalue(space, 'starargs', w_new_value) w_self.initialization_state &= ~32 return - w_self.deldictvalue(space, 'starargs') + w_self.setdictvalue(space, 'starargs', w_new_value) w_self.initialization_state |= 32 def Call_del_starargs(space, w_self): @@ -6182,7 +6188,7 @@ w_self.setdictvalue(space, 'kwargs', w_new_value) w_self.initialization_state &= ~64 return - w_self.deldictvalue(space, 'kwargs') + w_self.setdictvalue(space, 'kwargs', w_new_value) w_self.initialization_state |= 64 def Call_del_kwargs(space, w_self): @@ -6241,7 +6247,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Repr_del_value(space, w_self): @@ -6292,7 +6298,7 @@ w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'n') + w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state |= 4 def Num_del_n(space, w_self): @@ -6343,7 +6349,7 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 's') + w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state |= 4 def Str_del_s(space, w_self): @@ -6396,7 +6402,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Attribute_del_value(space, w_self): @@ -6423,7 +6429,7 @@ w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'attr') + w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state |= 8 def Attribute_del_attr(space, w_self): @@ -6507,7 +6513,7 @@ w_self.setdictvalue(space, 'value', w_new_value) 
w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Subscript_del_value(space, w_self): @@ -6536,7 +6542,7 @@ w_self.setdictvalue(space, 'slice', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'slice') + w_self.setdictvalue(space, 'slice', w_new_value) w_self.initialization_state |= 8 def Subscript_del_slice(space, w_self): @@ -6618,7 +6624,7 @@ w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'id') + w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state |= 4 def Name_del_id(space, w_self): @@ -6853,7 +6859,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Const_del_value(space, w_self): @@ -6979,7 +6985,7 @@ w_self.setdictvalue(space, 'lower', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lower') + w_self.setdictvalue(space, 'lower', w_new_value) w_self.initialization_state |= 1 def Slice_del_lower(space, w_self): @@ -7008,7 +7014,7 @@ w_self.setdictvalue(space, 'upper', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'upper') + w_self.setdictvalue(space, 'upper', w_new_value) w_self.initialization_state |= 2 def Slice_del_upper(space, w_self): @@ -7037,7 +7043,7 @@ w_self.setdictvalue(space, 'step', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'step') + w_self.setdictvalue(space, 'step', w_new_value) w_self.initialization_state |= 4 def Slice_del_step(space, w_self): @@ -7139,7 +7145,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', 
w_new_value) w_self.initialization_state |= 1 def Index_del_value(space, w_self): @@ -7416,7 +7422,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'target') + w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state |= 1 def comprehension_del_target(space, w_self): @@ -7445,7 +7451,7 @@ w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'iter') + w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state |= 2 def comprehension_del_iter(space, w_self): @@ -7521,7 +7527,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def excepthandler_del_lineno(space, w_self): @@ -7548,7 +7554,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def excepthandler_del_col_offset(space, w_self): @@ -7586,7 +7592,7 @@ w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'type') + w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state |= 4 def ExceptHandler_del_type(space, w_self): @@ -7615,7 +7621,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'name') + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 8 def ExceptHandler_del_name(space, w_self): @@ -7716,7 +7722,7 @@ w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'vararg') + w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state 
|= 2 def arguments_del_vararg(space, w_self): @@ -7746,7 +7752,7 @@ w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'kwarg') + w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state |= 4 def arguments_del_kwarg(space, w_self): @@ -7824,7 +7830,7 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'arg') + w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 def keyword_del_arg(space, w_self): @@ -7853,7 +7859,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 2 def keyword_del_value(space, w_self): @@ -7905,7 +7911,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'name') + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 1 def alias_del_name(space, w_self): @@ -7935,7 +7941,7 @@ w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'asname') + w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state |= 2 def alias_del_asname(space, w_self): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -480,7 +480,9 @@ self.emit("w_self.setdictvalue(space, '%s', w_new_value)" % (field.name,), 1) else: - self.emit("w_self.deldictvalue(space, '%s')" %(field.name,), 1) + #self.emit("w_self.deldictvalue(space, '%s')" %(field.name,), 1) + self.emit("w_self.setdictvalue(space, '%s', w_new_value)" + % (field.name,), 1) self.emit("w_self.initialization_state |= %s" % (flag,), 1) self.emit("") 
@@ -596,13 +598,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) From noreply at buildbot.pypy.org Tue Apr 1 05:39:25 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Tue, 1 Apr 2014 05:39:25 +0200 (CEST) Subject: [pypy-commit] pypy ast-issue1673: dd tests for issue 1713 and issue 1673 Message-ID: <20140401033925.40D901C0161@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: ast-issue1673 Changeset: r70365:04b3993dd3e4 Date: 2014-03-30 03:31 -0400 http://bitbucket.org/pypy/pypy/changeset/04b3993dd3e4/ Log: dd tests for issue 1713 and issue 1673 diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -387,3 +387,40 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") + + def test_dict_astNode(self): + import ast + num_node = ast.Num(n=2, lineno=2, col_offset=3) + dict_res = num_node.__dict__ + + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Num_notfullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2) + assert num_node.n == 2 + assert num_node.lineno == 2 + num_node2 = copy.deepcopy(num_node) + + def test_issue1673_Num_fullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2,col_offset=3) + num_node2 = copy.deepcopy(num_node) + assert num_node.n == num_node2.n + assert num_node.lineno == num_node2.lineno + assert 
num_node.col_offset == num_node2.col_offset + dict_res = num_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Str(self): + import ast + import copy + str_node = ast.Str(n=2,lineno=2) + assert str_node.n == 2 + assert str_node.lineno == 2 + str_node2 = copy.deepcopy(str_node) + dict_res = str_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2} + \ No newline at end of file From noreply at buildbot.pypy.org Tue Apr 1 05:39:26 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Tue, 1 Apr 2014 05:39:26 +0200 (CEST) Subject: [pypy-commit] pypy ast-issue1673: add the set the save_original_object flag to be true when field type is in asdl.builtin_types Message-ID: <20140401033926.E04751C0161@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: ast-issue1673 Changeset: r70366:41cb7a56b1f5 Date: 2014-03-31 22:01 -0400 http://bitbucket.org/pypy/pypy/changeset/41cb7a56b1f5/ Log: add the set the save_original_object flag to be true when field type is in asdl.builtin_types diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -52,7 +52,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except: + except Exception: pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: @@ -60,7 +60,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except: + except Exception: pass return space.newtuple([space.type(self), space.newtuple([]), @@ -2919,7 +2919,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~1 return - w_self.setdictvalue(space, 'body', w_new_value) + w_self.deldictvalue(space, 'body') w_self.initialization_state |= 1 def Expression_del_body(space, w_self): @@ -3017,6 +3017,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too 
w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 @@ -3044,6 +3045,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 @@ -3080,6 +3082,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 @@ -3107,7 +3110,7 @@ w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'args', w_new_value) + w_self.deldictvalue(space, 'args') w_self.initialization_state |= 8 def FunctionDef_del_args(space, w_self): @@ -3207,6 +3210,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 @@ -3332,7 +3336,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Return_del_value(space, w_self): @@ -3454,7 +3458,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 8 def Assign_del_value(space, w_self): @@ -3509,7 +3513,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'target', w_new_value) + w_self.deldictvalue(space, 'target') w_self.initialization_state |= 4 def AugAssign_del_target(space, w_self): @@ -3567,7 +3571,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~16 return 
- w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 16 def AugAssign_del_value(space, w_self): @@ -3622,7 +3626,7 @@ w_self.setdictvalue(space, 'dest', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'dest', w_new_value) + w_self.deldictvalue(space, 'dest') w_self.initialization_state |= 4 def Print_del_dest(space, w_self): @@ -3671,6 +3675,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state &= ~16 return + # need to save the original object too w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state |= 16 @@ -3727,7 +3732,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'target', w_new_value) + w_self.deldictvalue(space, 'target') w_self.initialization_state |= 4 def For_del_target(space, w_self): @@ -3756,7 +3761,7 @@ w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'iter', w_new_value) + w_self.deldictvalue(space, 'iter') w_self.initialization_state |= 8 def For_del_iter(space, w_self): @@ -3858,7 +3863,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'test', w_new_value) + w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 def While_del_test(space, w_self): @@ -3959,7 +3964,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'test', w_new_value) + w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 def If_del_test(space, w_self): @@ -4060,7 +4065,7 @@ w_self.setdictvalue(space, 'context_expr', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'context_expr', w_new_value) + w_self.deldictvalue(space, 'context_expr') w_self.initialization_state |= 4 def 
With_del_context_expr(space, w_self): @@ -4089,7 +4094,7 @@ w_self.setdictvalue(space, 'optional_vars', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'optional_vars', w_new_value) + w_self.deldictvalue(space, 'optional_vars') w_self.initialization_state |= 8 def With_del_optional_vars(space, w_self): @@ -4167,7 +4172,7 @@ w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'type', w_new_value) + w_self.deldictvalue(space, 'type') w_self.initialization_state |= 4 def Raise_del_type(space, w_self): @@ -4196,7 +4201,7 @@ w_self.setdictvalue(space, 'inst', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'inst', w_new_value) + w_self.deldictvalue(space, 'inst') w_self.initialization_state |= 8 def Raise_del_inst(space, w_self): @@ -4225,7 +4230,7 @@ w_self.setdictvalue(space, 'tback', w_new_value) w_self.initialization_state &= ~16 return - w_self.setdictvalue(space, 'tback', w_new_value) + w_self.deldictvalue(space, 'tback') w_self.initialization_state |= 16 def Raise_del_tback(space, w_self): @@ -4446,7 +4451,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'test', w_new_value) + w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 def Assert_del_test(space, w_self): @@ -4475,7 +4480,7 @@ w_self.setdictvalue(space, 'msg', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'msg', w_new_value) + w_self.deldictvalue(space, 'msg') w_self.initialization_state |= 8 def Assert_del_msg(space, w_self): @@ -4577,6 +4582,7 @@ w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state |= 4 @@ -4626,6 +4632,7 @@ w_self.setdictvalue(space, 'level', w_new_value) 
w_self.initialization_state &= ~16 return + # need to save the original object too w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state |= 16 @@ -4682,7 +4689,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'body', w_new_value) + w_self.deldictvalue(space, 'body') w_self.initialization_state |= 4 def Exec_del_body(space, w_self): @@ -4711,7 +4718,7 @@ w_self.setdictvalue(space, 'globals', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'globals', w_new_value) + w_self.deldictvalue(space, 'globals') w_self.initialization_state |= 8 def Exec_del_globals(space, w_self): @@ -4740,7 +4747,7 @@ w_self.setdictvalue(space, 'locals', w_new_value) w_self.initialization_state &= ~16 return - w_self.setdictvalue(space, 'locals', w_new_value) + w_self.deldictvalue(space, 'locals') w_self.initialization_state |= 16 def Exec_del_locals(space, w_self): @@ -4842,7 +4849,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Expr_del_value(space, w_self): @@ -4944,6 +4951,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 @@ -4971,6 +4979,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 @@ -5086,7 +5095,7 @@ w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'left', w_new_value) + w_self.deldictvalue(space, 'left') w_self.initialization_state |= 4 def BinOp_del_left(space, w_self): 
@@ -5144,7 +5153,7 @@ w_self.setdictvalue(space, 'right', w_new_value) w_self.initialization_state &= ~16 return - w_self.setdictvalue(space, 'right', w_new_value) + w_self.deldictvalue(space, 'right') w_self.initialization_state |= 16 def BinOp_del_right(space, w_self): @@ -5228,7 +5237,7 @@ w_self.setdictvalue(space, 'operand', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'operand', w_new_value) + w_self.deldictvalue(space, 'operand') w_self.initialization_state |= 8 def UnaryOp_del_operand(space, w_self): @@ -5280,7 +5289,7 @@ w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'args', w_new_value) + w_self.deldictvalue(space, 'args') w_self.initialization_state |= 4 def Lambda_del_args(space, w_self): @@ -5309,7 +5318,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'body', w_new_value) + w_self.deldictvalue(space, 'body') w_self.initialization_state |= 8 def Lambda_del_body(space, w_self): @@ -5363,7 +5372,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'test', w_new_value) + w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 def IfExp_del_test(space, w_self): @@ -5392,7 +5401,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'body', w_new_value) + w_self.deldictvalue(space, 'body') w_self.initialization_state |= 8 def IfExp_del_body(space, w_self): @@ -5421,7 +5430,7 @@ w_self.setdictvalue(space, 'orelse', w_new_value) w_self.initialization_state &= ~16 return - w_self.setdictvalue(space, 'orelse', w_new_value) + w_self.deldictvalue(space, 'orelse') w_self.initialization_state |= 16 def IfExp_del_orelse(space, w_self): @@ -5594,7 +5603,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 
return - w_self.setdictvalue(space, 'elt', w_new_value) + w_self.deldictvalue(space, 'elt') w_self.initialization_state |= 4 def ListComp_del_elt(space, w_self): @@ -5671,7 +5680,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'elt', w_new_value) + w_self.deldictvalue(space, 'elt') w_self.initialization_state |= 4 def SetComp_del_elt(space, w_self): @@ -5748,7 +5757,7 @@ w_self.setdictvalue(space, 'key', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'key', w_new_value) + w_self.deldictvalue(space, 'key') w_self.initialization_state |= 4 def DictComp_del_key(space, w_self): @@ -5777,7 +5786,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 8 def DictComp_del_value(space, w_self): @@ -5855,7 +5864,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'elt', w_new_value) + w_self.deldictvalue(space, 'elt') w_self.initialization_state |= 4 def GeneratorExp_del_elt(space, w_self): @@ -5932,7 +5941,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Yield_del_value(space, w_self): @@ -5985,7 +5994,7 @@ w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'left', w_new_value) + w_self.deldictvalue(space, 'left') w_self.initialization_state |= 4 def Compare_del_left(space, w_self): @@ -6086,7 +6095,7 @@ w_self.setdictvalue(space, 'func', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'func', w_new_value) + w_self.deldictvalue(space, 'func') w_self.initialization_state |= 
4 def Call_del_func(space, w_self): @@ -6159,7 +6168,7 @@ w_self.setdictvalue(space, 'starargs', w_new_value) w_self.initialization_state &= ~32 return - w_self.setdictvalue(space, 'starargs', w_new_value) + w_self.deldictvalue(space, 'starargs') w_self.initialization_state |= 32 def Call_del_starargs(space, w_self): @@ -6188,7 +6197,7 @@ w_self.setdictvalue(space, 'kwargs', w_new_value) w_self.initialization_state &= ~64 return - w_self.setdictvalue(space, 'kwargs', w_new_value) + w_self.deldictvalue(space, 'kwargs') w_self.initialization_state |= 64 def Call_del_kwargs(space, w_self): @@ -6247,7 +6256,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Repr_del_value(space, w_self): @@ -6298,6 +6307,7 @@ w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state |= 4 @@ -6349,6 +6359,7 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state |= 4 @@ -6402,7 +6413,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Attribute_del_value(space, w_self): @@ -6429,6 +6440,7 @@ w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state &= ~8 return + # need to save the original object too w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state |= 8 @@ -6513,7 +6525,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + 
w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Subscript_del_value(space, w_self): @@ -6542,7 +6554,7 @@ w_self.setdictvalue(space, 'slice', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'slice', w_new_value) + w_self.deldictvalue(space, 'slice') w_self.initialization_state |= 8 def Subscript_del_slice(space, w_self): @@ -6624,6 +6636,7 @@ w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state |= 4 @@ -6859,6 +6872,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 @@ -6985,7 +6999,7 @@ w_self.setdictvalue(space, 'lower', w_new_value) w_self.initialization_state &= ~1 return - w_self.setdictvalue(space, 'lower', w_new_value) + w_self.deldictvalue(space, 'lower') w_self.initialization_state |= 1 def Slice_del_lower(space, w_self): @@ -7014,7 +7028,7 @@ w_self.setdictvalue(space, 'upper', w_new_value) w_self.initialization_state &= ~2 return - w_self.setdictvalue(space, 'upper', w_new_value) + w_self.deldictvalue(space, 'upper') w_self.initialization_state |= 2 def Slice_del_upper(space, w_self): @@ -7043,7 +7057,7 @@ w_self.setdictvalue(space, 'step', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'step', w_new_value) + w_self.deldictvalue(space, 'step') w_self.initialization_state |= 4 def Slice_del_step(space, w_self): @@ -7145,7 +7159,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~1 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 1 def Index_del_value(space, w_self): @@ -7422,7 +7436,7 @@ w_self.setdictvalue(space, 'target', w_new_value) 
w_self.initialization_state &= ~1 return - w_self.setdictvalue(space, 'target', w_new_value) + w_self.deldictvalue(space, 'target') w_self.initialization_state |= 1 def comprehension_del_target(space, w_self): @@ -7451,7 +7465,7 @@ w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state &= ~2 return - w_self.setdictvalue(space, 'iter', w_new_value) + w_self.deldictvalue(space, 'iter') w_self.initialization_state |= 2 def comprehension_del_iter(space, w_self): @@ -7527,6 +7541,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 @@ -7554,6 +7569,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 @@ -7592,7 +7608,7 @@ w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'type', w_new_value) + w_self.deldictvalue(space, 'type') w_self.initialization_state |= 4 def ExceptHandler_del_type(space, w_self): @@ -7621,7 +7637,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'name', w_new_value) + w_self.deldictvalue(space, 'name') w_self.initialization_state |= 8 def ExceptHandler_del_name(space, w_self): @@ -7722,6 +7738,7 @@ w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state |= 2 @@ -7752,6 +7769,7 @@ w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state |= 4 @@ -7830,6 
+7848,7 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 @@ -7859,7 +7878,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~2 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 2 def keyword_del_value(space, w_self): @@ -7911,6 +7930,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 1 @@ -7941,6 +7961,7 @@ w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state |= 2 diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -459,6 +459,7 @@ self.emit("raise OperationError(space.w_TypeError, " "space.w_None)", 3) else: + save_original_object = True level = 2 if field.opt and field.type.value != "int": self.emit("if space.is_w(w_new_value, space.w_None):", 2) @@ -480,9 +481,7 @@ self.emit("w_self.setdictvalue(space, '%s', w_new_value)" % (field.name,), 1) else: - #self.emit("w_self.deldictvalue(space, '%s')" %(field.name,), 1) - self.emit("w_self.setdictvalue(space, '%s', w_new_value)" - % (field.name,), 1) + self.emit("w_self.deldictvalue(space, '%s')" %(field.name,), 1) self.emit("w_self.initialization_state |= %s" % (flag,), 1) self.emit("") @@ -601,7 +600,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except: + except Exception: pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: 
@@ -609,7 +608,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except: + except Exception: pass return space.newtuple([space.type(self), space.newtuple([]), From noreply at buildbot.pypy.org Tue Apr 1 05:39:28 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Tue, 1 Apr 2014 05:39:28 +0200 (CEST) Subject: [pypy-commit] pypy ast-issue1673: modify exception to OperationError Message-ID: <20140401033928.369A21C0161@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: ast-issue1673 Changeset: r70367:9e6634cd63f4 Date: 2014-03-31 22:55 -0400 http://bitbucket.org/pypy/pypy/changeset/9e6634cd63f4/ Log: modify exception to OperationError diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -52,7 +52,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except Exception: + except OperationError: pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: @@ -60,7 +60,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except Exception: + except OperationError: pass return space.newtuple([space.type(self), space.newtuple([]), diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -600,7 +600,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except Exception: + except OperationError: pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: @@ -608,7 +608,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except Exception: + except OperationError: pass return space.newtuple([space.type(self), space.newtuple([]), From noreply at buildbot.pypy.org Tue Apr 1 09:26:27 2014 From: noreply at buildbot.pypy.org (vlukas) Date: Tue, 1 Apr 2014 09:26:27 +0200 (CEST) 
Subject: [pypy-commit] pypy default: embedding.rst edited online with Bitbucket Message-ID: <20140401072627.A399F1C0034@cobra.cs.uni-duesseldorf.de> Author: Lukas Vacek Branch: Changeset: r70368:4221b74e1e8a Date: 2014-03-24 23:26 +0000 http://bitbucket.org/pypy/pypy/changeset/4221b74e1e8a/ Log: embedding.rst edited online with Bitbucket diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -1,4 +1,3 @@ - Embedding PyPy -------------- @@ -51,6 +50,8 @@ .. function:: int pypy_execute_source_ptr(char* source, void* ptr); + .. note:: Not available in <=2.2.1 yet + Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. From noreply at buildbot.pypy.org Tue Apr 1 09:26:28 2014 From: noreply at buildbot.pypy.org (vlukas) Date: Tue, 1 Apr 2014 09:26:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Embedding documentation updated Message-ID: <20140401072628.E64701C0034@cobra.cs.uni-duesseldorf.de> Author: Lukas Vacek Branch: Changeset: r70369:fefe963624fe Date: 2014-03-26 01:04 +0000 http://bitbucket.org/pypy/pypy/changeset/fefe963624fe/ Log: Embedding documentation updated diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -1,3 +1,4 @@ + Embedding PyPy -------------- @@ -50,7 +51,7 @@ .. function:: int pypy_execute_source_ptr(char* source, void* ptr); - .. note:: Not available in <=2.2.1 yet + .. note:: Not available in PyPy <= 2.2.1 Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. @@ -75,7 +76,6 @@ .. code-block:: c - #include "include/PyPy.h" #include const char source[] = "print 'hello from pypy'"; @@ -101,9 +101,26 @@ Worked! +Missing PyPy.h +-------------- + +.. note:: PyPy.h is in the nigthly builds and goes to new PyPy releases (>2.2.1). 
+ +For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): + +.. code-block:: bash + + cd /opt/pypy/include + wget https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h + + More advanced example --------------------- +.. note:: This example depends on pypy_execute_source_ptr which is not available + in PyPy <= 2.2.1. You might want to see the alternative example + below. + Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. It's a bit longish, but it captures a gist what can be done with the PyPy @@ -162,6 +179,95 @@ is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` and fill the structure from Python side for the future use. +Alternative example +------------------- + +As ``pypy_execute_source_ptr`` is not available in PyPy 2.2.1, you might want to try +an alternative approach which relies on -export-dynamic flag to the GNU linker. +The downside to this approach is that it is platform dependent. + +.. 
code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + lib = ffi.verify('int callback(int (*func)(int));')\n\ + lib.callback(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/home/lukas/dev/pypy-2.2.1-src/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source(source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + + +Make sure to pass -export-dynamic flag when compiling:: + + $ gcc -g -o x x.c -lpypy-c -L. -export-dynamic + $ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +Finding pypy_home +----------------- + +Function pypy_setup_home takes one parameter - the path to libpypy. There's currently no "clean" way (pkg-config comes to mind) how to find this path. You can try the following (GNU-specific) hack (don't forget to link against *dl*): + +.. 
code-block:: c + + #if !(_GNU_SOURCE) + #define _GNU_SOURCE + #endif + + #include + #include + #include + + // caller should free returned pointer to avoid memleaks + // returns NULL on error + char* guess_pypyhome() { + // glibc-only (dladdr is why we #define _GNU_SOURCE) + Dl_info info; + void *_rpython_startup_code = dlsym(0,"rpython_startup_code"); + if (_rpython_startup_code == 0) { + return 0; + } + if (dladdr(_rpython_startup_code, &info) != 0) { + const char* lib_path = info.dli_fname; + char* lib_realpath = realpath(lib_path, 0); + return lib_realpath; + } + return 0; + } + + Threading --------- From noreply at buildbot.pypy.org Tue Apr 1 09:26:30 2014 From: noreply at buildbot.pypy.org (vlukas) Date: Tue, 1 Apr 2014 09:26:30 +0200 (CEST) Subject: [pypy-commit] pypy default: Embedding documentation - fixing few typos Message-ID: <20140401072630.2F16E1C0034@cobra.cs.uni-duesseldorf.de> Author: Lukas Vacek Branch: Changeset: r70370:3285a45ff1bd Date: 2014-03-26 01:11 +0000 http://bitbucket.org/pypy/pypy/changeset/3285a45ff1bd/ Log: Embedding documentation - fixing few typos diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -104,7 +104,7 @@ Missing PyPy.h -------------- -.. note:: PyPy.h is in the nigthly builds and goes to new PyPy releases (>2.2.1). +.. note:: PyPy.h is in the nightly builds and goes to new PyPy releases (>2.2.1). For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): @@ -214,7 +214,7 @@ void *lib, *func; rpython_startup_code(); - res = pypy_setup_home("/home/lukas/dev/pypy-2.2.1-src/libpypy-c.so", 1); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); if (res) { printf("Error setting pypy home!\n"); return 1; @@ -238,7 +238,9 @@ Finding pypy_home ----------------- -Function pypy_setup_home takes one parameter - the path to libpypy. There's currently no "clean" way (pkg-config comes to mind) how to find this path. 
You can try the following (GNU-specific) hack (don't forget to link against *dl*): +Function pypy_setup_home takes one parameter - the path to libpypy. There's +currently no "clean" way (pkg-config comes to mind) how to find this path. You +can try the following (GNU-specific) hack (don't forget to link against *dl*): .. code-block:: c From noreply at buildbot.pypy.org Tue Apr 1 09:26:31 2014 From: noreply at buildbot.pypy.org (vlukas) Date: Tue, 1 Apr 2014 09:26:31 +0200 (CEST) Subject: [pypy-commit] pypy default: docs: embedding: Fixing simple example to include PyPy.h Message-ID: <20140401072631.6B20F1C0034@cobra.cs.uni-duesseldorf.de> Author: Lukas Vacek Branch: Changeset: r70371:317174d5552a Date: 2014-03-31 21:33 +0100 http://bitbucket.org/pypy/pypy/changeset/317174d5552a/ Log: docs: embedding: Fixing simple example to include PyPy.h diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -76,6 +76,7 @@ .. code-block:: c + #include "include/PyPy.h" #include const char source[] = "print 'hello from pypy'"; @@ -101,6 +102,9 @@ Worked! +.. note:: If the compilation fails because of missing PyPy.h header file, + you are running PyPy <= 2.2.1, please see the section `Missing PyPy.h`_. + Missing PyPy.h -------------- From noreply at buildbot.pypy.org Tue Apr 1 09:26:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Apr 2014 09:26:32 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in vlukas/pypy (pull request #215) Message-ID: <20140401072632.B01A41C0034@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70372:aa9f9b6f0ac3 Date: 2014-04-01 09:25 +0200 http://bitbucket.org/pypy/pypy/changeset/aa9f9b6f0ac3/ Log: Merged in vlukas/pypy (pull request #215) Updating embedding.rst documentation to match PyPy 2.2.1 diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -51,6 +51,8 @@ .. 
function:: int pypy_execute_source_ptr(char* source, void* ptr); + .. note:: Not available in PyPy <= 2.2.1 + Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. @@ -100,9 +102,29 @@ Worked! +.. note:: If the compilation fails because of missing PyPy.h header file, + you are running PyPy <= 2.2.1, please see the section `Missing PyPy.h`_. + +Missing PyPy.h +-------------- + +.. note:: PyPy.h is in the nightly builds and goes to new PyPy releases (>2.2.1). + +For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): + +.. code-block:: bash + + cd /opt/pypy/include + wget https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h + + More advanced example --------------------- +.. note:: This example depends on pypy_execute_source_ptr which is not available + in PyPy <= 2.2.1. You might want to see the alternative example + below. + Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. It's a bit longish, but it captures a gist what can be done with the PyPy @@ -161,6 +183,97 @@ is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` and fill the structure from Python side for the future use. +Alternative example +------------------- + +As ``pypy_execute_source_ptr`` is not available in PyPy 2.2.1, you might want to try +an alternative approach which relies on -export-dynamic flag to the GNU linker. +The downside to this approach is that it is platform dependent. + +.. 
code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + lib = ffi.verify('int callback(int (*func)(int));')\n\ + lib.callback(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source(source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + + +Make sure to pass -export-dynamic flag when compiling:: + + $ gcc -g -o x x.c -lpypy-c -L. -export-dynamic + $ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +Finding pypy_home +----------------- + +Function pypy_setup_home takes one parameter - the path to libpypy. There's +currently no "clean" way (pkg-config comes to mind) how to find this path. You +can try the following (GNU-specific) hack (don't forget to link against *dl*): + +.. 
code-block:: c + + #if !(_GNU_SOURCE) + #define _GNU_SOURCE + #endif + + #include + #include + #include + + // caller should free returned pointer to avoid memleaks + // returns NULL on error + char* guess_pypyhome() { + // glibc-only (dladdr is why we #define _GNU_SOURCE) + Dl_info info; + void *_rpython_startup_code = dlsym(0,"rpython_startup_code"); + if (_rpython_startup_code == 0) { + return 0; + } + if (dladdr(_rpython_startup_code, &info) != 0) { + const char* lib_path = info.dli_fname; + char* lib_realpath = realpath(lib_path, 0); + return lib_realpath; + } + return 0; + } + + Threading --------- From noreply at buildbot.pypy.org Tue Apr 1 09:35:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Apr 2014 09:35:27 +0200 (CEST) Subject: [pypy-commit] pypy default: Rename "interpret" to "normal-execution", now that it also includes the Message-ID: <20140401073527.D0E931C0034@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70373:eb3714149402 Date: 2014-04-01 09:34 +0200 http://bitbucket.org/pypy/pypy/changeset/eb3714149402/ Log: Rename "interpret" to "normal-execution", now that it also includes the execution time in jit-generated machine code. 
diff --git a/rpython/tool/logparser.py b/rpython/tool/logparser.py --- a/rpython/tool/logparser.py +++ b/rpython/tool/logparser.py @@ -410,7 +410,7 @@ total = sum([b for a, b in l]) for a, b in l: if a is None: - a = 'interpret' + a = 'normal-execution' s = " " * (50 - len(a)) print >>outfile, a, s, str(b*100/total) + "%" if out != '-': From noreply at buildbot.pypy.org Tue Apr 1 09:40:33 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Tue, 1 Apr 2014 09:40:33 +0200 (CEST) Subject: [pypy-commit] cffi default: Write out buggy line when exception during _internal_parse Message-ID: <20140401074033.AA08A1C0034@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: Changeset: r1483:394b200a1724 Date: 2014-03-31 21:10 +0800 http://bitbucket.org/cffi/cffi/changeset/394b200a1724/ Log: Write out buggy line when exception during _internal_parse diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -128,9 +128,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +143,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,7 +167,7 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros for key, value in macros.items(): value = value.strip() @@ -176,26 +183,32 @@ break # for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - 
self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) - else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + print("*** Error: %s" % msg) + raise def _parse_decl(self, decl): node = decl.type @@ -227,7 +240,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] 
assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +319,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +347,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -506,8 +521,8 @@ self._partial_length = True return '...' # - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: From noreply at buildbot.pypy.org Tue Apr 1 09:40:34 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Tue, 1 Apr 2014 09:40:34 +0200 (CEST) Subject: [pypy-commit] cffi default: Move try ... except outside of a for loop Message-ID: <20140401074034.E25721C0034@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: Changeset: r1484:3d11d62d6ed9 Date: 2014-04-01 08:16 +0800 http://bitbucket.org/cffi/cffi/changeset/3d11d62d6ed9/ Log: Move try ... 
except outside of a for loop diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -182,8 +182,8 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - try: + try: + for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) elif isinstance(decl, pycparser.c_ast.Typedef): @@ -204,11 +204,11 @@ self._declare('typedef ' + decl.name, realtype) else: raise api.CDefError("unrecognized construct", decl) - except api.FFIError as e: - msg = self._convert_pycparser_error(e, csource) - if msg: - print("*** Error: %s" % msg) - raise + except api.FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + print("*** Error: %s" % msg) + raise def _parse_decl(self, decl): node = decl.type From noreply at buildbot.pypy.org Tue Apr 1 09:40:36 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Tue, 1 Apr 2014 09:40:36 +0200 (CEST) Subject: [pypy-commit] cffi default: Update exception args instead of use print on re-raise FFIError Message-ID: <20140401074036.054C31C0034@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: Changeset: r1485:52c29b6665c2 Date: 2014-04-01 08:29 +0800 http://bitbucket.org/cffi/cffi/changeset/52c29b6665c2/ Log: Update exception args instead of use print on re-raise FFIError diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -207,7 +207,7 @@ except api.FFIError as e: msg = self._convert_pycparser_error(e, csource) if msg: - print("*** Error: %s" % msg) + e.args = (e.args[0] + "\n *** Err: %s" % msg,) raise def _parse_decl(self, decl): From noreply at buildbot.pypy.org Tue Apr 1 09:41:46 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 1 Apr 2014 09:41:46 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fix to make the no_specialized_storage flag work. 
Message-ID: <20140401074146.B947F1C0034@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r731:b645dc5a80e1 Date: 2014-03-31 20:00 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b645dc5a80e1/ Log: Fix to make the no_specialized_storage flag work. diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -65,6 +65,9 @@ return self._w_self.store_with_new_storage(new_storage, n0, w_val) def can_contain(self, w_val): return self.static_can_contain(self.space, w_val) + @staticmethod + def static_can_contain(space, w_val): + raise NotImplementedError() def do_store(self, n0, w_val): raise NotImplementedError() def generalized_strategy_for(self, w_val): From noreply at buildbot.pypy.org Tue Apr 1 09:41:47 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 1 Apr 2014 09:41:47 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added test for interpreter. Message-ID: <20140401074147.D8CAA1C0034@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r732:18eb8c008b4e Date: 2014-03-31 21:04 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/18eb8c008b4e/ Log: Added test for interpreter. 
diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -890,10 +890,23 @@ s_frame.push(w(fakeliterals(space, "baz"))) step_in_interp(s_frame) array = s_frame.pop() + assert array.size() == 3 assert space.unwrap_array(array.at0(space, 0)) == fakeliterals(space, "egg") assert space.unwrap_array(array.at0(space, 1)) == fakeliterals(space, "bar") assert space.unwrap_array(array.at0(space, 2)) == fakeliterals(space, "baz") +def test_bc_pushNewArrayBytecode_noPopIntoArray(bytecode=pushNewArrayBytecode): + w_frame, s_frame = new_frame(bytecode + chr(0x02)) + s_frame.push(w("egg")) + s_frame.push(w("bar")) + step_in_interp(s_frame) + array = s_frame.pop() + assert array.size() == 2 + assert array.at0(space, 0).is_nil(space) + assert array.at0(space, 1).is_nil(space) + assert s_frame.pop().as_string() == "bar" + assert s_frame.pop().as_string() == "egg" + def test_bc_pushNewArray(bytecode=pushNewArrayBytecode): w_frame, s_frame = new_frame(bytecode + chr(0x07)) step_in_interp(s_frame) From noreply at buildbot.pypy.org Tue Apr 1 09:41:49 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 1 Apr 2014 09:41:49 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added support for loading weak objects from an image. Important. Message-ID: <20140401074149.003291C0034@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r733:9f145aa73461 Date: 2014-03-31 21:18 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/9f145aa73461/ Log: Added support for loading weak objects from an image. Important. --strategy-stats can be used to see how many weak objects are loaded. Extended the test for weak pointers. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -571,7 +571,7 @@ pointers = g_self.get_pointers() # TODO -- Also handle weak objects loaded from images. 
from spyvm.shadow import find_storage_for_objects - storage = find_storage_for_objects(space, pointers)(space, self, len(pointers)) + storage = find_storage_for_objects(space, pointers, g_self.isweak())(space, self, len(pointers)) self.store_shadow(storage) self.store_all(space, pointers) self.log_storage("Filledin", log_classname=False) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -170,13 +170,13 @@ def empty_storage(space, w_self, size, weak=False): if weak: return WeakListStorageShadow(space, w_self, size) - else: - if no_specialized_storage: - return ListStorageShadow(space, w_self, size) - else: - return AllNilStorageShadow(space, w_self, size) + if no_specialized_storage: + return ListStorageShadow(space, w_self, size) + return AllNilStorageShadow(space, w_self, size) -def find_storage_for_objects(space, vars): +def find_storage_for_objects(space, vars, weak=False): + if weak: + return WeakListStorageShadow if no_specialized_storage: return ListStorageShadow specialized_strategies = 3 diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -503,8 +503,11 @@ return self.iswords() and self.space.w_Float.is_same_object(self.g_class.w_object) def ispointers(self): - return self.format < 5 #TODO, what about compiled methods? 
+ return self.format < 5 + def isweak(self): + return self.format == 4 + def iscompiledmethod(self): return 12 <= self.format <= 15 @@ -528,7 +531,6 @@ # the instantiate call circumvents the constructors # and makes empty objects if self.ispointers(): - # XXX self.format == 4 is weak self.w_object = objectmodel.instantiate(model.W_PointersObject) elif self.format == 5: raise CorruptImageError("Unknown format 5") diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -419,13 +419,15 @@ @py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_weak_pointers(): - w_cls = bootstrap_class(1) + w_cls = bootstrap_class(2) s_cls = w_cls.as_class_get_shadow(space) s_cls.instance_kind = WEAK_POINTERS weak_object = s_cls.new() referenced = model.W_SmallInteger(10) + referenced2 = model.W_SmallInteger(20) weak_object.store(space, 0, referenced) + weak_object.store(space, 1, referenced2) assert weak_object.fetch(space, 0) is referenced del referenced @@ -433,3 +435,4 @@ # Thus the reference may linger until the next gc... import gc; gc.collect() assert weak_object.fetch(space, 0).is_nil(space) + assert weak_object.fetch(space, 1).value == 20 From noreply at buildbot.pypy.org Tue Apr 1 10:52:11 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 1 Apr 2014 10:52:11 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Refined the compiled_in() method in CompiledMethod. Message-ID: <20140401085211.601731D274E@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r734:7637f57d242e Date: 2014-04-01 10:51 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/7637f57d242e/ Log: Refined the compiled_in() method in CompiledMethod. 
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -127,6 +127,12 @@ """Return True, if the receiver represents the nil object in the given Object Space.""" return self.is_same_object(space.w_nil) + def is_class(self, space): + """ Return true, if the receiver seems to be a class. + We can not be completely sure about this (non-class objects might be + used as class).""" + return False + def become(self, other): """Become swaps two objects. False means swapping failed""" @@ -509,12 +515,23 @@ # Don't construct the ClassShadow here, yet! self.w_class = g_self.get_class() + def is_class(self, space): + # This is a class if it's a Metaclass or an instance of a Metaclass. + if self.has_class(): + w_Metaclass = space.classtable["w_Metaclass"] + w_class = self.getclass(space) + if w_Metaclass.is_same_object(w_class): + return True + if w_class.has_class(): + return w_Metaclass.is_same_object(w_class.getclass(space)) + return False + def getclass(self, space): return self.w_class def guess_classname(self): if self.has_class(): - if self.w_class.has_shadow(): + if self.w_class.has_space(): class_shadow = self.class_shadow(self.w_class.space()) return class_shadow.name else: @@ -580,6 +597,12 @@ from shadow import WeakListStorageShadow return isinstance(self.shadow, WeakListStorageShadow) + def is_class(self, space): + from spyvm.shadow import ClassShadow + if isinstance(self.shadow, ClassShadow): + return True + return W_AbstractObjectWithClassReference.is_class(self, space) + def assert_shadow(self): # Failing the following assert most likely indicates a bug. The shadow can only be absent during # the bootstrapping sequence. It will be initialized in the fillin() method. Before that, it should @@ -715,6 +738,10 @@ def has_shadow(self): return self._get_shadow() is not None + def has_space(self): + # The space is accessed through the shadow. 
+ return self.has_shadow() + def _become(self, w_other): assert isinstance(w_other, W_PointersObject) self.shadow, w_other.shadow = w_other.shadow, self.shadow @@ -1258,16 +1285,21 @@ def compiled_in(self): w_compiledin = self.w_compiledin if not w_compiledin: - if self.literals: - # (Blue book, p 607) All CompiledMethods that contain - # extended-super bytecodes have the clain which they are found as - # their last literal variable. - # Last of the literals is an association with compiledin as a class - w_association = self.literals[-1] - if isinstance(w_association, W_PointersObject) and w_association.size() >= 2: - from spyvm import wrapper - association = wrapper.AssociationWrapper(w_association.space(), w_association) - w_compiledin = association.value() + literals = self.literals + if literals and len(literals) > 0: + # (Blue book, p 607) Last of the literals is either the containing class + # or an association with compiledin as a class + w_candidate = literals[-1] + if isinstance(w_candidate, W_PointersObject) and w_candidate.has_space(): + space = w_candidate.space() # Not pretty to steal the space from another object. 
+ if w_candidate.is_class(space): + w_compiledin = w_candidate + elif w_candidate.size() >= 2: + from spyvm import wrapper + association = wrapper.AssociationWrapper(space, w_candidate) + w_candidate = association.value() + if w_candidate.is_class(space): + w_compiledin = w_candidate self.w_compiledin = w_compiledin return w_compiledin @@ -1378,29 +1410,11 @@ return retval + "---------------------\n" def guess_containing_classname(self): - from spyvm.shadow import ClassShadow - guessed_classname = None - if len(self.literals) > 0: - w_candidate = self.literals[-1] - if isinstance(w_candidate, W_PointersObject): - c_shadow = w_candidate._get_shadow() - if isinstance(c_shadow, ClassShadow): - guessed_classname = c_shadow.getname() - elif w_candidate.size() >= 2: - w_class = w_candidate.fetch(None, 1) - if isinstance(w_class, W_PointersObject): - d_shadow = w_class._get_shadow() - if isinstance(d_shadow, ClassShadow): - guessed_classname = d_shadow.getname() - if guessed_classname: - class_cutoff = len(guessed_classname) - 6 - if class_cutoff > 0: - classname = guessed_classname[0:class_cutoff] - else: - classname = guessed_classname - else: - classname = "" - return classname + w_class = self.compiled_in() + if w_class and w_class.has_space(): + # Not pretty to steal the space from another object. + return w_class.as_class_get_shadow(w_class.space()).getname() + return "? 
(no compiledin-info)" def get_identifier_string(self): return "%s >> #%s" % (self.guess_containing_classname(), self._likely_methodname) diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -123,7 +123,7 @@ return model.W_PointersObject(space, None, size) def test_w_compiledin_assoc(): - val = new_object() + val = bootstrap_class(0) assoc = new_object(2) assoc.store(space, 0, new_object()) assoc.store(space, 1, val) From noreply at buildbot.pypy.org Tue Apr 1 10:52:12 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 1 Apr 2014 10:52:12 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Enhanced tracing-output of interpreter. Message-ID: <20140401085212.81DD81D274E@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r735:1bfa9310d02a Date: 2014-04-01 10:52 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/1bfa9310d02a/ Log: Enhanced tracing-output of interpreter. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -18,7 +18,7 @@ def get_printable_location(pc, self, method): bc = ord(method.bytes[pc]) - name = method._likely_methodname + name = method.get_identifier_string() return '%d: [%s]%s (%s)' % (pc, hex(bc), BYTECODE_NAMES[bc], name) @@ -355,8 +355,8 @@ def _sendSelector(self, w_selector, argcount, interp, receiver, receiverclassshadow): if interp.should_trace(): - print "%sSending selector %r to %r with: %r" % ( - interp._last_indent, w_selector.as_repr_string(), receiver, + print "%sSending selector #%s to %r with: %r" % ( + interp._last_indent, w_selector.as_string(), receiver, [self.peek(argcount-1-i) for i in range(argcount)]) assert argcount >= 0 @@ -402,7 +402,7 @@ # ###################################################################### if interp.trace: - print '%s%s missing: #%s' % (interp.padding('#'), s_frame.short_str(), w_selector.as_repr_string()) + print '%s%s missing: #%s' % (interp.padding('#'), s_frame.short_str(), w_selector.as_string()) if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() @@ -415,9 +415,9 @@ func = primitives.prim_holder.prim_table[code] # ################################################################## if interp.trace: - print "%s-> primitive %d \t(in #%s, named #%s)" % ( + print "%s-> primitive %d \t(in %s, named #%s)" % ( ' ' * (interp.max_stack_depth - interp.remaining_stack_depth), - code, self.w_method()._likely_methodname, w_selector.as_repr_string()) + code, self.w_method().get_identifier_string(), w_selector.as_string()) try: # note: argcount does not include rcvr return func(interp, self, argcount, w_method) @@ -427,7 +427,7 @@ ' ' * (interp.max_stack_depth - interp.remaining_stack_depth),) if interp.should_trace(True): - print "PRIMITIVE FAILED: %d %s" % (w_method.primitive, w_selector.as_repr_string()) + print "PRIMITIVE FAILED: %d #%s" % (w_method.primitive, w_selector.as_string()) raise e @@ 
-895,19 +895,19 @@ receiver, receiverclassshadow): options = [False] def next(): interp.message_stepping = True; print 'Now continue (c).' - def over(): options[0] = True; print 'Skipping #%s. You still need to continue(c).' % w_selector.as_repr_string() + def over(): options[0] = True; print 'Skipping #%s. You still need to continue(c).' % w_selector.as_string() def pstack(): print s_context.print_stack() if interp.message_stepping: if argcount == 0: - print "-> %s %s" % (receiver.as_repr_string(), - w_selector.as_repr_string()) + print "-> %s #%s" % (receiver.as_repr_string(), + w_selector.as_string()) elif argcount == 1: - print "-> %s %s %s" % (receiver.as_repr_string(), - w_selector.as_repr_string(), + print "-> %s #%s %s" % (receiver.as_repr_string(), + w_selector.as_string(), s_context.peek(0).as_repr_string()) else: - print "-> %s %s %r" % (receiver.as_repr_string(), - w_selector.as_repr_string(), + print "-> %s #%s %r" % (receiver.as_repr_string(), + w_selector.as_string(), [s_context.peek(argcount-1-i) for i in range(argcount)]) import pdb; pdb.set_trace() if options[0]: From noreply at buildbot.pypy.org Tue Apr 1 11:23:04 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 1 Apr 2014 11:23:04 +0200 (CEST) Subject: [pypy-commit] pypy default: make os.fdopen rpython Message-ID: <20140401092304.395AA1C3001@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70374:495dc5472421 Date: 2014-04-01 11:21 +0200 http://bitbucket.org/pypy/pypy/changeset/495dc5472421/ Log: make os.fdopen rpython diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -49,6 +49,11 @@ from rpython.rlib.rfile import create_file return ctx.appcall(create_file, *args_w) + at register_flow_sc(os.fdopen) +def sc_os_fdopen(ctx, *args_w): + from rpython.rlib.rfile import create_fdopen_rfile + return ctx.appcall(create_fdopen_rfile, *args_w) + 
@register_flow_sc(os.tmpfile) def sc_os_tmpfile(ctx): from rpython.rlib.rfile import create_temp_rfile diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -47,6 +47,7 @@ rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal(fileno, [lltype.Ptr(FILE)], rffi.INT) +c_fdopen = llexternal('fdopen', [rffi.INT, rffi.CCHARP], lltype.Ptr(FILE)) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) @@ -93,6 +94,17 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) +def create_fdopen_rfile(fd, mode="r"): + assert mode is not None + ll_mode = rffi.str2charp(mode) + try: + ll_f = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) + if not ll_f: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + lltype.free(ll_mode, flavor='raw') + return RFile(ll_f) def create_popen_file(command, type): ll_command = rffi.str2charp(command) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -79,6 +79,22 @@ f() self.interpret(f, []) + def test_fdopen(self): + fname = str(self.tmpdir.join('file_4a')) + + def f(): + f = open(fname, "w") + new_fno = os.dup(f.fileno()) + f2 = os.fdopen(new_fno, "w") + f.close() + f2.write("xxx") + f2.close() + + f() + assert open(fname).read() == "xxx" + self.interpret(f, []) + assert open(fname).read() == "xxx" + def test_fileno(self): fname = str(self.tmpdir.join('file_5')) From noreply at buildbot.pypy.org Tue Apr 1 11:23:05 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 1 Apr 2014 11:23:05 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20140401092305.CEF571C3001@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: 
r70375:d4d584ff67d3 Date: 2014-04-01 11:22 +0200 http://bitbucket.org/pypy/pypy/changeset/d4d584ff67d3/ Log: merge diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -51,6 +51,8 @@ .. function:: int pypy_execute_source_ptr(char* source, void* ptr); + .. note:: Not available in PyPy <= 2.2.1 + Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. @@ -100,9 +102,29 @@ Worked! +.. note:: If the compilation fails because of missing PyPy.h header file, + you are running PyPy <= 2.2.1, please see the section `Missing PyPy.h`_. + +Missing PyPy.h +-------------- + +.. note:: PyPy.h is in the nightly builds and goes to new PyPy releases (>2.2.1). + +For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): + +.. code-block:: bash + + cd /opt/pypy/include + wget https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h + + More advanced example --------------------- +.. note:: This example depends on pypy_execute_source_ptr which is not available + in PyPy <= 2.2.1. You might want to see the alternative example + below. + Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. It's a bit longish, but it captures a gist what can be done with the PyPy @@ -161,6 +183,97 @@ is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` and fill the structure from Python side for the future use. +Alternative example +------------------- + +As ``pypy_execute_source_ptr`` is not available in PyPy 2.2.1, you might want to try +an alternative approach which relies on -export-dynamic flag to the GNU linker. +The downside to this approach is that it is platform dependent. + +.. 
code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + lib = ffi.verify('int callback(int (*func)(int));')\n\ + lib.callback(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source(source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + + +Make sure to pass -export-dynamic flag when compiling:: + + $ gcc -g -o x x.c -lpypy-c -L. -export-dynamic + $ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +Finding pypy_home +----------------- + +Function pypy_setup_home takes one parameter - the path to libpypy. There's +currently no "clean" way (pkg-config comes to mind) how to find this path. You +can try the following (GNU-specific) hack (don't forget to link against *dl*): + +.. 
code-block:: c + + #if !(_GNU_SOURCE) + #define _GNU_SOURCE + #endif + + #include + #include + #include + + // caller should free returned pointer to avoid memleaks + // returns NULL on error + char* guess_pypyhome() { + // glibc-only (dladdr is why we #define _GNU_SOURCE) + Dl_info info; + void *_rpython_startup_code = dlsym(0,"rpython_startup_code"); + if (_rpython_startup_code == 0) { + return 0; + } + if (dladdr(_rpython_startup_code, &info) != 0) { + const char* lib_path = info.dli_fname; + char* lib_realpath = realpath(lib_path, 0); + return lib_realpath; + } + return 0; + } + + Threading --------- diff --git a/rpython/tool/logparser.py b/rpython/tool/logparser.py --- a/rpython/tool/logparser.py +++ b/rpython/tool/logparser.py @@ -410,7 +410,7 @@ total = sum([b for a, b in l]) for a, b in l: if a is None: - a = 'interpret' + a = 'normal-execution' s = " " * (50 - len(a)) print >>outfile, a, s, str(b*100/total) + "%" if out != '-': From noreply at buildbot.pypy.org Tue Apr 1 11:53:53 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 1 Apr 2014 11:53:53 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed compilation. Message-ID: <20140401095354.014CC1C3001@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r736:5713bb4e171f Date: 2014-04-01 11:53 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/5713bb4e171f/ Log: Fixed compilation. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -18,7 +18,7 @@ def get_printable_location(pc, self, method): bc = ord(method.bytes[pc]) - name = method.get_identifier_string() + name = method.safe_identifier_string() return '%d: [%s]%s (%s)' % (pc, hex(bc), BYTECODE_NAMES[bc], name) @@ -356,7 +356,7 @@ receiver, receiverclassshadow): if interp.should_trace(): print "%sSending selector #%s to %r with: %r" % ( - interp._last_indent, w_selector.as_string(), receiver, + interp._last_indent, w_selector.str_content(), receiver, [self.peek(argcount-1-i) for i in range(argcount)]) assert argcount >= 0 @@ -402,7 +402,7 @@ # ###################################################################### if interp.trace: - print '%s%s missing: #%s' % (interp.padding('#'), s_frame.short_str(), w_selector.as_string()) + print '%s%s missing: #%s' % (interp.padding('#'), s_frame.short_str(), w_selector.str_content()) if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() @@ -417,7 +417,7 @@ if interp.trace: print "%s-> primitive %d \t(in %s, named #%s)" % ( ' ' * (interp.max_stack_depth - interp.remaining_stack_depth), - code, self.w_method().get_identifier_string(), w_selector.as_string()) + code, self.w_method().get_identifier_string(), w_selector.str_content()) try: # note: argcount does not include rcvr return func(interp, self, argcount, w_method) @@ -427,7 +427,7 @@ ' ' * (interp.max_stack_depth - interp.remaining_stack_depth),) if interp.should_trace(True): - print "PRIMITIVE FAILED: %d #%s" % (w_method.primitive, w_selector.as_string()) + print "PRIMITIVE FAILED: %d #%s" % (w_method.primitive, w_selector.str_content()) raise e @@ -895,19 +895,19 @@ receiver, receiverclassshadow): options = [False] def next(): interp.message_stepping = True; print 'Now continue (c).' - def over(): options[0] = True; print 'Skipping #%s. You still need to continue(c).' 
% w_selector.as_string() + def over(): options[0] = True; print 'Skipping #%s. You still need to continue(c).' % w_selector.str_content() def pstack(): print s_context.print_stack() if interp.message_stepping: if argcount == 0: print "-> %s #%s" % (receiver.as_repr_string(), - w_selector.as_string()) + w_selector.str_content()) elif argcount == 1: print "-> %s #%s %s" % (receiver.as_repr_string(), - w_selector.as_string(), + w_selector.str_content(), s_context.peek(0).as_repr_string()) else: print "-> %s #%s %r" % (receiver.as_repr_string(), - w_selector.as_string(), + w_selector.str_content(), [s_context.peek(argcount-1-i) for i in range(argcount)]) import pdb; pdb.set_trace() if options[0]: diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -817,7 +817,7 @@ return self._size def str_content(self): - return "'%s'" % self.as_string() + return self.as_string() def as_string(self): if self.bytes is not None: @@ -1301,6 +1301,7 @@ if w_candidate.is_class(space): w_compiledin = w_candidate self.w_compiledin = w_compiledin + assert isinstance(w_compiledin, W_PointersObject) return w_compiledin # === Object Access === @@ -1419,6 +1420,19 @@ def get_identifier_string(self): return "%s >> #%s" % (self.guess_containing_classname(), self._likely_methodname) + def safe_identifier_string(self): + if not we_are_translated(): + return self.get_identifier_string() + # This has the same functionality as get_identifier_string, but without calling any + # methods in order to avoid side effects that prevent translation. 
+ w_class = self.w_compiledin + if isinstance(w_class, W_PointersObject): + from spyvm.shadow import ClassShadow + s_class = w_class.shadow + if isinstance(s_class, ClassShadow): + return "%s >> #%s" % (s_class.getname(), self._likely_methodname) + return "#%s" % self._likely_methodname + class DetachingShadowError(Exception): def __init__(self, old_shadow, new_shadow_class): self.old_shadow = old_shadow diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py --- a/spyvm/storage_statistics.py +++ b/spyvm/storage_statistics.py @@ -73,7 +73,7 @@ sizes = self.stats[key] sum = 0 for s in sizes: sum += s - print "%s: %d times, avg size: %f" % (self.key_string(key), len(sizes), sum/len(sizes)) + print "%s: %d times, avg size: %f" % (self.key_string(key), len(sizes), float(sum)/len(sizes)) if self.do_stats_sizes: print " All sizes: %s" % sizes From noreply at buildbot.pypy.org Tue Apr 1 16:42:21 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 1 Apr 2014 16:42:21 +0200 (CEST) Subject: [pypy-commit] pypy default: causes crashes on posix Message-ID: <20140401144221.091911C112D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70376:d0a0274a6fbe Date: 2014-04-01 16:39 +0200 http://bitbucket.org/pypy/pypy/changeset/d0a0274a6fbe/ Log: causes crashes on posix diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,7 +219,6 @@ if restype is None: import ctypes restype = ctypes.c_int - self._argtypes_ = argsl self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return From noreply at buildbot.pypy.org Tue Apr 1 21:58:09 2014 From: noreply at buildbot.pypy.org (aliceinwire) Date: Tue, 1 Apr 2014 21:58:09 +0200 (CEST) Subject: [pypy-commit] pypy default: fix small typo Message-ID: <20140401195809.ECDE81D2ABB@cobra.cs.uni-duesseldorf.de> Author: aliceinwire Branch: Changeset: r70377:f64f5a091463 
Date: 2014-04-01 17:50 +0900 http://bitbucket.org/pypy/pypy/changeset/f64f5a091463/ Log: fix small typo diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -690,7 +690,7 @@ def setup_bootstrap_path(executable): """ - Try to to as little as possible and to have the stdlib in sys.path. In + Try to do as little as possible and to have the stdlib in sys.path. In particular, we cannot use any unicode at this point, because lots of unicode operations require to be able to import encodings. """ From noreply at buildbot.pypy.org Tue Apr 1 21:58:11 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 1 Apr 2014 21:58:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in aliceinwire/pypy (pull request #221) Message-ID: <20140401195811.798581D2ABB@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r70378:e110863e2400 Date: 2014-04-01 20:57 +0100 http://bitbucket.org/pypy/pypy/changeset/e110863e2400/ Log: Merged in aliceinwire/pypy (pull request #221) fix small typo diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -690,7 +690,7 @@ def setup_bootstrap_path(executable): """ - Try to to as little as possible and to have the stdlib in sys.path. In + Try to do as little as possible and to have the stdlib in sys.path. In particular, we cannot use any unicode at this point, because lots of unicode operations require to be able to import encodings. """ From noreply at buildbot.pypy.org Tue Apr 1 22:21:09 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 1 Apr 2014 22:21:09 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Py3-ify some cpyext tests. 
Message-ID: <20140401202109.BBC281C112D@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r70379:0f088917abce Date: 2014-04-01 22:17 +0200 http://bitbucket.org/pypy/pypy/changeset/0f088917abce/ Log: Py3-ify some cpyext tests. diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -222,14 +222,14 @@ assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) - def test_SetFromErrnoWithFilenameObject__PyString(self): + def test_SetFromErrnoWithFilenameObject__PyUnicode(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; - PyObject *filenameObject = PyString_FromString("/path/to/file"); + PyObject *filenameObject = PyUnicode_FromString("/path/to/file"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); Py_DECREF(filenameObject); return NULL; @@ -241,14 +241,14 @@ assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) - def test_SetFromErrnoWithFilenameObject__PyInt(self): + def test_SetFromErrnoWithFilenameObject__PyLong(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; - PyObject *intObject = PyInt_FromLong(3); + PyObject *intObject = PyLong_FromLong(3); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); Py_DECREF(intObject); return NULL; From noreply at buildbot.pypy.org Tue Apr 1 22:30:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 1 Apr 2014 22:30:33 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140401203033.516D21C112D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70380:6c9441b9c2d5 Date: 2014-04-01 13:29 -0700 http://bitbucket.org/pypy/pypy/changeset/6c9441b9c2d5/ Log: 
merge default diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -64,6 +64,7 @@ ^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ +^pypy/goal/.+\.lib$ ^pypy/_cache$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ diff --git a/lib-python/2.7/test/test_file.py b/lib-python/2.7/test/test_file.py --- a/lib-python/2.7/test/test_file.py +++ b/lib-python/2.7/test/test_file.py @@ -301,6 +301,7 @@ self.fail("readlines() after next() with empty buffer " "failed. Got %r, expected %r" % (line, testline)) # Reading after iteration hit EOF shouldn't hurt either + f.close() f = self.open(TESTFN, 'rb') try: for line in f: diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,6 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) + f.close() def test_head(self): response = self.request( diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,3 +1,5 @@ +import imp, os + try: import cpyext except ImportError: @@ -10,4 +12,12 @@ pass # obscure condition of _ctypes_test.py being imported by py.test else: import _pypy_testcapi - _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') + cfile = '_ctypes_test.c' + thisdir = os.path.dirname(__file__) + output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + try: + fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) + imp.load_module('_ctypes_test', fp, filename, description) + except ImportError: + print 'could not find _ctypes_test in',output_dir + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ 
b/lib_pypy/_pypy_testcapi.py @@ -1,5 +1,22 @@ import os, sys, imp -import tempfile +import tempfile, binascii + +def get_hashed_dir(cfile): + with open(cfile,'r') as fid: + content = fid.read() + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], content]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + def _get_c_extension_suffix(): for ext, mod, typ in imp.get_suffixes(): @@ -7,12 +24,13 @@ return ext -def compile_shared(csource, modulename): +def compile_shared(csource, modulename, output_dir=None): """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, and import it. """ thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() + if output_dir is None: + output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,7 +1,17 @@ +import imp, os + try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -else: - import _pypy_testcapi - _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') + +import _pypy_testcapi +cfile = '_testcapimodule.c' +thisdir = os.path.dirname(__file__) +output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + +try: + fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) + imp.load_module('_testcapi', fp, filename, description) +except ImportError: + _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -51,6 +51,8 @@ .. 
function:: int pypy_execute_source_ptr(char* source, void* ptr); + .. note:: Not available in PyPy <= 2.2.1 + Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. @@ -100,9 +102,29 @@ Worked! +.. note:: If the compilation fails because of missing PyPy.h header file, + you are running PyPy <= 2.2.1, please see the section `Missing PyPy.h`_. + +Missing PyPy.h +-------------- + +.. note:: PyPy.h is in the nightly builds and goes to new PyPy releases (>2.2.1). + +For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): + +.. code-block:: bash + + cd /opt/pypy/include + wget https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h + + More advanced example --------------------- +.. note:: This example depends on pypy_execute_source_ptr which is not available + in PyPy <= 2.2.1. You might want to see the alternative example + below. + Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. It's a bit longish, but it captures a gist what can be done with the PyPy @@ -161,6 +183,97 @@ is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` and fill the structure from Python side for the future use. +Alternative example +------------------- + +As ``pypy_execute_source_ptr`` is not available in PyPy 2.2.1, you might want to try +an alternative approach which relies on -export-dynamic flag to the GNU linker. +The downside to this approach is that it is platform dependent. + +.. 
code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + lib = ffi.verify('int callback(int (*func)(int));')\n\ + lib.callback(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source(source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + + +Make sure to pass -export-dynamic flag when compiling:: + + $ gcc -g -o x x.c -lpypy-c -L. -export-dynamic + $ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +Finding pypy_home +----------------- + +Function pypy_setup_home takes one parameter - the path to libpypy. There's +currently no "clean" way (pkg-config comes to mind) how to find this path. You +can try the following (GNU-specific) hack (don't forget to link against *dl*): + +.. 
code-block:: c + + #if !(_GNU_SOURCE) + #define _GNU_SOURCE + #endif + + #include + #include + #include + + // caller should free returned pointer to avoid memleaks + // returns NULL on error + char* guess_pypyhome() { + // glibc-only (dladdr is why we #define _GNU_SOURCE) + Dl_info info; + void *_rpython_startup_code = dlsym(0,"rpython_startup_code"); + if (_rpython_startup_code == 0) { + return 0; + } + if (dladdr(_rpython_startup_code, &info) != 0) { + const char* lib_path = info.dli_fname; + char* lib_realpath = realpath(lib_path, 0); + return lib_realpath; + } + return 0; + } + + Threading --------- diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -87,7 +87,7 @@ .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf .. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf -.. _`Compiling Dynamic Language Implementations`: http://codespeak.net/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`Compiling Dynamic Language Implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf Talks and Presentations @@ -258,24 +258,24 @@ .. _`PyCon 2010`: http://morepypy.blogspot.com/2010/02/pycon-2010-report.html .. _`RuPy 2009`: http://morepypy.blogspot.com/2009/11/pypy-on-rupy-2009.html -.. _`PyPy 3000`: http://codespeak.net/pypy/extradoc/talk/ep2006/pypy3000.txt -.. _`What can PyPy do for you`: http://codespeak.net/pypy/extradoc/talk/ep2006/usecases-slides.html -.. _`PyPy introduction at EuroPython 2006`: http://codespeak.net/pypy/extradoc/talk/ep2006/intro.pdf -.. 
_`PyPy - the new Python implementation on the block`: http://codespeak.net/pypy/extradoc/talk/22c3/hpk-tech.html -.. _`PyPy development method`: http://codespeak.net/pypy/extradoc/talk/pycon2006/method_talk.html -.. _`PyPy intro`: http://codespeak.net/pypy/extradoc/talk/accu2006/accu-2006.pdf -.. _oscon2003-paper: http://codespeak.net/pypy/extradoc/talk/oscon2003-paper.html -.. _`Architecture introduction slides`: http://codespeak.net/pypy/extradoc/talk/amsterdam-sprint-intro.pdf -.. _`EU funding for FOSS`: http://codespeak.net/pypy/extradoc/talk/2004-21C3-pypy-EU-hpk.pdf -.. _`py lib slides`: http://codespeak.net/pypy/extradoc/talk/2005-pycon-py.pdf -.. _`PyCon 2005`: http://codespeak.net/pypy/extradoc/talk/pypy-talk-pycon2005/README.html -.. _`Trouble in Paradise`: http://codespeak.net/pypy/extradoc/talk/agile2006/during-oss-sprints_talk.pdf -.. _`Sprint Driven Development`: http://codespeak.net/pypy/extradoc/talk/xp2006/during-xp2006-sprints.pdf -.. _`Kill -1`: http://codespeak.net/pypy/extradoc/talk/ep2006/kill_1_agiletalk.pdf -.. _`Open Source, EU-Funding and Agile Methods`: http://codespeak.net/pypy/extradoc/talk/22c3/agility.pdf -.. _`PyPy Status`: http://codespeak.net/pypy/extradoc/talk/vancouver/talk.html +.. _`PyPy 3000`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt +.. _`What can PyPy do for you`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.txt +.. _`PyPy introduction at EuroPython 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf +.. _`PyPy - the new Python implementation on the block`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.txt +.. _`PyPy development method`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.txt +.. _`PyPy intro`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf +.. _oscon2003-paper: https://bitbucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.txt +.. 
_`Architecture introduction slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf +.. _`EU funding for FOSS`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf +.. _`py lib slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf +.. _`PyCon 2005`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.txt +.. _`Trouble in Paradise`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf +.. _`Sprint Driven Development`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf +.. _`Kill -1`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf +.. _`Open Source, EU-Funding and Agile Methods`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf +.. _`PyPy Status`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/vancouver/ .. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf -.. _`PyPy's VM Approach`: http://codespeak.net/pypy/extradoc/talk/dls2006/talk.html +.. _`PyPy's VM Approach`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/ .. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ .. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -14,7 +14,7 @@ The present document describes the specific garbage collectors that we wrote in our framework. -.. _`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. 
_`EU-report on this topic`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf Garbage collectors currently written for the GC framework diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -389,7 +389,7 @@ .. _`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html -.. _`py library`: http://pylib.org +.. _`py library`: http://pylib.readthedocs.org/ .. _`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -103,7 +103,7 @@ .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org -.. _here: http://tismerysoft.de/pypy/irc-logs/pypy +.. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -700,7 +700,7 @@ def setup_bootstrap_path(executable): """ - Try to to as little as possible and to have the stdlib in sys.path. In + Try to do as little as possible and to have the stdlib in sys.path. In particular, we cannot use any unicode at this point, because lots of unicode operations require to be able to import encodings. 
""" diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -37,7 +37,7 @@ SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3 SOCKET_TOO_LARGE_FOR_SELECT, SOCKET_OPERATION_OK = 4, 5 -HAVE_RPOLL = True # Even win32 has rpoll.poll +HAVE_RPOLL = 'poll' in dir(rpoll) constants = {} constants["SSL_ERROR_ZERO_RETURN"] = PY_SSL_ERROR_ZERO_RETURN diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,8 +64,6 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] - # tests are not strictly ansi C compliant, compile as C++ - kwds["compile_extra"].append("/TP") # prevent linking with PythonXX.lib w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -246,9 +246,9 @@ ("test_FromAny", "METH_NOARGS", ''' npy_intp dims[2] ={2, 3}; - PyObject * obj1 = PyArray_SimpleNew(2, dims, 1); + PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); PyArray_FILLWBYTE(obj1, 42); - PyObject * obj2 = _PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); + obj2 = _PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); Py_DECREF(obj1); return obj2; ''' @@ -256,9 +256,9 @@ ("test_FromObject", "METH_NOARGS", ''' npy_intp dims[2] ={2, 3}; - PyObject * obj1 = PyArray_SimpleNew(2, dims, 1); + PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); PyArray_FILLWBYTE(obj1, 42); - PyObject * obj2 = _PyArray_FromObject(obj1, 12, 0, 0); + obj2 = _PyArray_FromObject(obj1, 12, 0, 0); Py_DECREF(obj1); return obj2; ''' diff --git 
a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -228,8 +228,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *filenameObject = PyUnicode_FromString("/path/to/file"); errno = EBADF; - PyObject *filenameObject = PyUnicode_FromString("/path/to/file"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); Py_DECREF(filenameObject); return NULL; @@ -247,8 +247,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *intObject = PyLong_FromLong(3); errno = EBADF; - PyObject *intObject = PyLong_FromLong(3); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); Py_DECREF(intObject); return NULL; @@ -266,8 +266,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); errno = EBADF; - PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst); Py_DECREF(lst); return NULL; @@ -285,8 +285,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); errno = EBADF; - PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple); Py_DECREF(tuple); return NULL; @@ -304,8 +304,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *none = Py_BuildValue(""); errno = EBADF; - PyObject *none = Py_BuildValue(""); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none); Py_DECREF(none); return NULL; diff --git a/pypy/module/test_lib_pypy/test_site_extra.py b/pypy/module/test_lib_pypy/test_site_extra.py --- a/pypy/module/test_lib_pypy/test_site_extra.py +++ b/pypy/module/test_lib_pypy/test_site_extra.py @@ -4,8 +4,11 @@ def 
test_preimported_modules(): lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', 'exceptions', 'signal', 'sys', 'zipimport'] - g = os.popen('"%s" -c "import sys; print sorted(sys.modules)"' % - (sys.executable,)) + if sys.platform == 'win32': + cmd = '%s' % (sys.executable,) + else: + cmd = '"%s"' % (sys.executable,) + g = os.popen(cmd + ' -c "import sys; print sorted(sys.modules)"') real_data = g.read() g.close() for name in lst: diff --git a/pypy/module/test_lib_pypy/test_testcapi.py b/pypy/module/test_lib_pypy/test_testcapi.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_testcapi.py @@ -0,0 +1,15 @@ +import py, sys + +if '__pypy__' not in sys.builtin_module_names: + py.test.skip('pypy only test') + +from lib_pypy import _testcapi #make sure _testcapi is built + +def test_get_hashed_dir(): + import sys + # This should not compile _testcapi, so the output is empty + script = "import _testcapi; assert 'get_hashed_dir' not in dir(_testcapi)" + output = py.process.cmdexec('''"%s" -c "%s"''' % + (sys.executable, script)) + assert output == '' + diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -151,6 +151,9 @@ '*.c', '*.o')) for file in ['LICENSE', 'README.rst']: shutil.copy(str(basedir.join(file)), str(pypydir)) + for file in ['_testcapimodule.c', '_ctypes_test.c']: + shutil.copyfile(str(basedir.join('lib_pypy', file)), + str(pypydir.join('lib_pypy', file))) # spdir = pypydir.ensure('site-packages', dir=True) shutil.copy(str(basedir.join('site-packages', 'README')), str(spdir)) diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -17,6 +17,8 @@ exe_name_in_archive = 'bin/pypy' pypy_c = py.path.local(pypydir).join('goal', basename) if not pypy_c.check(): + if sys.platform == 'win32': + assert False, 
"test on win32 requires exe" pypy_c.write("#!/bin/sh") pypy_c.chmod(0755) fake_pypy_c = True @@ -81,6 +83,8 @@ package.USE_ZIPFILE_MODULE = prev def test_fix_permissions(tmpdir): + if sys.platform == 'win32': + py.test.skip('needs to be more general for windows') def check(f, mode): assert f.stat().mode & 0777 == mode # diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -16,7 +16,13 @@ if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): raise ValueError("%r is tagged as NOT_RPYTHON" % (func,)) if func.func_code.co_cellvars: - raise ValueError("RPython functions cannot create closures") + raise ValueError( +"""RPython functions cannot create closures +Possible casues: + Function is inner function + Function uses generator expressions + Lambda expressions +in %r""" % (func,)) if not (func.func_code.co_flags & CO_NEWLOCALS): raise ValueError("The code object for a RPython function should have " "the flag CO_NEWLOCALS set.") diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -49,6 +49,11 @@ from rpython.rlib.rfile import create_file return ctx.appcall(create_file, *args_w) + at register_flow_sc(os.fdopen) +def sc_os_fdopen(ctx, *args_w): + from rpython.rlib.rfile import create_fdopen_rfile + return ctx.appcall(create_fdopen_rfile, *args_w) + @register_flow_sc(os.tmpfile) def sc_os_tmpfile(ctx): from rpython.rlib.rfile import create_temp_rfile diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -362,11 +362,18 @@ self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: if self._finish_gcmap: - self._finish_gcmap[0] |= r_uint(0) # r0 + # we're returning with a 
guard_not_forced_2, and + # additionally we need to say that r0 contains + # a reference too: + self._finish_gcmap[0] |= r_uint(0) gcmap = self._finish_gcmap else: gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) + elif self._finish_gcmap: + # we're returning with a guard_not_forced_2 + gcmap = self._finish_gcmap + self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather # keep that one and kill all the others diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -916,3 +916,73 @@ cpu.execute_token(token, 1, a)) assert getmap(frame).count('1') == 4 + def test_finish_without_gcmap(self): + cpu = self.cpu + + loop = self.parse(""" + [i0] + finish(i0, descr=finaldescr) + """, namespace={'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, 10)) + assert not frame.jf_gcmap + + def test_finish_with_trivial_gcmap(self): + cpu = self.cpu + + loop = self.parse(""" + [p0] + finish(p0, descr=finaldescr) + """, namespace={'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + n = lltype.nullptr(llmemory.GCREF.TO) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, n)) + assert getmap(frame) == '1' + + def test_finish_with_guard_not_forced_2_ref(self): + cpu = self.cpu + + loop = self.parse(""" + [p0, p1] + guard_not_forced_2(descr=faildescr) [p1] + finish(p0, descr=finaldescr) + """, namespace={'faildescr': BasicFailDescr(1), + 'finaldescr': BasicFinalDescr(2)}) + + 
token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + n = lltype.nullptr(llmemory.GCREF.TO) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, n, n)) + assert getmap(frame).count('1') == 2 + + def test_finish_with_guard_not_forced_2_int(self): + cpu = self.cpu + + loop = self.parse(""" + [i0, p1] + guard_not_forced_2(descr=faildescr) [p1] + finish(i0, descr=finaldescr) + """, namespace={'faildescr': BasicFailDescr(1), + 'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + n = lltype.nullptr(llmemory.GCREF.TO) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, 10, n)) + assert getmap(frame).count('1') == 1 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1857,11 +1857,18 @@ arglist = op.getarglist() if arglist and arglist[0].type == REF: if self._finish_gcmap: - self._finish_gcmap[0] |= r_uint(1) # rax + # we're returning with a guard_not_forced_2, and + # additionally we need to say that eax/rax contains + # a reference too: + self._finish_gcmap[0] |= r_uint(1) gcmap = self._finish_gcmap else: gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) + elif self._finish_gcmap: + # we're returning with a guard_not_forced_2 + gcmap = self._finish_gcmap + self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather # keep that one and kill all the others diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -47,6 +47,7 @@ rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal(fileno, [lltype.Ptr(FILE)], rffi.INT) +c_fdopen = 
llexternal('fdopen', [rffi.INT, rffi.CCHARP], lltype.Ptr(FILE)) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) @@ -93,6 +94,17 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) +def create_fdopen_rfile(fd, mode="r"): + assert mode is not None + ll_mode = rffi.str2charp(mode) + try: + ll_f = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) + if not ll_f: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + lltype.free(ll_mode, flavor='raw') + return RFile(ll_f) def create_popen_file(command, type): ll_command = rffi.str2charp(command) diff --git a/rpython/rlib/rpoll.py b/rpython/rlib/rpoll.py --- a/rpython/rlib/rpoll.py +++ b/rpython/rlib/rpoll.py @@ -141,8 +141,9 @@ # poll() for Win32 # if hasattr(_c, 'WSAEventSelect'): - - def poll(fddict, timeout=-1): + # WSAWaitForMultipleEvents is broken. If you wish to try it, + # rename the function to poll() and run test_exchange in test_rpoll + def _poll(fddict, timeout=-1): """'fddict' maps file descriptors to interesting events. 'timeout' is an integer in milliseconds, and NOT a float number of seconds, but it's the same in CPython. Use -1 for infinite. @@ -188,6 +189,7 @@ if timeout < 0: timeout = _c.INFINITE + # XXX does not correctly report write status of a port ret = _c.WSAWaitForMultipleEvents(numevents, socketevents, False, timeout, False) diff --git a/rpython/rlib/rsre/rsre_re.py b/rpython/rlib/rsre/rsre_re.py --- a/rpython/rlib/rsre/rsre_re.py +++ b/rpython/rlib/rsre/rsre_re.py @@ -1,12 +1,15 @@ """ -Testing code. This is not used in a PyPy translation. -It exports the same interface as the Python 're' module. +This is not used in a PyPy translation, but it can be used +in RPython code. It exports the same interface as the +Python 're' module. 
You can call the functions at the start +of the module (expect the ones with NOT_RPYTHON for now). +They must be called with a *constant* pattern string. """ import re, sys from rpython.rlib.rsre import rsre_core, rsre_char from rpython.rlib.rsre.rpy import get_code as _get_code from rpython.rlib.unicodedata import unicodedb -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, we_are_translated rsre_char.set_unicode_db(unicodedb) @@ -18,24 +21,31 @@ X = VERBOSE = re.X # ignore whitespace and comments + at specialize.call_location() def match(pattern, string, flags=0): return compile(pattern, flags).match(string) + at specialize.call_location() def search(pattern, string, flags=0): return compile(pattern, flags).search(string) + at specialize.call_location() def findall(pattern, string, flags=0): return compile(pattern, flags).findall(string) + at specialize.call_location() def finditer(pattern, string, flags=0): return compile(pattern, flags).finditer(string) def sub(pattern, repl, string, count=0): + "NOT_RPYTHON" return compile(pattern).sub(repl, string, count) def subn(pattern, repl, string, count=0): + "NOT_RPYTHON" return compile(pattern).subn(repl, string, count) + at specialize.call_location() def split(pattern, string, maxsplit=0): return compile(pattern).split(string, maxsplit) @@ -71,18 +81,30 @@ def findall(self, string, pos=0, endpos=sys.maxint): matchlist = [] - for match in self.finditer(string, pos, endpos): + scanner = self.scanner(string, pos, endpos) + while True: + match = scanner.search() + if match is None: + break if self.groups == 0 or self.groups == 1: item = match.group(self.groups) else: + assert False, ("findall() not supported if there is more " + "than one group: not valid RPython") item = match.groups("") matchlist.append(item) return matchlist def finditer(self, string, pos=0, endpos=sys.maxint): - return iter(self.scanner(string, pos, endpos).search, None) + scanner = self.scanner(string, 
pos, endpos) + while True: + match = scanner.search() + if match is None: + break + yield match def subn(self, repl, string, count=0): + "NOT_RPYTHON" filter = repl if not callable(repl) and "\\" in repl: # handle non-literal strings; hand it over to the template compiler @@ -130,6 +152,7 @@ return item, n def sub(self, repl, string, count=0): + "NOT_RPYTHON" item, n = self.subn(repl, string, count) return item @@ -212,7 +235,9 @@ grp = self.group(i) if grp is None: grp = default grps.append(grp) - return tuple(grps) + if not we_are_translated(): + grps = tuple(grps) # xxx mostly to make tests happy + return grps def groupdict(self, default=None): d = {} diff --git a/rpython/rlib/rsre/test/test_re.py b/rpython/rlib/rsre/test/test_re.py --- a/rpython/rlib/rsre/test/test_re.py +++ b/rpython/rlib/rsre/test/test_re.py @@ -191,11 +191,15 @@ assert re.findall(":+", "abc") == [] assert re.findall(":+", "a:b::c:::d") == [":", "::", ":::"] assert re.findall("(:+)", "a:b::c:::d") == [":", "::", ":::"] + + def test_re_findall_2(self): + py.test.skip("findall() returning groups is not RPython") assert re.findall("(:)(:*)", "a:b::c:::d") == [(":", ""), (":", ":"), (":", "::")] def test_bug_117612(self): + py.test.skip("findall() returning groups is not RPython") assert re.findall(r"(a|(b))", "aba") == ( [("a", ""),("b", "b"),("a", "")]) diff --git a/rpython/rlib/rsre/test/test_zinterp.py b/rpython/rlib/rsre/test/test_zinterp.py --- a/rpython/rlib/rsre/test/test_zinterp.py +++ b/rpython/rlib/rsre/test/test_zinterp.py @@ -35,3 +35,23 @@ return int("aaaaaa" == g.group(0)) assert interpret(f, [3]) == 1 assert interpret(f, [0]) == 3 + +def test_translates(): + from rpython.rlib.rsre import rsre_re + def f(i): + if i: + s = "aaaaaa" + else: + s = "caaaaa" + print rsre_re.match("(a|b)aa", s) + print rsre_re.match("a{4}", s) + print rsre_re.search("(a|b)aa", s) + print rsre_re.search("a{4}", s) + for x in rsre_re.findall("(a|b)a", s): print x + for x in rsre_re.findall("a{2}", s): 
print x + for x in rsre_re.finditer("(a|b)a", s): print x + for x in rsre_re.finditer("a{2}", s): print x + for x in rsre_re.split("(a|b)a", s): print x + for x in rsre_re.split("a{2}", s): print x + return 0 + interpret(f, [3]) # assert does not crash diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -193,7 +193,7 @@ # Truncate. Note that this may grow the file! handle = get_osfhandle(fd) if not SetEndOfFile(handle): - raise WindowsError(GetLastError(), + raise OSError(GetLastError(), "Could not truncate file") finally: # we restore the file pointer position in any case diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -79,6 +79,22 @@ f() self.interpret(f, []) + def test_fdopen(self): + fname = str(self.tmpdir.join('file_4a')) + + def f(): + f = open(fname, "w") + new_fno = os.dup(f.fileno()) + f2 = os.fdopen(new_fno, "w") + f.close() + f2.write("xxx") + f2.close() + + f() + assert open(fname).read() == "xxx" + self.interpret(f, []) + assert open(fname).read() == "xxx" + def test_fileno(self): fname = str(self.tmpdir.join('file_5')) diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -3,12 +3,26 @@ import py from rpython.rlib.rsocket import * -from rpython.rlib.rpoll import * +from rpython.rlib.rpoll import select +try: + from rpython.rlib.rpoll import poll +except ImportError: + py.test.skip('no poll available on this platform') from rpython.rtyper.test.test_llinterp import interpret def setup_module(mod): rsocket_startup() +def one_in_event(events, fd): + assert len(events) == 1 + assert events[0][0] == fd + assert events[0][1] & POLLIN + +def one_out_event(events, fd): + assert len(events) == 1 + assert events[0][0] == fd + assert events[0][1] & POLLOUT + def 
test_simple(): serv = RSocket(AF_INET, SOCK_STREAM) serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) @@ -24,18 +38,14 @@ assert err != 0 events = poll({serv.fd: POLLIN}, timeout=500) - assert len(events) == 1 - assert events[0][0] == serv.fd - assert events[0][1] & POLLIN + one_in_event(events, serv.fd) servconn_fd, cliaddr = serv.accept() servconn = RSocket(AF_INET, fd=servconn_fd) events = poll({serv.fd: POLLIN, cli.fd: POLLOUT}, timeout=500) - assert len(events) == 1 - assert events[0][0] == cli.fd - assert events[0][1] & POLLOUT + one_out_event(events, cli.fd) err = cli.connect_ex(servaddr) # win32: returns WSAEISCONN when the connection finally succeed. @@ -55,6 +65,72 @@ servconn.close() serv.close() +def test_exchange(): + if not poll: + py.test.skip('poll not available for this platform') + serv = RSocket(AF_INET, SOCK_STREAM) + serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) + serv.listen(1) + servaddr = serv.getsockname() + + events = poll({serv.fd: POLLIN}, timeout=100) + assert len(events) == 0 + + cli = RSocket(AF_INET, SOCK_STREAM) + cli.setblocking(True) + err = cli.connect_ex(servaddr) + assert err == 0 + + events = poll({serv.fd: POLLIN}, timeout=500) + one_in_event(events, serv.fd) + + servconn_fd, cliaddr = serv.accept() + servconn = RSocket(AF_INET, fd=servconn_fd) + + events = poll({serv.fd: POLLIN, + cli.fd: POLLOUT}, timeout=500) + one_out_event(events, cli.fd) + + #send some data + events = poll({cli.fd: POLLOUT}, timeout=500) + one_out_event(events, cli.fd) + cli.send("g'day, mate") + events = poll({servconn.fd: POLLIN}, timeout=500) + one_in_event(events, servconn.fd) + answer = servconn.recv(1024) + assert answer == "g'day, mate" + + #send a reply + events = poll({servconn.fd: POLLOUT}, timeout=500) + one_out_event(events, servconn.fd) + servconn.send("you mean hello?") + events = poll({cli.fd: POLLIN}, timeout=500) + one_in_event(events, cli.fd) + answer = cli.recv(1024) + assert answer == "you mean hello?" 
+ + #send more data + events = poll({cli.fd: POLLOUT}, timeout=500) + one_out_event(events, cli.fd) + cli.send("sorry, wrong channel") + events = poll({servconn.fd: POLLIN}, timeout=500) + one_in_event(events, servconn.fd) + answer = servconn.recv(1024) + assert answer == "sorry, wrong channel" + + events = poll({servconn.fd: POLLOUT}, timeout=500) + one_out_event(events, servconn.fd) + servconn.send("np bye") + events = poll({cli.fd: POLLIN}, timeout=500) + one_in_event(events, cli.fd) + answer = cli.recv(1024) + assert answer == "np bye" + + cli.close() + servconn.close() + serv.close() + + def test_select(): if os.name == 'nt': py.test.skip('cannot select on file handles on windows') diff --git a/rpython/tool/logparser.py b/rpython/tool/logparser.py --- a/rpython/tool/logparser.py +++ b/rpython/tool/logparser.py @@ -410,7 +410,7 @@ total = sum([b for a, b in l]) for a, b in l: if a is None: - a = 'interpret' + a = 'normal-execution' s = " " * (50 - len(a)) print >>outfile, a, s, str(b*100/total) + "%" if out != '-': From noreply at buildbot.pypy.org Tue Apr 1 22:39:07 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 1 Apr 2014 22:39:07 +0200 (CEST) Subject: [pypy-commit] pypy default: complete the other half of f43ca379c4c9, Popen with shell=True passes env properly on win32 Message-ID: <20140401203907.57DEA1C112D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70381:6513cd57d9d5 Date: 2014-04-01 23:36 +0300 http://bitbucket.org/pypy/pypy/changeset/6513cd57d9d5/ Log: complete the other half of f43ca379c4c9, Popen with shell=True passes env properly on win32 diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -11,6 +11,9 @@ def run_subprocess(executable, args, env=None, cwd=None): return _run(executable, args, env, cwd) +shell_default = False +if sys.platform == 'win32': + shell_default = True def _run(executable, args, env, cwd): # 
unless overridden below if isinstance(args, str): @@ -21,7 +24,9 @@ args = [str(executable)] else: args = [str(executable)] + args - shell = False + # shell=True on unix-like is a known security vulnerability, but + # on windows shell=True does not properly propogate the env dict + shell = shell_default # Just before spawning the subprocess, do a gc.collect(). This # should help if we are running on top of PyPy, if the subprocess From noreply at buildbot.pypy.org Tue Apr 1 22:39:08 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 1 Apr 2014 22:39:08 +0200 (CEST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20140401203908.EFAEE1C112D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70382:a00fc425d947 Date: 2014-04-01 23:38 +0300 http://bitbucket.org/pypy/pypy/changeset/a00fc425d947/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -127,3 +127,6 @@ .. branch: win32-fixes4 fix more tests for win32 + +.. 
branch: latest-improve-doc +Fix broken links in documentation From noreply at buildbot.pypy.org Tue Apr 1 22:52:59 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 1 Apr 2014 22:52:59 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: branch for fixing/adapting the list/dict/etc strategies that were disabled Message-ID: <20140401205259.B5DF31C0034@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70383:557b939aefd3 Date: 2014-04-01 13:34 -0700 http://bitbucket.org/pypy/pypy/changeset/557b939aefd3/ Log: branch for fixing/adapting the list/dict/etc strategies that were disabled while focusing on py3k compat diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1192,13 +1192,13 @@ w_ann = None if num_annotations: names_w = space.fixedview(self.popvalue()) - w_ann = space.newdict(strdict=True) + w_ann = space.newdict(strdict=True) # XXX: strdict?? 
for i in range(len(names_w) - 1, -1, -1): space.setitem(w_ann, names_w[i], self.popvalue()) defaultarguments = self.popvalues(posdefaults) w_kw_defs = None if kwdefaults: - w_kw_defs = space.newdict(strdict=True) + w_kw_defs = space.newdict(strdict=True) # XXX: for i in range(kwdefaults - 1, -1, -1): w_name = self.popvalue() w_def = self.popvalue() diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -58,7 +58,7 @@ #elif instance or strdict or module: # assert w_type is None # strategy = space.fromcache(BytesDictStrategy) - elif False and kwargs: + elif kwargs: assert w_type is None from pypy.objspace.std.kwargsdict import EmptyKwargsDictStrategy strategy = space.fromcache(EmptyKwargsDictStrategy) @@ -500,7 +500,7 @@ w_dict.setitem(w_key, w_value) def setitem_str(self, w_dict, key, w_value): - self.switch_to_bytes_strategy(w_dict) + self.switch_to_unicode_strategy(w_dict) w_dict.setitem_str(key, w_value) def delitem(self, w_dict, w_key): @@ -829,6 +829,7 @@ def _never_equal_to(self, w_lookup_type): return _never_equal_to_string(self.space, w_lookup_type) + """ def setitem_str(self, w_dict, key, w_value): assert key is not None self.unerase(w_dict.dstorage)[key] = w_value @@ -844,6 +845,7 @@ def getitem_str(self, w_dict, key): assert key is not None return self.unerase(w_dict.dstorage).get(key, None) + """ def listview_bytes(self, w_dict): return self.unerase(w_dict.dstorage).keys() @@ -896,43 +898,47 @@ # we should implement the same shortcuts as we do for BytesDictStrategy - ## def setitem_str(self, w_dict, key, w_value): - ## assert key is not None - ## self.unerase(w_dict.dstorage)[key] = w_value + def setitem_str(self, w_dict, key, w_value): + assert key is not None + self.unerase(w_dict.dstorage)[key.decode('ascii')] = w_value - ## def getitem(self, w_dict, w_key): - ## space = self.space - ## # -- This is called extremely often. 
Hack for performance -- - ## if type(w_key) is space.StringObjectCls: - ## return self.getitem_str(w_dict, w_key.unwrap(space)) - ## # -- End of performance hack -- - ## return AbstractTypedStrategy.getitem(self, w_dict, w_key) + def getitem(self, w_dict, w_key): + space = self.space + # -- This is called extremely often. Hack for performance -- + if type(w_key) is space.StringObjectCls: + #return self.getitem_str(w_dict, w_key.unwrap(space)) + # XXX: + key = w_key.unwrap(space) + return self.unerase(w_dict.dstorage).get(key, None) + # -- End of performance hack -- + return AbstractTypedStrategy.getitem(self, w_dict, w_key) - ## def getitem_str(self, w_dict, key): - ## assert key is not None - ## return self.unerase(w_dict.dstorage).get(key, None) + def getitem_str(self, w_dict, key): + assert key is not None + return self.unerase(w_dict.dstorage).get(key.decode('utf-8'), None) def listview_unicode(self, w_dict): return self.unerase(w_dict.dstorage).keys() - ## def w_keys(self, w_dict): - ## return self.space.newlist_bytes(self.listview_bytes(w_dict)) + #def w_keys(self, w_dict): + # # XXX: I think we can completely kill w_keys... 
+ # return self.space.newlist_str(self.listview_str(w_dict)) def wrapkey(space, key): return space.wrap(key) - ## @jit.look_inside_iff(lambda self, w_dict: - ## w_dict_unrolling_heuristic(w_dict)) - ## def view_as_kwargs(self, w_dict): - ## d = self.unerase(w_dict.dstorage) - ## l = len(d) - ## keys, values = [None] * l, [None] * l - ## i = 0 - ## for key, val in d.iteritems(): - ## keys[i] = key - ## values[i] = val - ## i += 1 - ## return keys, values + @jit.look_inside_iff(lambda self, w_dict: + w_dict_unrolling_heuristic(w_dict)) + def view_as_kwargs(self, w_dict): + d = self.unerase(w_dict.dstorage) + l = len(d) + keys, values = [None] * l, [None] * l + i = 0 + for key, val in d.iteritems(): + keys[i] = key.encode('utf-8') + values[i] = val + i += 1 + return keys, values create_iterator_classes(UnicodeDictStrategy) diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -3,12 +3,12 @@ from rpython.rlib import rerased, jit from pypy.objspace.std.dictmultiobject import ( - BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, + DictStrategy, EmptyDictStrategy, ObjectDictStrategy, UnicodeDictStrategy, create_iterator_classes) class EmptyKwargsDictStrategy(EmptyDictStrategy): - def switch_to_bytes_strategy(self, w_dict): + def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy @@ -32,7 +32,8 @@ def is_correct_type(self, w_obj): space = self.space - return space.is_w(space.type(w_obj), space.w_str) + return space.is_w(space.type(w_obj), space.w_unicode) + #return type(w_obj) is space.UnicodeObjectCls def _never_equal_to(self, w_lookup_type): return False @@ -59,7 +60,7 @@ else: # limit the size so that the linear searches don't become too long if len(keys) >= 16: - self.switch_to_bytes_strategy(w_dict) + 
self.switch_to_unicode_strategy(w_dict) w_dict.setitem_str(key, w_value) else: keys.append(key) @@ -109,7 +110,7 @@ def w_keys(self, w_dict): l = self.unerase(w_dict.dstorage)[0] - return self.space.newlist_bytes(l[:]) + return self.space.newlist_unicode(l[:]) def values(self, w_dict): return self.unerase(w_dict.dstorage)[1][:] # to make non-resizable @@ -132,7 +133,7 @@ w_dict.dstorage = self.get_empty_storage() def switch_to_object_strategy(self, w_dict): - strategy = self.space.fromcache(ObjectDictStrategy) + strategy = self.space.fromcache(UnicodeDictStrategy) keys, values_w = self.unerase(w_dict.dstorage) d_new = strategy.unerase(strategy.get_empty_storage()) for i in range(len(keys)): @@ -140,13 +141,13 @@ w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) - def switch_to_bytes_strategy(self, w_dict): + def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(BytesDictStrategy) keys, values_w = self.unerase(w_dict.dstorage) storage = strategy.get_empty_storage() d_new = strategy.unerase(storage) for i in range(len(keys)): - d_new[keys[i]] = values_w[i] + d_new[keys[i].decode('utf-8')] = values_w[i] w_dict.strategy = strategy w_dict.dstorage = storage diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -3,7 +3,7 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - BytesDictStrategy, ObjectDictStrategy) + BytesDictStrategy, ObjectDictStrategy, UnicodeDictStrategy) class TestW_DictObject(object): @@ -1054,22 +1054,36 @@ return l def newlist_bytes(self, l): return l + def newlist_unicode(self, l): + return l DictObjectCls = W_DictMultiObject def type(self, w_obj): if isinstance(w_obj, FakeString): return str return type(w_obj) w_str = str + w_unicode = unicode def str_w(self, string): + if isinstance(string, unicode): + return 
string.encode('utf-8') assert isinstance(string, str) return string + def unicode_w(self, string): + assert isinstance(string, unicode) + return string + def int_w(self, integer, allow_conversion=True): assert isinstance(integer, int) return integer def wrap(self, obj): + if isinstance(obj, str): + return obj.decode('ascii') + return obj + + def wrapbytes(self, obj): return obj def isinstance_w(self, obj, klass): @@ -1147,10 +1161,15 @@ def setup_method(self,method): self.fakespace = FakeSpace() - self.string = self.fakespace.wrap("fish") - self.string2 = self.fakespace.wrap("fish2") + self.string = self.wrapstroruni("fish") + self.string2 = self.wrapstroruni("fish2") self.impl = self.get_impl() + def wrapstrorunicode(self, obj): + # XXX: blargh this is all screwed. its referencing FakeString + # and using regular strings to setitem. + return self.fakespace.wrap(obj) + def get_impl(self): strategy = self.StrategyClass(self.fakespace) storage = strategy.get_empty_storage() @@ -1295,18 +1314,19 @@ assert "s" not in d.w_keys() assert F() not in d.w_keys() -class TestBytesDictImplementation(BaseTestRDictImplementation): - StrategyClass = BytesDictStrategy +class TestUnicodeDictImplementation(BaseTestRDictImplementation): + StrategyClass = UnicodeDictStrategy #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() - s = FakeString(self.string) + #s = FakeString(self.string) + s = FakeUnicode(self.string) assert self.impl.getitem(s) == 1000 assert s.unwrapped def test_view_as_kwargs(self): - py.test.py3k_skip("XXX: strategies are currently broken") + #py.test.py3k_skip("XXX: strategies are currently broken") self.fill_impl() assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) @@ -1322,8 +1342,8 @@ def check_not_devolved(self): pass -class TestDevolvedBytesDictImplementation(BaseTestDevolvedDictImplementation): - StrategyClass = BytesDictStrategy +class 
TestDevolvedUnicodeDictImplementation(BaseTestDevolvedDictImplementation): + StrategyClass = UnicodeDictStrategy def test_module_uses_strdict(): diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -73,7 +73,7 @@ for i in range(100): assert d.setitem_str("d%s" % i, 4) is None assert d.strategy is not strategy - assert "BytesDictStrategy" == d.strategy.__class__.__name__ + assert "UnicodeDictStrategy" == d.strategy.__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() @@ -133,7 +133,6 @@ return r[r.find("(") + 1: r.find(")")] def test_create(self): - py3k_skip("need UnicodeDictStrategy to work in py3k") def f(**args): return args d = f(a=1) @@ -149,7 +148,6 @@ assert sorted(f(a=2, b=3).values()) == [2, 3] def test_setdefault(self): - py3k_skip("XXX: strategies are currently broken") def f(**args): return args d = f(a=1, b=2) From noreply at buildbot.pypy.org Wed Apr 2 00:27:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 2 Apr 2014 00:27:33 +0200 (CEST) Subject: [pypy-commit] pypy default: remove read-only files in stdlib tests, also filed issue21128 with cpython Message-ID: <20140401222733.318EC1C3001@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70384:548126e2b490 Date: 2014-04-02 01:24 +0300 http://bitbucket.org/pypy/pypy/changeset/548126e2b490/ Log: remove read-only files in stdlib tests, also filed issue21128 with cpython diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -10,6 +10,7 @@ import tempfile import unittest import argparse +import gc from StringIO import StringIO @@ -47,7 +48,11 @@ def tearDown(self): os.chdir(self.old_dir) - shutil.rmtree(self.temp_dir, True) + gc.collect() + for root, dirs, files in os.walk(self.temp_dir, 
topdown=False): + for name in files: + os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) From noreply at buildbot.pypy.org Wed Apr 2 00:37:28 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 2 Apr 2014 00:37:28 +0200 (CEST) Subject: [pypy-commit] pypy ast-issue1673: add doc for ast-issue1673 branch in whatsnew-head.rst Message-ID: <20140401223728.8C8F21C3001@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: ast-issue1673 Changeset: r70385:dfd4cc308b91 Date: 2014-04-01 17:38 -0400 http://bitbucket.org/pypy/pypy/changeset/dfd4cc308b91/ Log: add doc for ast-issue1673 branch in whatsnew-head.rst diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -127,3 +127,7 @@ .. branch: win32-fixes4 fix more tests for win32 + +.. branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field \ No newline at end of file From noreply at buildbot.pypy.org Wed Apr 2 00:41:06 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 2 Apr 2014 00:41:06 +0200 (CEST) Subject: [pypy-commit] pypy ast-issue1673: close branch before merging Message-ID: <20140401224106.99C871C0034@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: ast-issue1673 Changeset: r70386:43d5b8ad2ef0 Date: 2014-04-01 23:39 +0100 http://bitbucket.org/pypy/pypy/changeset/43d5b8ad2ef0/ Log: close branch before merging From noreply at buildbot.pypy.org Wed Apr 2 00:41:08 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 2 Apr 2014 00:41:08 +0200 (CEST) Subject: [pypy-commit] pypy default: merge branch ast-issue1673 Message-ID: <20140401224108.4B9CA1C0034@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r70387:e504b1f999df Date: 2014-04-01 23:40 +0100 
http://bitbucket.org/pypy/pypy/changeset/e504b1f999df/ Log: merge branch ast-issue1673 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -130,3 +130,7 @@ .. branch: latest-improve-doc Fix broken links in documentation + +.. branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field \ No newline at end of file diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -49,13 +49,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) @@ -3011,7 +3017,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def stmt_del_lineno(space, w_self): @@ -3038,7 +3045,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def stmt_del_col_offset(space, w_self): @@ -3074,7 +3082,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - 
w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def FunctionDef_del_name(space, w_self): @@ -3201,7 +3210,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def ClassDef_del_name(space, w_self): @@ -3665,7 +3675,8 @@ w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'nl') + # need to save the original object too + w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state |= 16 def Print_del_nl(space, w_self): @@ -4571,7 +4582,8 @@ w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'module') + # need to save the original object too + w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state |= 4 def ImportFrom_del_module(space, w_self): @@ -4620,7 +4632,8 @@ w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'level') + # need to save the original object too + w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state |= 16 def ImportFrom_del_level(space, w_self): @@ -4938,7 +4951,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def expr_del_lineno(space, w_self): @@ -4965,7 +4979,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 
'col_offset', w_new_value) w_self.initialization_state |= 2 def expr_del_col_offset(space, w_self): @@ -6292,7 +6307,8 @@ w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'n') + # need to save the original object too + w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state |= 4 def Num_del_n(space, w_self): @@ -6343,7 +6359,8 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 's') + # need to save the original object too + w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state |= 4 def Str_del_s(space, w_self): @@ -6423,7 +6440,8 @@ w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'attr') + # need to save the original object too + w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state |= 8 def Attribute_del_attr(space, w_self): @@ -6618,7 +6636,8 @@ w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'id') + # need to save the original object too + w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state |= 4 def Name_del_id(space, w_self): @@ -6853,7 +6872,8 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + # need to save the original object too + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Const_del_value(space, w_self): @@ -7521,7 +7541,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def excepthandler_del_lineno(space, w_self): @@ -7548,7 +7569,8 @@ w_self.setdictvalue(space, 'col_offset', 
w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def excepthandler_del_col_offset(space, w_self): @@ -7716,7 +7738,8 @@ w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'vararg') + # need to save the original object too + w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state |= 2 def arguments_del_vararg(space, w_self): @@ -7746,7 +7769,8 @@ w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'kwarg') + # need to save the original object too + w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state |= 4 def arguments_del_kwarg(space, w_self): @@ -7824,7 +7848,8 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'arg') + # need to save the original object too + w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 def keyword_del_arg(space, w_self): @@ -7905,7 +7930,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 1 def alias_del_name(space, w_self): @@ -7935,7 +7961,8 @@ w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'asname') + # need to save the original object too + w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state |= 2 def alias_del_asname(space, w_self): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ 
b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -459,6 +459,7 @@ self.emit("raise OperationError(space.w_TypeError, " "space.w_None)", 3) else: + save_original_object = True level = 2 if field.opt and field.type.value != "int": self.emit("if space.is_w(w_new_value, space.w_None):", 2) @@ -596,13 +597,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -387,3 +387,40 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") + + def test_dict_astNode(self): + import ast + num_node = ast.Num(n=2, lineno=2, col_offset=3) + dict_res = num_node.__dict__ + + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Num_notfullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2) + assert num_node.n == 2 + assert num_node.lineno == 2 + num_node2 = copy.deepcopy(num_node) + + def test_issue1673_Num_fullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2,col_offset=3) + num_node2 = copy.deepcopy(num_node) + assert num_node.n == num_node2.n + assert num_node.lineno == num_node2.lineno + assert num_node.col_offset == num_node2.col_offset + dict_res = num_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Str(self): + import ast + import 
copy + str_node = ast.Str(n=2,lineno=2) + assert str_node.n == 2 + assert str_node.lineno == 2 + str_node2 = copy.deepcopy(str_node) + dict_res = str_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2} + \ No newline at end of file From noreply at buildbot.pypy.org Wed Apr 2 02:58:06 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 2 Apr 2014 02:58:06 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: hg merge py3k Message-ID: <20140402005806.58EE41C155F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70388:911b0a74e75d Date: 2014-04-01 23:22 +0200 http://bitbucket.org/pypy/pypy/changeset/911b0a74e75d/ Log: hg merge py3k diff too long, truncating to 2000 out of 8265 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -64,6 +64,7 @@ ^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ +^pypy/goal/.+\.lib$ ^pypy/_cache$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ diff --git a/lib-python/2.7/test/test_file.py b/lib-python/2.7/test/test_file.py --- a/lib-python/2.7/test/test_file.py +++ b/lib-python/2.7/test/test_file.py @@ -301,6 +301,7 @@ self.fail("readlines() after next() with empty buffer " "failed. 
Got %r, expected %r" % (line, testline)) # Reading after iteration hit EOF shouldn't hurt either + f.close() f = self.open(TESTFN, 'rb') try: for line in f: diff --git a/lib-python/2.7/test/test_genericpath.py b/lib-python/2.7/test/test_genericpath.py --- a/lib-python/2.7/test/test_genericpath.py +++ b/lib-python/2.7/test/test_genericpath.py @@ -231,9 +231,14 @@ unicwd = u'\xe7w\xf0' try: fsencoding = test_support.TESTFN_ENCODING or "ascii" - unicwd.encode(fsencoding) + asciival = unicwd.encode(fsencoding) + if fsencoding == "mbcs": + # http://bugs.python.org/issue850997 + v = asciival.find('?') + if v >= 0: + raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) except (AttributeError, UnicodeEncodeError): - # FS encoding is probably ASCII + # FS encoding is probably ASCII or windows and codepage is non-Latin1 pass else: with test_support.temp_cwd(unicwd): diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,6 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) + f.close() def test_head(self): response = self.request( diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -171,7 +171,7 @@ # very inconsisten on CPython. In PyPy, memoryview supports # the buffer interface, and thus the following comparison # succeeds. 
See also the comment in - # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer # # Comparison with objects which don't support the buffer API self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) diff --git a/lib-python/3/test/test_aifc.py b/lib-python/3/test/test_aifc.py --- a/lib-python/3/test/test_aifc.py +++ b/lib-python/3/test/test_aifc.py @@ -77,7 +77,7 @@ self.assertEqual(f.getparams(), fout.getparams()) self.assertEqual(f.readframes(5), fout.readframes(5)) - @impl_detail("PyPy has no audioop module yet", pypy=False) + @impl_detail("PyPy has no audioop.lin2ulaw yet", pypy=False) def test_compress(self): f = self.f = aifc.open(self.sndfilepath) fout = self.fout = aifc.open(TESTFN, 'wb') diff --git a/lib-python/3/test/test_sunau.py b/lib-python/3/test/test_sunau.py --- a/lib-python/3/test/test_sunau.py +++ b/lib-python/3/test/test_sunau.py @@ -1,4 +1,4 @@ -from test.support import run_unittest, TESTFN +from test.support import run_unittest, TESTFN, impl_detail import unittest import os @@ -41,6 +41,7 @@ self.assertEqual(self.f.readframes(nframes), output) self.f.close() + @impl_detail(pypy=False) def test_ulaw(self): self.f = sunau.open(TESTFN, 'w') self.f.setnchannels(nchannels) diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -136,7 +136,7 @@ return self.value def __buffer__(self): - return self._buffer.__buffer__() + return memoryview(self._buffer) def _get_b_base(self): try: diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,3 +1,5 @@ +import imp, os + try: import cpyext except ImportError: @@ -10,4 +12,12 @@ pass # obscure condition of _ctypes_test.py being imported by py.test else: import _pypy_testcapi - _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') + cfile = '_ctypes_test.c' + thisdir = 
os.path.dirname(__file__) + output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + try: + fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) + imp.load_module('_ctypes_test', fp, filename, description) + except ImportError: + print 'could not find _ctypes_test in',output_dir + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,5 +1,22 @@ import os, sys, imp -import tempfile +import tempfile, binascii + +def get_hashed_dir(cfile): + with open(cfile,'r') as fid: + content = fid.read() + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], content]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + def _get_c_extension_suffix(): for ext, mod, typ in imp.get_suffixes(): @@ -7,12 +24,13 @@ return ext -def compile_shared(csource, modulename): +def compile_shared(csource, modulename, output_dir=None): """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, and import it. 
""" thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() + if output_dir is None: + output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -38,6 +38,7 @@ if sys.version_info[0] >= 3: StandardError = Exception + cmp = lambda x, y: (x > y) - (x < y) long = int xrange = range basestring = unicode = str diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,7 +1,17 @@ +import imp, os + try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -else: - import _pypy_testcapi - _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') + +import _pypy_testcapi +cfile = '_testcapimodule.c' +thisdir = os.path.dirname(__file__) +output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + +try: + fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) + imp.load_module('_testcapi', fp, filename, description) +except ImportError: + _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -292,6 +292,10 @@ depending on the compiler settings, the default of 768KB is enough for about 1400 calls. +* since the implementation of dictionary is different, the exact number + which ``__hash__`` and ``__eq__`` are called is different. Since CPython + does not give any specific guarantees either, don't rely on it. + * assignment to ``__class__`` is limited to the cases where it works on CPython 2.5. On CPython 2.6 and 2.7 it works in a bit more cases, which are not supported by PyPy so far. (If needed, diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -51,6 +51,8 @@ .. 
function:: int pypy_execute_source_ptr(char* source, void* ptr); + .. note:: Not available in PyPy <= 2.2.1 + Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. @@ -100,9 +102,29 @@ Worked! +.. note:: If the compilation fails because of missing PyPy.h header file, + you are running PyPy <= 2.2.1, please see the section `Missing PyPy.h`_. + +Missing PyPy.h +-------------- + +.. note:: PyPy.h is in the nightly builds and goes to new PyPy releases (>2.2.1). + +For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): + +.. code-block:: bash + + cd /opt/pypy/include + wget https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h + + More advanced example --------------------- +.. note:: This example depends on pypy_execute_source_ptr which is not available + in PyPy <= 2.2.1. You might want to see the alternative example + below. + Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. It's a bit longish, but it captures a gist what can be done with the PyPy @@ -161,6 +183,97 @@ is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` and fill the structure from Python side for the future use. +Alternative example +------------------- + +As ``pypy_execute_source_ptr`` is not available in PyPy 2.2.1, you might want to try +an alternative approach which relies on -export-dynamic flag to the GNU linker. +The downside to this approach is that it is platform dependent. + +.. 
code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + lib = ffi.verify('int callback(int (*func)(int));')\n\ + lib.callback(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source(source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + + +Make sure to pass -export-dynamic flag when compiling:: + + $ gcc -g -o x x.c -lpypy-c -L. -export-dynamic + $ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +Finding pypy_home +----------------- + +Function pypy_setup_home takes one parameter - the path to libpypy. There's +currently no "clean" way (pkg-config comes to mind) how to find this path. You +can try the following (GNU-specific) hack (don't forget to link against *dl*): + +.. 
code-block:: c + + #if !(_GNU_SOURCE) + #define _GNU_SOURCE + #endif + + #include + #include + #include + + // caller should free returned pointer to avoid memleaks + // returns NULL on error + char* guess_pypyhome() { + // glibc-only (dladdr is why we #define _GNU_SOURCE) + Dl_info info; + void *_rpython_startup_code = dlsym(0,"rpython_startup_code"); + if (_rpython_startup_code == 0) { + return 0; + } + if (dladdr(_rpython_startup_code, &info) != 0) { + const char* lib_path = info.dli_fname; + char* lib_realpath = realpath(lib_path, 0); + return lib_realpath; + } + return 0; + } + + Threading --------- diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -87,7 +87,7 @@ .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf .. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf -.. _`Compiling Dynamic Language Implementations`: http://codespeak.net/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`Compiling Dynamic Language Implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf Talks and Presentations @@ -258,24 +258,24 @@ .. _`PyCon 2010`: http://morepypy.blogspot.com/2010/02/pycon-2010-report.html .. _`RuPy 2009`: http://morepypy.blogspot.com/2009/11/pypy-on-rupy-2009.html -.. _`PyPy 3000`: http://codespeak.net/pypy/extradoc/talk/ep2006/pypy3000.txt -.. _`What can PyPy do for you`: http://codespeak.net/pypy/extradoc/talk/ep2006/usecases-slides.html -.. _`PyPy introduction at EuroPython 2006`: http://codespeak.net/pypy/extradoc/talk/ep2006/intro.pdf -.. 
_`PyPy - the new Python implementation on the block`: http://codespeak.net/pypy/extradoc/talk/22c3/hpk-tech.html -.. _`PyPy development method`: http://codespeak.net/pypy/extradoc/talk/pycon2006/method_talk.html -.. _`PyPy intro`: http://codespeak.net/pypy/extradoc/talk/accu2006/accu-2006.pdf -.. _oscon2003-paper: http://codespeak.net/pypy/extradoc/talk/oscon2003-paper.html -.. _`Architecture introduction slides`: http://codespeak.net/pypy/extradoc/talk/amsterdam-sprint-intro.pdf -.. _`EU funding for FOSS`: http://codespeak.net/pypy/extradoc/talk/2004-21C3-pypy-EU-hpk.pdf -.. _`py lib slides`: http://codespeak.net/pypy/extradoc/talk/2005-pycon-py.pdf -.. _`PyCon 2005`: http://codespeak.net/pypy/extradoc/talk/pypy-talk-pycon2005/README.html -.. _`Trouble in Paradise`: http://codespeak.net/pypy/extradoc/talk/agile2006/during-oss-sprints_talk.pdf -.. _`Sprint Driven Development`: http://codespeak.net/pypy/extradoc/talk/xp2006/during-xp2006-sprints.pdf -.. _`Kill -1`: http://codespeak.net/pypy/extradoc/talk/ep2006/kill_1_agiletalk.pdf -.. _`Open Source, EU-Funding and Agile Methods`: http://codespeak.net/pypy/extradoc/talk/22c3/agility.pdf -.. _`PyPy Status`: http://codespeak.net/pypy/extradoc/talk/vancouver/talk.html +.. _`PyPy 3000`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt +.. _`What can PyPy do for you`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.txt +.. _`PyPy introduction at EuroPython 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf +.. _`PyPy - the new Python implementation on the block`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.txt +.. _`PyPy development method`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.txt +.. _`PyPy intro`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf +.. _oscon2003-paper: https://bitbucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.txt +.. 
_`Architecture introduction slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf +.. _`EU funding for FOSS`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf +.. _`py lib slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf +.. _`PyCon 2005`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.txt +.. _`Trouble in Paradise`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf +.. _`Sprint Driven Development`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf +.. _`Kill -1`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf +.. _`Open Source, EU-Funding and Agile Methods`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf +.. _`PyPy Status`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/vancouver/ .. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf -.. _`PyPy's VM Approach`: http://codespeak.net/pypy/extradoc/talk/dls2006/talk.html +.. _`PyPy's VM Approach`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/ .. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ .. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -14,7 +14,7 @@ The present document describes the specific garbage collectors that we wrote in our framework. -.. _`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. 
_`EU-report on this topic`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf Garbage collectors currently written for the GC framework diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -389,7 +389,7 @@ .. _`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html -.. _`py library`: http://pylib.org +.. _`py library`: http://pylib.readthedocs.org/ .. _`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -103,7 +103,7 @@ .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org -.. _here: http://tismerysoft.de/pypy/irc-logs/pypy +.. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -211,6 +211,9 @@ .. __: `recursion depth limit`_ +We also do not include any of the recent API additions to Stackless +Python, like ``set_atomic()``. Contributions welcome. + Recursion depth limit +++++++++++++++++++++ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -105,3 +105,25 @@ .. branch: stdlib-2.7.6 Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. 
The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. + +.. branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain + +.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. branch: win32-fixes4 +fix more tests for win32 diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -700,7 +700,7 @@ def setup_bootstrap_path(executable): """ - Try to to as little as possible and to have the stdlib in sys.path. In + Try to do as little as possible and to have the stdlib in sys.path. In particular, we cannot use any unicode at this point, because lots of unicode operations require to be able to import encodings. 
""" diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -193,6 +193,15 @@ def immutable_unique_id(self, space): return None + def buffer_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_memoryview): + return w_result.buffer_w(space) + raise oefmt(space.w_TypeError, + "'%T' does not support the buffer interface", self) + def bytes_w(self, space): self._typed_unwrap_error(space, "bytes") @@ -436,14 +445,12 @@ def getbuiltinmodule(self, name, force_init=False): w_name = self.wrap(name) w_modules = self.sys.get('modules') - try: - w_mod = self.getitem(w_modules, w_name) - except OperationError, e: - if not e.match(self, self.w_KeyError): - raise - else: - if not force_init: - return w_mod + if not force_init: + try: + return self.getitem(w_modules, w_name) + except OperationError, e: + if not e.match(self, self.w_KeyError): + raise # If the module is a builtin but not yet imported, # retrieve it and initialize it @@ -454,13 +461,13 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: - # Add the module to sys.modules - self.setitem(w_modules, w_name, w_mod) - - # And initialize it + # Initialize the module from pypy.interpreter.module import Module if isinstance(w_mod, Module): w_mod.init(self) + + # Add the module to sys.modules + self.setitem(w_modules, w_name, w_mod) return w_mod def get_builtinmodule_to_install(self): @@ -1307,10 +1314,7 @@ 'to unsigned int')) def buffer_w(self, w_obj): - # returns a Buffer instance - from pypy.interpreter.buffer import Buffer - w_buffer = self.buffer(w_obj) - return self.interp_w(Buffer, w_buffer) + return w_obj.buffer_w(self) def rwbuffer_w(self, w_obj): # returns a RWBuffer instance @@ -1727,7 +1731,6 @@ ('set', 'set', 3, ['__set__']), ('delete', 'delete', 2, 
['__delete__']), ('userdel', 'del', 1, ['__del__']), - ('buffer', 'buffer', 1, ['__buffer__']), # see buffer.py ] ObjSpace.BuiltinModuleTable = [ diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -1,32 +1,12 @@ """ Buffer protocol support. """ +from rpython.rlib.objectmodel import import_from_mixin -# The implementation of the buffer protocol. The basic idea is that we -# can ask any app-level object for a 'buffer' view on it, by calling its -# __buffer__() special method. It should return a wrapped instance of a -# subclass of the Buffer class defined below. Note that __buffer__() is -# a PyPy-only extension to the Python language, made necessary by the -# fact that it's not natural in PyPy to hack an interp-level-only -# interface. -# In normal usage, the convenience method space.buffer_w() should be -# used to get directly a Buffer instance. Doing so also gives you for -# free the typecheck that __buffer__() really returned a wrapped Buffer. 
- -import operator -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash, import_from_mixin -from rpython.rlib.rstring import StringBuilder - - -class Buffer(W_Root): - """Abstract base class for memory views.""" - - __slots__ = () # no extra slot here +class Buffer(object): + """Abstract base class for buffers.""" + __slots__ = [] def getlength(self): raise NotImplementedError @@ -50,93 +30,10 @@ def is_writable(self): return False - # __________ app-level support __________ - - def descr_len(self, space): - return space.wrap(self.getlength()) - - def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - return space.wrapbytes(self.getitem(start)) - res = self.getslice(start, stop, step, size) - return space.wrapbytes(res) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - if not isinstance(self, RWBuffer): - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - if len(newstring) != 1: - msg = 'buffer[index]=x: x must be a single character' - raise OperationError(space.w_TypeError, space.wrap(msg)) - char = newstring[0] # annotator hint - self.setitem(start, char) - elif step == 1: - if len(newstring) != size: - msg = "right operand length must match slice length" - raise OperationError(space.w_ValueError, space.wrap(msg)) - self.setslice(start, newstring) - else: - raise OperationError(space.w_ValueError, - space.wrap("buffer object does not support" - " slicing with a step")) - - def descr__buffer__(self, space): - return space.wrap(self) - - def descr_str(self, space): - return 
space.wrap(self.as_str()) - - @unwrap_spec(other='bufferstr') - def descr_add(self, space, other): - return space.wrapbytes(self.as_str() + other) - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if not isinstance(w_other, Buffer): - return space.w_NotImplemented - # xxx not the most efficient implementation - str1 = self.as_str() - str2 = w_other.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def descr_hash(self, space): - return space.wrap(compute_hash(self.as_str())) - - def descr_mul(self, space, w_times): - # xxx not the most efficient implementation - w_string = space.wrapbytes(self.as_str()) - # use the __mul__ method instead of space.mul() so that we - # return NotImplemented instead of raising a TypeError - return space.call_method(w_string, '__mul__', w_times) - - def descr_repr(self, space): - if isinstance(self, RWBuffer): - info = 'read-write buffer' - else: - info = 'read-only buffer' - addrstring = self.getaddrstring(space) - - return space.wrap("<%s for 0x%s, size %d>" % - (info, addrstring, self.getlength())) - class RWBuffer(Buffer): - """Abstract base class for read-write memory views.""" - - __slots__ = () # no extra slot here + """Abstract base class for read-write buffers.""" + __slots__ = [] def is_writable(self): return True @@ -151,76 +48,8 @@ self.setitem(start + i, string[i]) - at unwrap_spec(offset=int, size=int) -def descr_buffer__new__(space, w_subtype, w_object, offset=0, size=-1): - # w_subtype can only be exactly 'buffer' for now - if not space.is_w(w_subtype, space.gettypefor(Buffer)): - raise OperationError(space.w_TypeError, - space.wrap("argument 1 must be 'buffer'")) - - if space.isinstance_w(w_object, 
space.w_unicode): - # unicode objects support the old buffer interface - # but not the new buffer interface (change in python 2.7) - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - unistr = space.unicode_w(w_object) - builder = StringBuilder(len(unistr) * UNICODE_SIZE) - for unich in unistr: - pack_unichar(unich, builder) - from pypy.interpreter.buffer import StringBuffer - w_buffer = space.wrap(StringBuffer(builder.build())) - else: - w_buffer = space.buffer(w_object) - - buffer = space.interp_w(Buffer, w_buffer) # type-check - if offset == 0 and size == -1: - return w_buffer - # handle buffer slices - if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) - if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) - if isinstance(buffer, RWBuffer): - buffer = RWSubBuffer(buffer, offset, size) - else: - buffer = SubBuffer(buffer, offset, size) - return space.wrap(buffer) - - -Buffer.typedef = TypeDef( - "buffer", - __doc__ = """\ -buffer(object [, offset[, size]]) - -Create a new buffer object which references the given object. -The buffer will reference a slice of the target object from the -start of the object (or at the specified offset). The slice will -extend to the end of the target object (or with the specified size). 
-""", - __new__ = interp2app(descr_buffer__new__), - __len__ = interp2app(Buffer.descr_len), - __getitem__ = interp2app(Buffer.descr_getitem), - __setitem__ = interp2app(Buffer.descr_setitem), - __buffer__ = interp2app(Buffer.descr__buffer__), - __str__ = interp2app(Buffer.descr_str), - __add__ = interp2app(Buffer.descr_add), - __eq__ = interp2app(Buffer.descr_eq), - __ne__ = interp2app(Buffer.descr_ne), - __lt__ = interp2app(Buffer.descr_lt), - __le__ = interp2app(Buffer.descr_le), - __gt__ = interp2app(Buffer.descr_gt), - __ge__ = interp2app(Buffer.descr_ge), - __hash__ = interp2app(Buffer.descr_hash), - __mul__ = interp2app(Buffer.descr_mul), - __rmul__ = interp2app(Buffer.descr_mul), - __repr__ = interp2app(Buffer.descr_repr), -) -Buffer.typedef.acceptable_as_base_class = False - -# ____________________________________________________________ - class StringBuffer(Buffer): + __slots__ = ['value'] def __init__(self, value): self.value = value @@ -241,49 +70,12 @@ assert 0 <= start <= stop return self.value[start:stop] return "".join([self.value[start + i*step] for i in xrange(size)]) - - -class StringLikeBuffer(Buffer): - """For app-level objects that already have a string-like interface - with __len__ and a __getitem__ that returns characters or (with - slicing) substrings.""" - # XXX this is inefficient, it should only be used temporarily - - def __init__(self, space, w_obj): - self.space = space - self.w_obj = w_obj - - def getlength(self): - space = self.space - return space.len_w(self.w_obj) - - def getitem(self, index): - space = self.space - w_value = space.getitem(self.w_obj, space.wrap(index)) - try: - return chr(space.int_w(w_value)) - except OperationError as e: - if not e.match(space, space.w_TypeError): - raise - s = space.bytes_w(w_value) - if len(s) != 1: - raise OperationError(space.w_ValueError, - space.wrap("single byte expected, got string")) - char = s[0] # annotator hint - return char - - def getslice(self, start, stop, step, size): - space = 
self.space - if step != 1: - raise OperationError(space.w_ValueError, space.wrap( - "buffer object does not support slicing with a step")) - s = space.str_w(space.getslice(self.w_obj, space.wrap(start), - space.wrap(stop))) - return s - # ____________________________________________________________ + class SubBufferMixin(object): + _attrs_ = ['buffer', 'offset', 'size'] + def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -305,11 +97,14 @@ if start == stop: return '' # otherwise, adding self.offset might make them # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) + return self.buffer.getslice(self.offset + start, self.offset + stop, + step, size) + class SubBuffer(Buffer): import_from_mixin(SubBufferMixin) + class RWSubBuffer(RWBuffer): import_from_mixin(SubBufferMixin) diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -1,31 +1,28 @@ import py -from pypy.interpreter.buffer import Buffer from rpython.tool.udir import udir testdir = udir.ensure('test_buffer', dir=1) class TestBuffer: - def test_buffer_w(self): space = self.space w_hello = space.wrapbytes('hello world') buf = space.buffer_w(w_hello) - assert isinstance(buf, Buffer) assert buf.getlength() == 11 assert buf.as_str() == 'hello world' assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.wrap(buf)) is buf + assert space.buffer_w(space.newbuffer(buf)) is buf assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.buffer(w_hello)) == 'hello world' + assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - e = space.raises_w(space.w_TypeError, space.buffer, space.wrap(5)) + e = space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) message = 
space.unwrap(e.value.get_w_value(space)) assert "'int' does not support the buffer interface" == message def test_file_write(self): space = self.space - w_buffer = space.buffer(space.wrapbytes('hello world')) + w_buffer = space.newbuffer(space.buffer_w(space.wrapbytes('hello world'))) filename = str(testdir.join('test_file_write')) space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): f = open(filename, 'wb') @@ -37,4 +34,4 @@ f.close() assert data == 'hello world' -# Note: some app-level tests for buffer are in module/__builtin__/test/. +# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -29,14 +29,10 @@ interpleveldefs = { # constants + '__debug__' : '(space.w_True)', # XXX 'None' : '(space.w_None)', 'False' : '(space.w_False)', 'True' : '(space.w_True)', - '__debug__' : '(space.w_True)', # XXX - 'type' : '(space.w_type)', - 'object' : '(space.w_object)', - 'memoryview' : 'interp_memoryview.W_MemoryView', - 'open' : 'state.get(space).w_open', # interp-level function definitions diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py deleted file mode 100644 --- a/pypy/module/__builtin__/interp_memoryview.py +++ /dev/null @@ -1,198 +0,0 @@ -""" -Implementation of the 'buffer' and 'memoryview' types. -""" -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import buffer -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError -import operator - -class W_MemoryView(W_Root): - """Implement the built-in 'memoryview' type as a thin wrapper around - an interp-level buffer. 
- """ - - def __init__(self, buf): - assert isinstance(buf, buffer.Buffer) - self.buf = buf - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if self.buf is None: - return space.wrap(getattr(operator, name)(self, w_other)) - if isinstance(w_other, W_MemoryView): - # xxx not the most efficient implementation - str1 = self.as_str() - str2 = w_other.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - - try: - w_buf = space.buffer(w_other) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - return space.w_NotImplemented - else: - str1 = self.as_str() - str2 = space.buffer_w(w_buf).as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - - def as_str(self): - return self.buf.as_str() - - def getlength(self): - return self.buf.getlength() - - def getslice(self, start, stop): - if start < 0: - start = 0 - size = stop - start - if size < 0: - size = 0 - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf = buffer.RWSubBuffer(buf, start, size) - else: - buf = buffer.SubBuffer(buf, start, size) - return W_MemoryView(buf) - - def descr_buffer(self, space): - """ - Note that memoryview() is very inconsistent in CPython: it does not - support the buffer interface but does support the new buffer - interface: as a result, it is possible to pass memoryview to - e.g. socket.send() but not to file.write(). For simplicity and - consistency, in PyPy memoryview DOES support buffer(), which means - that it is accepted in more places than CPython. 
- """ - self._check_released(space) - return space.wrap(self.buf) - - def descr_tobytes(self, space): - self._check_released(space) - return space.wrapbytes(self.as_str()) - - def descr_tolist(self, space): - self._check_released(space) - buf = self.buf - result = [] - for i in range(buf.getlength()): - result.append(space.wrap(ord(buf.getitem(i)))) - return space.newlist(result) - - def descr_getitem(self, space, w_index): - self._check_released(space) - start, stop, step = space.decode_index(w_index, self.getlength()) - if step == 0: # index only - return space.wrapbytes(self.buf.getitem(start)) - elif step == 1: - res = self.getslice(start, stop) - return space.wrap(res) - else: - raise OperationError(space.w_ValueError, - space.wrap("memoryview object does not support" - " slicing with a step")) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - self._check_released(space) - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf.descr_setitem(space, w_index, newstring) - else: - raise OperationError(space.w_TypeError, - space.wrap("cannot modify read-only memory")) - - def descr_len(self, space): - self._check_released(space) - return self.buf.descr_len(space) - - def w_get_format(self, space): - self._check_released(space) - return space.wrap("B") - - def w_get_itemsize(self, space): - self._check_released(space) - return space.wrap(1) - - def w_get_ndim(self, space): - self._check_released(space) - return space.wrap(1) - - def w_is_readonly(self, space): - self._check_released(space) - return space.wrap(not isinstance(self.buf, buffer.RWBuffer)) - - def w_get_shape(self, space): - self._check_released(space) - return space.newtuple([space.wrap(self.getlength())]) - - def w_get_strides(self, space): - self._check_released(space) - return space.newtuple([space.wrap(1)]) - - def w_get_suboffsets(self, space): - self._check_released(space) - # I've never seen anyone filling this field - return space.w_None - - def 
descr_repr(self, space): - if self.buf is None: - return self.getrepr(space, u'released memory') - else: - return self.getrepr(space, u'memory') - - def descr_release(self, space): - self.buf = None - - def _check_released(self, space): - if self.buf is None: - raise OperationError(space.w_ValueError, space.wrap( - "operation forbidden on released memoryview object")) - - def descr_enter(self, space): - self._check_released(space) - return self - - def descr_exit(self, space, __args__): - self.buf = None - return space.w_None - - -def descr_new(space, w_subtype, w_object): - memoryview = W_MemoryView(space.buffer(w_object)) - return space.wrap(memoryview) - -W_MemoryView.typedef = TypeDef( - "memoryview", - __doc__ = """\ -Create a new memoryview object which references the given object. -""", - __new__ = interp2app(descr_new), - __buffer__ = interp2app(W_MemoryView.descr_buffer), - __eq__ = interp2app(W_MemoryView.descr_eq), - __getitem__ = interp2app(W_MemoryView.descr_getitem), - __len__ = interp2app(W_MemoryView.descr_len), - __ne__ = interp2app(W_MemoryView.descr_ne), - __setitem__ = interp2app(W_MemoryView.descr_setitem), - __repr__ = interp2app(W_MemoryView.descr_repr), - __enter__ = interp2app(W_MemoryView.descr_enter), - __exit__ = interp2app(W_MemoryView.descr_exit), - tobytes = interp2app(W_MemoryView.descr_tobytes), - tolist = interp2app(W_MemoryView.descr_tolist), - release = interp2app(W_MemoryView.descr_release), - format = GetSetProperty(W_MemoryView.w_get_format), - itemsize = GetSetProperty(W_MemoryView.w_get_itemsize), - ndim = GetSetProperty(W_MemoryView.w_get_ndim), - readonly = GetSetProperty(W_MemoryView.w_is_readonly), - shape = GetSetProperty(W_MemoryView.w_get_shape), - strides = GetSetProperty(W_MemoryView.w_get_strides), - suboffsets = GetSetProperty(W_MemoryView.w_get_suboffsets), - ) -W_MemoryView.typedef.acceptable_as_base_class = False diff --git a/pypy/module/__builtin__/test/test_builtin.py 
b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -25,6 +25,13 @@ else: cls.w_safe_runtimerror = cls.space.wrap(sys.version_info < (2, 6)) + def test_builtin_names(self): + import builtins as __builtin__ + assert __builtin__.bytes is bytes + assert __builtin__.dict is dict + assert __builtin__.memoryview is memoryview + assert not hasattr(__builtin__, 'buffer') + def test_bytes_alias(self): assert bytes is not str assert isinstance(eval("b'hi'"), bytes) diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -7,7 +7,6 @@ class ByteBuffer(RWBuffer): - def __init__(self, len): self.data = ['\x00'] * len @@ -23,4 +22,4 @@ @unwrap_spec(length=int) def bytebuffer(space, length): - return space.wrap(ByteBuffer(length)) + return space.newbuffer(ByteBuffer(length)) diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -4,7 +4,6 @@ from pypy.interpreter.typedef import TypeDef from rpython.rlib.rstring import UnicodeBuilder, StringBuilder from rpython.tool.sourcetools import func_with_new_name -from rpython.rlib import jit def create_builder(name, strtype, builder_cls): diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -4,6 +4,7 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from pypy.objspace.std.memoryview import _buffer_setitem from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi @@ -39,26 +40,30 @@ 
copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) +# Override the typedef to narrow down the interface that's exposed to app-level + class MiniBuffer(W_Root): - # a different subclass of W_Root for the MiniBuffer, because we - # want a slightly different (simplified) API at the level of Python. - def __init__(self, buffer, keepalive=None): self.buffer = buffer self.keepalive = keepalive + def buffer_w(self, space): + return self.buffer + def descr_len(self, space): - return self.buffer.descr_len(space) + return space.wrap(self.buffer.getlength()) def descr_getitem(self, space, w_index): - return self.buffer.descr_getitem(space, w_index) + start, stop, step, size = space.decode_index4(w_index, + self.buffer.getlength()) + if step == 0: + return space.wrapbytes(self.buffer.getitem(start)) + res = self.buffer.getslice(start, stop, step, size) + return space.wrapbytes(res) @unwrap_spec(newstring='bufferstr') def descr_setitem(self, space, w_index, newstring): - self.buffer.descr_setitem(space, w_index, newstring) - - def descr__buffer__(self, space): - return self.buffer.descr__buffer__(space) + _buffer_setitem(space, self.buffer, w_index, newstring) MiniBuffer.typedef = TypeDef( @@ -67,7 +72,6 @@ __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), - __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -80,7 +80,6 @@ return (cfield.ctype, cfield.offset) def _copy_from_same(self, cdata, w_ob): - space = self.space if isinstance(w_ob, cdataobj.W_CData): if w_ob.ctype is self and self.size >= 0: misc._raw_memcopy(w_ob._cdata, cdata, self.size) diff --git 
a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,4 +1,3 @@ -import weakref from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here, specialize -from rpython.rlib.rarithmetic import r_uint, r_ulonglong, is_signed_integer_type +from rpython.rlib.rarithmetic import r_uint, r_ulonglong from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -1,3 +1,5 @@ +import sys + class AppTestCodecs: spaceconfig = { "usemodules": ['unicodedata', 'struct', 'binascii'], @@ -138,7 +140,9 @@ class AppTestPartialEvaluation: - spaceconfig = dict(usemodules=('array',)) + spaceconfig = dict(usemodules=['array',]) + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_winreg') def test_partial_utf8(self): import _codecs @@ -753,9 +757,25 @@ import sys if sys.platform != 'win32': return - assert 'test'.encode('mbcs') == b'test' - assert 'caf\xe9'.encode('mbcs') == b'caf\xe9' - raises(UnicodeEncodeError, '\u040a'.encode, 'mbcs') - raises(UnicodeEncodeError, - "-\u5171\u0141\u2661\u0363\uDC80".encode, 'mbcs') - assert b'cafx\e9'.decode('mbcs') == 'cafx\e9' + toencode = u'caf\xe9', b'caf\xe9' + try: + # test for non-latin1 codepage, more general test needed + import _winreg + key = 
_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, + r'System\CurrentControlSet\Control\Nls\CodePage') + if _winreg.QueryValueEx(key, 'ACP')[0] == u'1255': # non-latin1 + toencode = u'caf\xbf',b'caf\xbf' + except: + assert False, 'cannot test mbcs on this windows system, check code page' + assert u'test'.encode('mbcs') == b'test' + assert toencode[0].encode('mbcs') == toencode[1] + assert u'\u040a'.encode('mbcs') == b'?' # some cyrillic letter + assert b'cafx\e9'.decode('mbcs') == u'cafx\e9' + + def test_handler_string_result(self): + import _codecs + def f(exc): + return (b'foo', exc.end) + _codecs.register_error("test.test_codecs_not_a_string", f) + result = '\u1234'.encode('ascii', 'test.test_codecs_not_a_string') + assert result == b'foo' diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -521,7 +521,7 @@ def _raw_read(self, space, buffer, start, length): length = intmask(length) - w_buf = space.wrap(RawBuffer(buffer, start, length)) + w_buf = space.newbuffer(RawBuffer(buffer, start, length)) while True: try: w_size = space.call_method(self.w_raw, "readinto", w_buf) diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import r_longlong from pypy.module._io.interp_bufferedio import W_BufferedIOBase from pypy.module._io.interp_iobase import convert_size +from pypy.objspace.std.memoryview import W_MemoryView import sys @@ -121,7 +122,7 @@ return space.wrap(size) def getbuffer_w(self, space): - return space.wrap(BytesIOBuffer(self)) + return space.wrap(W_MemoryView(BytesIOBuffer(self))) def getvalue_w(self, space): self._check_closed(space) diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ 
b/pypy/module/_minimal_curses/fficurses.py @@ -1,4 +1,3 @@ - """ The ffi for rpython, need to be imported for side effects """ @@ -8,8 +7,6 @@ from rpython.rtyper.extfunc import register_external from pypy.module._minimal_curses import interp_curses from rpython.translator.tool.cbuild import ExternalCompilationInfo -from sys import platform -import os.path # We cannot trust ncurses5-config, it's broken in various ways in # various versions. For example it might not list -ltinfo even though diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -9,8 +9,12 @@ class AppTestBufferTooShort: spaceconfig = {'usemodules': ['_multiprocessing', 'thread', 'signal', - 'select', 'fcntl', 'struct', - 'binascii']} + 'select', 'struct', 'binascii']} + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') + else: + spaceconfig['usemodules'].append('fcntl') + def setup_class(cls): if cls.runappdirect: @@ -75,6 +79,8 @@ 'itertools', '_socket', 'binascii', ] } + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') def setup_class(cls): if sys.platform != "win32": @@ -86,7 +92,6 @@ # just for multiprocessing to import correctly on Windows w_modules = space.sys.get('modules') space.setitem(w_modules, space.wrap('msvcrt'), space.sys) - space.setitem(w_modules, space.wrap('_subprocess'), space.sys) else: import _multiprocessing @@ -100,9 +105,12 @@ spaceconfig = { "usemodules": [ '_multiprocessing', 'thread', 'signal', 'struct', 'array', - '_socket', 'binascii', 'select', 'fcntl', - ] + '_socket', 'binascii', 'select' ] } + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') + else: + spaceconfig['usemodules'].append('fcntl') def setup_class(cls): cls.w_connections = cls.space.newlist([]) diff --git a/pypy/module/_pickle_support/maker.py 
b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -3,7 +3,6 @@ from pypy.interpreter.pycode import PyCode from pypy.interpreter.function import Function, Method from pypy.interpreter.module import Module -from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pytraceback import PyTraceback from pypy.interpreter.generator import GeneratorIteratorWithDel from rpython.rlib.objectmodel import instantiate diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -1,13 +1,10 @@ import sys -import math from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import specialize from rpython.rlib import rfloat, runicode from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter import unicodehelper -from rpython.rtyper.annlowlevel import llstr, hlunicode OVF_DIGITS = len(str(sys.maxint)) @@ -30,7 +27,7 @@ Internally it's implemented at the level of low-level helpers, to avoid the extra copy we would need if we take the actual slice first. - + No bound checking is done, use carefully. 
""" from rpython.rtyper.annlowlevel import llstr, hlunicode @@ -226,7 +223,6 @@ def decode_array(self, i): w_list = self.space.newlist([]) start = i - count = 0 i = self.skip_whitespace(start) if self.ll_chars[i] == ']': self.pos = i+1 diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -207,7 +207,6 @@ __setitem__ = interp2app(W_ArrayInstance.descr_setitem), __getitem__ = interp2app(W_ArrayInstance.descr_getitem), __len__ = interp2app(W_ArrayInstance.getlength), - __buffer__ = interp2app(W_ArrayInstance.descr_buffer), buffer = GetSetProperty(W_ArrayInstance.getbuffer), shape = interp_attrproperty('shape', W_ArrayInstance), free = interp2app(W_ArrayInstance.free), @@ -232,7 +231,6 @@ __setitem__ = interp2app(W_ArrayInstance.descr_setitem), __getitem__ = interp2app(W_ArrayInstance.descr_getitem), __len__ = interp2app(W_ArrayInstance.getlength), - __buffer__ = interp2app(W_ArrayInstance.descr_buffer), buffer = GetSetProperty(W_ArrayInstance.getbuffer), shape = interp_attrproperty('shape', W_ArrayInstance), byptr = interp2app(W_ArrayInstance.byptr), diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr +from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, unwrap_value, unpack_argshapes, got_libffi_error) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -359,9 +359,9 @@ 
lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) - def descr_buffer(self, space): + def buffer_w(self, space): from pypy.module._rawffi.buffer import RawFFIBuffer - return space.wrap(RawFFIBuffer(self)) + return RawFFIBuffer(self) def getrawsize(self): raise NotImplementedError("abstract base class") diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -15,7 +15,7 @@ from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib import clibffi, rgc -from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint, \ +from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi @@ -364,7 +364,6 @@ __repr__ = interp2app(W_StructureInstance.descr_repr), __getattr__ = interp2app(W_StructureInstance.getattr), __setattr__ = interp2app(W_StructureInstance.setattr), - __buffer__ = interp2app(W_StructureInstance.descr_buffer), buffer = GetSetProperty(W_StructureInstance.getbuffer), free = interp2app(W_StructureInstance.free), shape = interp_attrproperty('shape', W_StructureInstance), @@ -387,7 +386,6 @@ __repr__ = interp2app(W_StructureInstance.descr_repr), __getattr__ = interp2app(W_StructureInstance.getattr), __setattr__ = interp2app(W_StructureInstance.setattr), - __buffer__ = interp2app(W_StructureInstance.descr_buffer), buffer = GetSetProperty(W_StructureInstance.getbuffer), shape = interp_attrproperty('shape', W_StructureInstance), byptr = interp2app(W_StructureInstance.byptr), diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -1,4 +1,3 @@ - """ The file that keeps track about freed/kept-alive objects allocated by _rawffi. 
Used for debugging ctypes """ diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -69,10 +69,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) a = addr.lock(_c.sockaddr_in6) rffi.setintfield(a, 'c_sin6_port', rsocket.htons(port)) rffi.setintfield(a, 'c_sin6_flowinfo', rsocket.htonl(flowinfo)) @@ -101,10 +98,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) return rsocket.INET6Address(host, port, flowinfo, scope_id) if rsocket.HAS_AF_UNIX and family == rsocket.AF_UNIX: return rsocket.UNIXAddress(space.str_w(w_address)) @@ -116,10 +110,16 @@ # XXX Hack to seperate rpython and pypy def make_ushort_port(space, port): if port < 0 or port > 0xffff: - raise OperationError(space.w_ValueError, space.wrap( + raise OperationError(space.w_OverflowError, space.wrap( "port must be 0-65535.")) return rffi.cast(rffi.USHORT, port) +def make_unsigned_flowinfo(space, flowinfo): + if flowinfo < 0 or flowinfo > 0xfffff: + raise OperationError(space.w_OverflowError, space.wrap( + "flowinfo must be 0-1048575.")) + return rffi.cast(lltype.Unsigned, flowinfo) + # XXX Hack to seperate rpython and pypy def ipaddr_from_object(space, w_sockaddr): host = space.str_w(space.getitem(w_sockaddr, space.wrap(0))) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py 
--- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -391,7 +391,7 @@ name = s.getpeername() # Will raise socket.error if not connected assert name[1] == 80 s.close() - + def test_socket_connect_ex(self): import _socket s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) @@ -417,8 +417,13 @@ def test_bigport(self): import _socket s = _socket.socket() - raises(ValueError, s.connect, ("localhost", 1000000)) - raises(ValueError, s.connect, ("localhost", -1)) + exc = raises(OverflowError, s.connect, ("localhost", -1)) + assert "port must be 0-65535." in str(exc.value) + exc = raises(OverflowError, s.connect, ("localhost", 1000000)) + assert "port must be 0-65535." in str(exc.value) + s = _socket.socket(_socket.AF_INET6) + exc = raises(OverflowError, s.connect, ("::1", 1234, 1048576)) + assert "flowinfo must be 0-1048575." in str(exc.value) def test_NtoH(self): import sys @@ -466,6 +471,13 @@ import socket s = socket.socket() + def test_subclass(self): + from _socket import socket + class MySock(socket): + blah = 123 + s = MySock() + assert s.blah == 123 + def test_getsetsockopt(self): import _socket as socket import struct @@ -524,7 +536,8 @@ skip("GAIError - probably no connection: %s" % str(ex.args)) assert s.send(memoryview(b'')) == 0 assert s.sendall(memoryview(b'')) is None - raises(TypeError, s.send, '') + exc = raises(TypeError, s.send, '') + assert str(exc.value) == "'str' does not support the buffer interface" raises(TypeError, s.sendall, '') s.close() s = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM, 0) @@ -587,11 +600,11 @@ class AppTestSocketTCP: + HOST = 'localhost' + def setup_class(cls): cls.space = space - HOST = 'localhost' - def setup_method(self, method): w_HOST = space.wrap(self.HOST) self.w_serv = space.appexec([w_HOST], @@ -602,6 +615,7 @@ serv.listen(1) return serv ''') + def teardown_method(self, method): if hasattr(self, 'w_serv'): space.appexec([self.w_serv], '(serv): serv.close()') 
@@ -622,7 +636,7 @@ raises(error, raise_error) def test_recv_send_timeout(self): - from _socket import socket, timeout + from _socket import socket, timeout, SOL_SOCKET, SO_RCVBUF, SO_SNDBUF cli = socket() cli.connect(self.serv.getsockname()) fileno, addr = self.serv._accept() @@ -643,6 +657,9 @@ assert count is None buf = t.recv(1) assert buf == b'?' + # speed up filling the buffers + t.setsockopt(SOL_SOCKET, SO_RCVBUF, 4096) + cli.setsockopt(SOL_SOCKET, SO_SNDBUF, 4096) # test send() timeout count = 0 try: @@ -671,7 +688,7 @@ conn = socket.socket(fileno=fileno) buf = memoryview(MSG) conn.send(buf) - buf = array.array('b', b' '*1024) + buf = array.array('b', b' ' * 1024) nbytes = cli.recv_into(buf) assert nbytes == len(MSG) msg = buf.tobytes()[:len(MSG)] @@ -687,7 +704,7 @@ conn = socket.socket(fileno=fileno) buf = memoryview(MSG) conn.send(buf) - buf = array.array('b', b' '*1024) + buf = array.array('b', b' ' * 1024) nbytes, addr = cli.recvfrom_into(buf) assert nbytes == len(MSG) msg = buf.tobytes()[:len(MSG)] @@ -698,6 +715,7 @@ cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) assert cli.family == socket.AF_INET + class AppTestErrno: def setup_class(cls): cls.space = space diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -37,7 +37,7 @@ SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3 SOCKET_TOO_LARGE_FOR_SELECT, SOCKET_OPERATION_OK = 4, 5 -HAVE_RPOLL = True # Even win32 has rpoll.poll +HAVE_RPOLL = 'poll' in dir(rpoll) constants = {} constants["SSL_ERROR_ZERO_RETURN"] = PY_SSL_ERROR_ZERO_RETURN diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,7 +1,7 @@ from rpython.tool.udir import udir class AppTestSSL: - spaceconfig = dict(usemodules=('_ssl', '_socket', 'binascii')) + spaceconfig = dict(usemodules=('_ssl', '_socket', 'binascii', 
'thread')) def setup_class(cls): import os diff --git a/pypy/module/_ssl/thread_lock.py b/pypy/module/_ssl/thread_lock.py --- a/pypy/module/_ssl/thread_lock.py +++ b/pypy/module/_ssl/thread_lock.py @@ -1,4 +1,5 @@ -from rpython.rlib.ropenssl import * +from rpython.rlib import rthread +from rpython.rlib.ropenssl import libraries from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -22,7 +23,6 @@ # without caring about the GIL. separate_module_source = """ - #include static unsigned int _ssl_locks_count = 0; @@ -62,13 +62,12 @@ } """ -from rpython.rlib import rthread - eci = rthread.eci.merge(ExternalCompilationInfo( separate_module_sources=[separate_module_source], post_include_bits=[ "int _PyPy_SSL_SetupThreads(void);"], export_symbols=['_PyPy_SSL_SetupThreads'], + libraries = libraries, )) _PyPy_SSL_SetupThreads = rffi.llexternal('_PyPy_SSL_SetupThreads', diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -40,16 +40,19 @@ if len(__args__.arguments_w) > 0: w_initializer = __args__.arguments_w[0] - if space.lookup(w_initializer, '__buffer__') is not None: - if isinstance(w_initializer, W_ArrayBase): - a.extend(w_initializer, True) - else: - a.descr_frombytes(space, - space.bufferstr_w(w_initializer)) + if isinstance(w_initializer, W_ArrayBase): + a.extend(w_initializer, True) elif space.type(w_initializer) is space.w_list: a.descr_fromlist(space, w_initializer) else: - a.extend(w_initializer, True) + try: + buf = space.bufferstr_w(w_initializer) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + a.extend(w_initializer, True) + else: + a.descr_frombytes(space, buf) break else: msg = 'bad typecode (must be b, B, u, h, H, i, I, l, L, f or d)' @@ -135,6 +138,9 @@ self.len = 0 self.allocated = 0 + def buffer_w(self, space): + return ArrayBuffer(self) + def 
descr_append(self, space, w_x): """ append(x) @@ -505,9 +511,6 @@ def descr_iter(self, space): return space.wrap(ArrayIterator(self)) - def descr_buffer(self, space): - return space.wrap(ArrayBuffer(self)) - def descr_repr(self, space): if self.len == 0: return space.wrap("array('%s')" % self.typecode) @@ -544,7 +547,6 @@ __radd__ = interp2app(W_ArrayBase.descr_radd), __rmul__ = interp2app(W_ArrayBase.descr_rmul), - __buffer__ = interp2app(W_ArrayBase.descr_buffer), __iter__ = interp2app(W_ArrayBase.descr_iter), __repr__ = interp2app(W_ArrayBase.descr_repr), diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1,25 +1,8 @@ import sys -import py -import py.test - - -## class AppTestSimpleArray: -## spaceconfig = dict(usemodules=('array',)) -## def setup_class(cls): -## cls.w_simple_array = cls.space.appexec([], """(): -## import array -## return array.simple_array -## """) - -## def test_simple(self): -## a = self.simple_array(10) -## a[5] = 7.42 -## assert a[5] == 7.42 +import pytest class BaseArrayTests: - - def test_ctor(self): assert len(self.array('i')) == 0 @@ -545,7 +528,6 @@ assert not a > 2*a assert not a >= 2*a - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -779,7 +761,6 @@ assert img[3, 25] == 3 * 9 - def test_override_from(self): class mya(self.array): def fromlist(self, lst): @@ -862,41 +843,41 @@ def test_assign_object_with_special_methods(self): from array import array - + class Num(object): def __float__(self): return 5.25 - + def __int__(self): return 7 - + class NotNum(object): pass - + class Silly(object): def __float__(self): return None - + def __int__(self): - return None + return None class OldNum: def __float__(self): return 6.25 - + def __int__(self): return 8 - + class OldNotNum: pass - + class OldSilly: def __float__(self): return None - + def __int__(self): return None - + for tc in 
'bBhHiIlL': a = array(tc, [0]) raises(TypeError, a.__setitem__, 0, 1.0) @@ -914,7 +895,7 @@ a = array(tc, [0]) a[0] = 1.0 a[0] = 1 - a[0] = Num() + a[0] = Num() assert a[0] == 5.25 raises(TypeError, a.__setitem__, NotNum()) a[0] = OldNum() @@ -922,11 +903,15 @@ raises(TypeError, a.__setitem__, OldNotNum()) raises(TypeError, a.__setitem__, Silly()) raises(TypeError, a.__setitem__, OldSilly()) - + a = array('u', 'hi') a[0] = 'b' assert a[0] == 'b' - + + a = array('u', u'hi') + a[0] = u'b' + assert a[0] == u'b' + def test_bytearray(self): a = self.array('u', 'hi') b = self.array('u') @@ -940,15 +925,13 @@ assert repr(a) == "array('u', {!r})".format(s) assert eval(repr(a), {'array': self.array}) == a - class DontTestCPythonsOwnArray(BaseArrayTests): - def setup_class(cls): import array cls.array = array.array import struct cls.struct = struct - cls.tempfile = str(py.test.ensuretemp('array').join('tmpfile')) + cls.tempfile = str(pytest.ensuretemp('array').join('tmpfile')) cls.maxint = sys.maxint @@ -961,7 +944,7 @@ return array.array """) cls.w_tempfile = cls.space.wrap( - str(py.test.ensuretemp('array').join('tmpfile'))) + str(pytest.ensuretemp('array').join('tmpfile'))) cls.w_maxint = cls.space.wrap(sys.maxint) def test_buffer_info(self): @@ -1028,11 +1011,11 @@ def test_getitem_only_ints(self): class MyInt(object): - def __init__(self, x): - self.x = x + def __init__(self, x): + self.x = x - def __int__(self): - return self.x + def __int__(self): + return self.x a = self.array('i', [1, 2, 3, 4, 5, 6]) raises(TypeError, "a[MyInt(0)]") diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -22,9 +22,9 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.module import Module from pypy.interpreter.function import StaticMethod From noreply at buildbot.pypy.org Wed Apr 2 02:58:08 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 2 Apr 2014 02:58:08 +0200 (CEST) 
Subject: [pypy-commit] pypy stdlib-3.2.5: pypy has no switchinterval and the test passes without. Message-ID: <20140402005808.073821C155F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70389:1154666cbe9b Date: 2014-04-02 00:24 +0200 http://bitbucket.org/pypy/pypy/changeset/1154666cbe9b/ Log: pypy has no switchinterval and the test passes without. diff --git a/lib-python/3/test/test_concurrent_futures.py b/lib-python/3/test/test_concurrent_futures.py --- a/lib-python/3/test/test_concurrent_futures.py +++ b/lib-python/3/test/test_concurrent_futures.py @@ -294,14 +294,16 @@ event = threading.Event() def future_func(): event.wait() - oldswitchinterval = sys.getswitchinterval() - sys.setswitchinterval(1e-6) + if hasattr(sys, 'setswitchinterval'): + oldswitchinterval = sys.getswitchinterval() + sys.setswitchinterval(1e-6) try: fs = {self.executor.submit(future_func) for i in range(100)} event.set() futures.wait(fs, return_when=futures.ALL_COMPLETED) finally: - sys.setswitchinterval(oldswitchinterval) + if hasattr(sys, 'setswitchinterval'): + sys.setswitchinterval(oldswitchinterval) class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests): From noreply at buildbot.pypy.org Wed Apr 2 02:58:09 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 2 Apr 2014 02:58:09 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: Skip some implementation details. Message-ID: <20140402005809.B8E811C155F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70390:ddc62d7d5716 Date: 2014-04-02 00:59 +0200 http://bitbucket.org/pypy/pypy/changeset/ddc62d7d5716/ Log: Skip some implementation details. One remaining failure in test_weakref is suspect: __len__() returns -7! 
diff --git a/lib-python/3/test/test_functools.py b/lib-python/3/test/test_functools.py --- a/lib-python/3/test/test_functools.py +++ b/lib-python/3/test/test_functools.py @@ -197,9 +197,13 @@ raise IndexError f = self.thetype(object) - self.assertRaisesRegex(SystemError, - "new style getargs format but argument is not a tuple", - f.__setstate__, BadSequence()) + if support.check_impl_detail(pypy=True): + # CPython fails, pypy does not :-) + f.__setstate__(BadSequence()) + else: + self.assertRaisesRegex(SystemError, + "new style getargs format but argument is not a tuple", + f.__setstate__, BadSequence()) class PartialSubclass(functools.partial): pass diff --git a/lib-python/3/test/test_weakref.py b/lib-python/3/test/test_weakref.py --- a/lib-python/3/test/test_weakref.py +++ b/lib-python/3/test/test_weakref.py @@ -934,6 +934,7 @@ n1 = len(dct) del it gc.collect() + gc.collect() n2 = len(dct) # one item may be kept alive inside the iterator self.assertIn(n1, (0, 1)) @@ -945,6 +946,7 @@ def test_weak_valued_len_cycles(self): self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k)) + @support.impl_detail("PyPy has no cyclic collection", pypy=False) def check_len_race(self, dict_type, cons): # Extended sanity checks for len() in the face of cyclic collection self.addCleanup(gc.set_threshold, *gc.get_threshold()) diff --git a/lib-python/3/test/test_weakset.py b/lib-python/3/test/test_weakset.py --- a/lib-python/3/test/test_weakset.py +++ b/lib-python/3/test/test_weakset.py @@ -406,11 +406,13 @@ n1 = len(s) del it gc.collect() + gc.collect() n2 = len(s) # one item may be kept alive inside the iterator self.assertIn(n1, (0, 1)) self.assertEqual(n2, 0) + @support.impl_detail("PyPy has no cyclic collection", pypy=False) def test_len_race(self): # Extended sanity checks for len() in the face of cyclic collection self.addCleanup(gc.set_threshold, *gc.get_threshold()) From noreply at buildbot.pypy.org Wed Apr 2 02:58:11 2014 From: noreply at buildbot.pypy.org 
(amauryfa) Date: Wed, 2 Apr 2014 02:58:11 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: mangle keyword-only argname when loading defaults (CPython issue #14607) Message-ID: <20140402005811.5A6FE1C155F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70391:a15b2d272f8e Date: 2014-04-02 01:15 +0200 http://bitbucket.org/pypy/pypy/changeset/a15b2d272f8e/ Log: mangle keyword-only argname when loading defaults (CPython issue #14607) diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -306,7 +306,8 @@ for i, default in enumerate(args.kw_defaults): if default: kwonly = args.kwonlyargs[i] - self.load_const(self.space.wrap(kwonly.arg.decode('utf-8'))) + mangled = self.scope.mangle(kwonly.arg.decode('utf-8')) + self.load_const(self.space.wrap(mangled)) default.walkabout(self) defaults += 1 return defaults diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -347,6 +347,14 @@ assert l(1, 2, k=10) == 1 + 2 + 10 """ + def test_kwonlyarg_mangling(self): + """ + class X: + def f(self, *, __a=42): + return __a + assert X().f() == 42 + """ + def test_extended_unpacking_short(self): """ class Seq: From noreply at buildbot.pypy.org Wed Apr 2 02:58:12 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 2 Apr 2014 02:58:12 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: Expat parser now correctly works with unicode input, Message-ID: <20140402005812.BBB301C155F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70392:73ca2fbe4077 Date: 2014-04-02 02:21 +0200 http://bitbucket.org/pypy/pypy/changeset/73ca2fbe4077/ Log: Expat parser now correctly works with unicode input, even when the XML internal 
encoding is not UTF8 (CPython issue 17089) diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -2,6 +2,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.unicodehelper import encode_utf8 from rpython.rlib import rgc, jit from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform @@ -348,6 +349,8 @@ XML_SetUnknownEncodingHandler = expat_external( 'XML_SetUnknownEncodingHandler', [XML_Parser, callback_type, rffi.VOIDP], lltype.Void) +XML_SetEncoding = expat_external( + 'XML_SetEncoding', [XML_Parser, rffi.CCHARP], rffi.INT) # Declarations of external functions @@ -622,10 +625,17 @@ # Parse methods - @unwrap_spec(data='bufferstr_or_u', isfinal=bool) - def Parse(self, space, data, isfinal=False): + @unwrap_spec(isfinal=bool) + def Parse(self, space, w_data, isfinal=False): """Parse(data[, isfinal]) Parse XML data. `isfinal' should be true at end of input.""" + if space.isinstance_w(w_data, space.w_unicode): + u = w_data.unicode_w(space) + data = encode_utf8(space, w_data.unicode_w(space)) + # Explicitly set UTF-8 encoding. Return code ignored. 
+ XML_SetEncoding(self.itself, "utf-8") + else: + data = space.bufferstr_w(w_data) res = XML_Parse(self.itself, data, len(data), isfinal) if self._exc_info: e = self._exc_info @@ -643,9 +653,8 @@ eof = False while not eof: w_data = space.call_method(w_file, 'read', space.wrap(2048)) - data = space.bytes_w(w_data) - eof = len(data) == 0 - w_res = self.Parse(space, data, isfinal=eof) + eof = space.len_w(w_data) == 0 + w_res = self.Parse(space, w_data, isfinal=eof) return w_res @unwrap_spec(base=str) diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -100,7 +100,7 @@ p.Parse(xml) def test_python_encoding(self): - # This name is not knonwn by expat + # This name is not known by expat xml = b"caf\xe9" import pyexpat p = pyexpat.ParserCreate() @@ -110,12 +110,21 @@ p.Parse(xml) def test_mbcs(self): - xml = "

" + xml = b"

" import pyexpat p = pyexpat.ParserCreate() exc = raises(ValueError, p.Parse, xml) assert str(exc.value) == "multi-byte encodings are not supported" + def test_parse_str(self): + xml = "caf\xe9" + import pyexpat + p = pyexpat.ParserCreate() + def gotText(text): + assert text == "caf\xe9" + p.CharacterDataHandler = gotText + p.Parse(xml) + def test_decode_error(self): xml = b'Comment \xe7a va ? Tr\xe8s bien ?' import pyexpat From noreply at buildbot.pypy.org Wed Apr 2 09:26:18 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Wed, 2 Apr 2014 09:26:18 +0200 (CEST) Subject: [pypy-commit] cffi reusable-enum-values: Make int constant available in the following declaration. Message-ID: <20140402072618.0D0A51C022D@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: reusable-enum-values Changeset: r1486:2286da76d88b Date: 2014-04-01 17:25 +0800 http://bitbucket.org/cffi/cffi/changeset/2286da76d88b/ Log: Make int constant available in the following declaration. diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -99,6 +99,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -514,6 +515,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -537,6 +542,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._int_constants[enum.name] = nextenumvalue nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) From noreply at buildbot.pypy.org Wed Apr 2 
09:26:19 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Wed, 2 Apr 2014 09:26:19 +0200 (CEST) Subject: [pypy-commit] cffi reusable-enum-values: add test for enum which refer to previously defined enum value Message-ID: <20140402072619.4ACF21C022D@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: reusable-enum-values Changeset: r1487:e3663a5e6a16 Date: 2014-04-01 19:31 +0800 http://bitbucket.org/cffi/cffi/changeset/e3663a5e6a16/ Log: add test for enum which refer to previously defined enum value diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1322,6 +1322,15 @@ e = ffi.cast("enum e", 0) assert ffi.string(e) == "AA" # pick the first one arbitrarily + def test_enum_refer_previous_enum_value(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("enum e { AA, BB=2, CC=4, DD=BB, EE, FF=CC };") + assert ffi.string(ffi.cast("enum e", 2)) == "BB" + assert ffi.string(ffi.cast("enum e", 3)) == "EE" + assert ffi.sizeof("char[DD]") == 2 + assert ffi.sizeof("char[EE]") == 3 + assert ffi.sizeof("char[FF]") == 4 + def test_nested_anonymous_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef(""" From noreply at buildbot.pypy.org Wed Apr 2 09:26:20 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Wed, 2 Apr 2014 09:26:20 +0200 (CEST) Subject: [pypy-commit] cffi reusable-enum-values: ffi.include update _int_constant and prevent duplicated const decl Message-ID: <20140402072620.888E01C022D@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: reusable-enum-values Changeset: r1488:f31f43f81992 Date: 2014-04-02 11:45 +0800 http://bitbucket.org/cffi/cffi/changeset/f31f43f81992/ Log: ffi.include update _int_constant and prevent duplicated const decl Duplicated declaration of constants even in enum name is not valid. 
diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -542,6 +542,10 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + if enum.name in self._int_constants: + raise api.FFIError( + "multiple declarations of constant %s" % (enum.name,)) + self._int_constants[enum.name] = nextenumvalue nextenumvalue += 1 enumvalues = tuple(enumvalues) @@ -556,3 +560,9 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + if k not in self._int_constants: + self._int_constants[k] = v + else: + raise api.FFIError( + "multiple declarations of constant %s" % (k,)) diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -865,25 +865,25 @@ def test_enum(self): ffi = FFI(backend=self.Backend()) - ffi.cdef("enum foo { A, B, CC, D };") - assert ffi.string(ffi.cast("enum foo", 0)) == "A" - assert ffi.string(ffi.cast("enum foo", 2)) == "CC" - assert ffi.string(ffi.cast("enum foo", 3)) == "D" + ffi.cdef("enum foo { A0, B0, CC0, D0 };") + assert ffi.string(ffi.cast("enum foo", 0)) == "A0" + assert ffi.string(ffi.cast("enum foo", 2)) == "CC0" + assert ffi.string(ffi.cast("enum foo", 3)) == "D0" assert ffi.string(ffi.cast("enum foo", 4)) == "4" - ffi.cdef("enum bar { A, B=-2, CC, D, E };") - assert ffi.string(ffi.cast("enum bar", 0)) == "A" - assert ffi.string(ffi.cast("enum bar", -2)) == "B" - assert ffi.string(ffi.cast("enum bar", -1)) == "CC" - assert ffi.string(ffi.cast("enum bar", 1)) == "E" + ffi.cdef("enum bar { A1, B1=-2, CC1, D1, E1 };") + assert ffi.string(ffi.cast("enum bar", 0)) == "A1" + assert ffi.string(ffi.cast("enum bar", -2)) == "B1" + assert ffi.string(ffi.cast("enum bar", -1)) == "CC1" + assert ffi.string(ffi.cast("enum bar", 1)) == "E1" assert ffi.cast("enum bar", -2) != ffi.cast("enum bar", -2) assert 
ffi.cast("enum foo", 0) != ffi.cast("enum bar", 0) assert ffi.cast("enum bar", 0) != ffi.cast("int", 0) - assert repr(ffi.cast("enum bar", -1)) == "" + assert repr(ffi.cast("enum bar", -1)) == "" assert repr(ffi.cast("enum foo", -1)) == ( # enums are unsigned, if "") # they contain no neg value - ffi.cdef("enum baz { A=0x1000, B=0x2000 };") - assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A" - assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B" + ffi.cdef("enum baz { A2=0x1000, B2=0x2000 };") + assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A2" + assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B2" def test_enum_in_struct(self): ffi = FFI(backend=self.Backend()) @@ -1324,12 +1324,13 @@ def test_enum_refer_previous_enum_value(self): ffi = FFI(backend=self.Backend()) - ffi.cdef("enum e { AA, BB=2, CC=4, DD=BB, EE, FF=CC };") + ffi.cdef("enum e { AA, BB=2, CC=4, DD=BB, EE, FF=CC, GG=FF };") assert ffi.string(ffi.cast("enum e", 2)) == "BB" assert ffi.string(ffi.cast("enum e", 3)) == "EE" assert ffi.sizeof("char[DD]") == 2 assert ffi.sizeof("char[EE]") == 3 assert ffi.sizeof("char[FF]") == 4 + assert ffi.sizeof("char[GG]") == 4 def test_nested_anonymous_struct(self): ffi = FFI(backend=self.Backend()) @@ -1552,6 +1553,7 @@ ffi2.include(ffi1) p = ffi2.cast("enum foo", 1) assert ffi2.string(p) == "FB" + assert ffi2.sizeof("char[FC]") == 2 def test_include_typedef_2(self): backend = self.Backend() From noreply at buildbot.pypy.org Wed Apr 2 09:26:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 09:26:21 +0200 (CEST) Subject: [pypy-commit] cffi default: Merged in mozbugbox/cffi/reusable-enum-values (pull request #29) Message-ID: <20140402072621.BEE271C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1489:e6c30206e1db Date: 2014-04-02 09:26 +0200 http://bitbucket.org/cffi/cffi/changeset/e6c30206e1db/ Log: Merged in mozbugbox/cffi/reusable-enum-values (pull request #29) Make int constant available in the following 
declaration. diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -99,6 +99,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -514,6 +515,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -537,6 +542,11 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + if enum.name in self._int_constants: + raise api.FFIError( + "multiple declarations of constant %s" % (enum.name,)) + + self._int_constants[enum.name] = nextenumvalue nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -550,3 +560,9 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + if k not in self._int_constants: + self._int_constants[k] = v + else: + raise api.FFIError( + "multiple declarations of constant %s" % (k,)) diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -865,25 +865,25 @@ def test_enum(self): ffi = FFI(backend=self.Backend()) - ffi.cdef("enum foo { A, B, CC, D };") - assert ffi.string(ffi.cast("enum foo", 0)) == "A" - assert ffi.string(ffi.cast("enum foo", 2)) == "CC" - assert ffi.string(ffi.cast("enum foo", 3)) == "D" + ffi.cdef("enum foo { A0, B0, CC0, D0 };") + assert ffi.string(ffi.cast("enum foo", 0)) == "A0" + assert ffi.string(ffi.cast("enum foo", 2)) == "CC0" + assert 
ffi.string(ffi.cast("enum foo", 3)) == "D0" assert ffi.string(ffi.cast("enum foo", 4)) == "4" - ffi.cdef("enum bar { A, B=-2, CC, D, E };") - assert ffi.string(ffi.cast("enum bar", 0)) == "A" - assert ffi.string(ffi.cast("enum bar", -2)) == "B" - assert ffi.string(ffi.cast("enum bar", -1)) == "CC" - assert ffi.string(ffi.cast("enum bar", 1)) == "E" + ffi.cdef("enum bar { A1, B1=-2, CC1, D1, E1 };") + assert ffi.string(ffi.cast("enum bar", 0)) == "A1" + assert ffi.string(ffi.cast("enum bar", -2)) == "B1" + assert ffi.string(ffi.cast("enum bar", -1)) == "CC1" + assert ffi.string(ffi.cast("enum bar", 1)) == "E1" assert ffi.cast("enum bar", -2) != ffi.cast("enum bar", -2) assert ffi.cast("enum foo", 0) != ffi.cast("enum bar", 0) assert ffi.cast("enum bar", 0) != ffi.cast("int", 0) - assert repr(ffi.cast("enum bar", -1)) == "" + assert repr(ffi.cast("enum bar", -1)) == "" assert repr(ffi.cast("enum foo", -1)) == ( # enums are unsigned, if "") # they contain no neg value - ffi.cdef("enum baz { A=0x1000, B=0x2000 };") - assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A" - assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B" + ffi.cdef("enum baz { A2=0x1000, B2=0x2000 };") + assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A2" + assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B2" def test_enum_in_struct(self): ffi = FFI(backend=self.Backend()) @@ -1322,6 +1322,16 @@ e = ffi.cast("enum e", 0) assert ffi.string(e) == "AA" # pick the first one arbitrarily + def test_enum_refer_previous_enum_value(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("enum e { AA, BB=2, CC=4, DD=BB, EE, FF=CC, GG=FF };") + assert ffi.string(ffi.cast("enum e", 2)) == "BB" + assert ffi.string(ffi.cast("enum e", 3)) == "EE" + assert ffi.sizeof("char[DD]") == 2 + assert ffi.sizeof("char[EE]") == 3 + assert ffi.sizeof("char[FF]") == 4 + assert ffi.sizeof("char[GG]") == 4 + def test_nested_anonymous_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -1543,6 +1553,7 @@ 
ffi2.include(ffi1) p = ffi2.cast("enum foo", 1) assert ffi2.string(p) == "FB" + assert ffi2.sizeof("char[FC]") == 2 def test_include_typedef_2(self): backend = self.Backend() From noreply at buildbot.pypy.org Wed Apr 2 09:44:36 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 2 Apr 2014 09:44:36 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Small improvements and compiler-fix. Message-ID: <20140402074436.4624A1C0178@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r737:1522dcd48891 Date: 2014-04-02 09:34 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/1522dcd48891/ Log: Small improvements and compiler-fix. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -817,7 +817,7 @@ return self._size def str_content(self): - return self.as_string() + return "'%s'" % self.as_string() def as_string(self): if self.bytes is not None: @@ -1301,7 +1301,7 @@ if w_candidate.is_class(space): w_compiledin = w_candidate self.w_compiledin = w_compiledin - assert isinstance(w_compiledin, W_PointersObject) + assert w_compiledin is None or isinstance(w_compiledin, W_PointersObject) return w_compiledin # === Object Access === diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -476,6 +476,8 @@ while look_in_shadow is not None: w_method = look_in_shadow.s_methoddict().find_selector(w_selector) if w_method is not None: + # Old images don't store compiledin-info in literals. + w_method.w_compiledin = look_in_shadow.w_self() return w_method look_in_shadow = look_in_shadow._s_superclass raise MethodNotFound(self, w_selector) From noreply at buildbot.pypy.org Wed Apr 2 09:44:37 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 2 Apr 2014 09:44:37 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-allocRemoval: Added an allocation-removal optimization for W_SmallInteger objects. 
Message-ID: <20140402074437.5ED9F1C0178@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-allocRemoval Changeset: r738:1efa7a6e3ddc Date: 2014-04-02 09:44 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/1efa7a6e3ddc/ Log: Added an allocation-removal optimization for W_SmallInteger objects. If a small-int is stored into a slot already containing a small-int, then the value will just be overwritten, without changing the object-reference. This means W_SmallInteger objects are not immutable. It also means the JIT can eliminate a lot of allocations. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -200,7 +200,6 @@ # TODO can we tell pypy that its never larger then 31-bit? _attrs_ = ['value'] __slots__ = ('value',) # the only allowed slot here - _immutable_fields_ = ["value"] repr_classname = "W_SmallInteger" def __init__(self, value): @@ -264,6 +263,18 @@ def clone(self, space): return self + + def store_into_list(w_int, list_of_w_obj, index): + """This implements an allocation-removal optimization for Small Integers. + If some slot already contains a SmallInt, we simply store the new value. + This way, the JIT can eliminate lots of allocations of W_SmallInteger objects. 
+ This must be used when storing into regular objects and also the stack/temps of contexts.""" + # This code is duplicated in ContextPartShadow.stack_put + w_old = list_of_w_obj[index] + if isinstance(w_old, W_SmallInteger): + w_old.value = w_int.value + else: + list_of_w_obj[index] = W_SmallInteger(w_int.value) class W_AbstractObjectWithIdentityHash(W_Object): """Object with explicit hash (ie all except small diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -229,7 +229,11 @@ def fetch(self, n0): return self.storage[n0] def store(self, n0, w_value): - self.storage[n0] = w_value + assert n0 >= 0 + if isinstance(w_value, model.W_SmallInteger): + model.W_SmallInteger.store_into_list(w_value, self.storage, n0) + else: + self.storage[n0] = w_value class WeakListStorageShadow(AbstractStorageShadow): _attrs_ = ['storage'] @@ -835,8 +839,18 @@ def stack_get(self, index0): return self._temps_and_stack[index0] - def stack_put(self, index0, w_val): - self._temps_and_stack[index0] = w_val + def stack_put(self, index0, w_value): + if isinstance(w_value, model.W_SmallInteger): + # This code is copied from W_SmallInteger.store_into_list + # Cannot use that method, because _temps_and_stack is virtualizable + # and is not allowed to be passed around. 
+ w_old = self._temps_and_stack[index0] + if isinstance(w_old, model.W_SmallInteger): + w_old.value = w_value.value + else: + self._temps_and_stack[index0] = model.W_SmallInteger(w_value.value) + else: + self._temps_and_stack[index0] = w_value def stack(self): """NOT_RPYTHON""" # purely for testing diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -217,9 +217,6 @@ # dictionary mapping old address to chunk object self.chunks = {} self.chunklist = [] - # cache wrapper integers - self.intcache = {} - self.lastWindowSize = 0 def initialize(self): @@ -440,12 +437,7 @@ self.reader = reader self.value = value self.size = -1 - if value in reader.intcache: - w_int = reader.intcache[value] - else: - w_int = self.space.wrap_int(value) - reader.intcache[value] = w_int - self.w_object = w_int + self.w_object = self.space.wrap_int(value) self.filled_in = True def initialize(self, chunk, reader): From noreply at buildbot.pypy.org Wed Apr 2 11:52:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 11:52:47 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Remove the very old STM planning file here Message-ID: <20140402095248.01BBF1C14E1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5167:b21fff1ac422 Date: 2014-04-02 11:24 +0200 http://bitbucket.org/pypy/extradoc/changeset/b21fff1ac422/ Log: Remove the very old STM planning file here diff --git a/planning/stm.txt b/planning/stm.txt deleted file mode 100644 --- a/planning/stm.txt +++ /dev/null @@ -1,322 +0,0 @@ -============ -STM planning -============ - -| -| Bars on the left describe the next thing to work on. -| On the other hand, "TODO" means "to do later". -| - - -Python Interface ----------------- - -Planned interface refactorings: - -* inspired by "concurrent.futures" from Python 3.2: have - transaction.add() return a Future instance with a method result(). 
- If called inside another transaction, it will suspend it until the - result is available, i.e. until the first transaction commits. Can - be used even if the result is not used, just to ensure some ordering. - XXX but can that be emulated in transaction.py??? - -* (later, maybe) allow nested transactions: either by calling - transaction.run() inside transactions too, or with actual objects - that store collections of transactions. - - -Overview of the GC ------------------- - -A saner approach (and likely better results that now): integrate with -the GC. Here is the basic plan. - -Let T be the number of threads. Use a custom GC, with T nurseries and -one "global area." Every object in the nursery t is only visible to -thread t. Every object in the global area is shared but read-only. -Changes to global objects are only done by committing. - -Every thread t allocates new objects in the nursery t. Accesses to -nursery objects are the fastest, not monitored at all. When we need -read access to a global object, we can read it directly, but we need to -record the version of the object that we read. When we need write -access to a global object, we need to make a whole copy of it into our -nursery. - -| The "global area" should be implemented by reusing gc/minimarkpage.py. - -The RPython program can use this hint: 'x = hint(x, stm_write=True)', -which is like writing to an object in the sense that it forces a local -copy. - -In translator.stm.transform, we track which variables contain objects -that are known to be local. It lets us avoid the run-time check. -That's useful for all freshly malloc'ed objects, which we know are -always local; and that's useful for special cases like the PyFrames, on -which we used the "stm_write=True" hint before running the interpreter. -In both cases the result is: no STM code is needed any more. 
- -When a transaction commits, we do a "minor collection"-like process, -called an "end-of-transaction collection": we move all surviving objects -from the nursery to the global area, either as new objects (first step -done by stmgc.py), or as overwrites of their previous version (second -step done by et.c). Unlike the minor collections in other GCs, this one -occurs at a well-defined time, with no stack roots to scan. - -| We also need to consider what occurs if a nursery grows too big while -| the transaction is still not finished. In this case we need to run a -| similar collection of the nursery, but with stack roots to scan. We -| call this a local collection. -| -| This can also occur before or after we call transaction.run(), when -| there is only the main thread running. In this mode, we run the main -| thread with a nursery too. It can fill up, needing a local collection. -| When transaction.run() is called, we also do a local collection to -| ensure that the nursery of the main thread is empty while the -| transactions execute. -| -| Of course we also need to do from time to time a major collection. We -| will need at some point some concurrency here, to be able to run the -| major collection in a random thread t but detecting changes done by the -| other threads overwriting objects during their own end-of-transaction -| collections. See below. - - -GC flags --------- - -Still open to consideration, but the basic GC flags could be: - - * GC_GLOBAL if the object is in the global area - - * GC_WAS_COPIED on a global object: it has at least one local copy - (then we need to look it up in some local dictionary) - on a local object: it comes from a global object - - * and one complete word (for now?) for the version number, see below - -(Optimization: objects declared immutable don't need a version number.) 
- -TODO: GC_WAS_COPIED should rather be some counter, counting how many threads -have a local copy; something like 2 or 3 bits, where the maximum value -means "overflowed" and is sticky (maybe until some global -synchronization point, if we have one). Or, we can be more advanced and -use 4-5 bits, where in addition we use some "thread hash" value if there -is only one copy. - - -stm_read --------- - -The STM read operation is potentially a complex operation. (That's why -it's useful to remove it as much as possible.) - -stm_read(obj, offset) -> field value - -- If obj is not GC_GLOBAL, then read directly and be done. - -- Otherwise, if GC_WAS_COPIED, and if we find 'localobj' in this - thread's local dictionary, then read directly from 'localobj' and - be done. (Ideally we should also use 'localobj' instead of 'obj' - in future references to this object, but unclear how.) - -- Otherwise, we need to do a global read. This is a real STM read. - Done (on x86 [1]) by reading the version number, then the actual field, - then *again* the version number. If the version number didn't change - and if it is not more recent than the transaction start, then the read - is accepted; otherwise not (we might retry or abort the transaction, - depending on cases). And if the read is accepted then we need to - remember in a local list that we've read that object. - -For now the thread's local dictionary is in C, as a widely-branching -search tree. - - -stm_write ---------- - -- If obj is GC_GLOBAL, we need to find or make a local copy - -- Then we just perform the write. - -This means that stm_write could be implemented with a write barrier that -returns potentially a copy of the object, and which is followed by a -regular write to that copy. - -Note that "making a local copy" implies the same rules as stm_read: read -the version number, copy all fields, then read *again* the version -number [1]. If it didn't change, then we know that we got at least a -consistent copy (i.e. 
nobody changed the object in the middle of us -reading it). If it is too recent, then we might have to abort. - -TODO: how do we handle MemoryErrors when making a local copy?? -Maybe force the transaction to abort, and then re-raise MemoryError ---- for now it's just a fatal error. - - -End-of-transaction collections ------------------------------- - -Start from the "roots" being all local copies of global objects. (These -are the only roots: if there are none, then it means we didn't write -anything in any global object, so there is no new object that can -survive.) From the roots, scan and move all fresh new objects to the -global area. Add the GC_GLOBAL flag to them, of course. Then we need, -atomically (in the STM sense), to overwrite the old global objects with -their local copies. This is done by temporarily locking the global -objects with a special value in their "version" field that will cause -concurrent reads to spin-loop. - -This is also where we need the list of global objects that we've read. -We need to check that each of these global objects' versions have not -been modified in the meantime. - - -Static analysis support ------------------------ - -To get good performance, we should as much as possible use the -'localobj' version of every object instead of the 'obj' one. At least -after a write barrier we should replace the local variable 'obj' with -'localobj', and translator.stm.transform propagates the -fact that it is now a localobj that doesn't need special stm support -any longer. Similarly, all mallocs return a localobj. - -The "stm_write=True" hint is used on PyFrame before the main -interpreter loop, so that we can then be sure that all accesses to -'frame' are to a local obj. - -TODO: Ideally, we could even track which fields -of a localobj are themselves localobjs. This would be useful for -'PyFrame.fastlocals_w': it should also be known to always be a localobj. - - -Local collections ------------------ - -| -| This needs to be done. 
-| - -If a nursery fills up too much during a transaction, it needs to be -locally collected. This is supposed to be a generally rare occurrance, -with the exception of long-running transactions --- including the main -thread before transaction.run(). - -Surviving local objects are moved to the global area. However, the -GC_GLOBAL flag is still not set on them, because they are still not -visible from more than one thread. For now we have to put all such -objects in a list: the list of old-but-local objects. (Some of these -objects can still have the GC_WAS_COPIED flag and so be duplicates of -other really global objects. The dict maintained by et.c must be -updated when we move these objects.) - -Unlike end-of-transaction collections, we need to have the stack roots -of the current transaction. For now we just use -"gcrootfinder=shadowstack" with thread-local variables. At the end of -the local collection, we do a sweep: all objects that were previously -listed as old-but-local but don't survive the present collection are -marked as free. - -TODO: Try to have a generational behavior here. Could probably be done -by (carefully) promoting part of the surviving objects to GC_GLOBAL. - -If implemented like minimarkpage.py, the global area has for each size a -chained list of pages that are (at least partially) free. We make the -heads of the chained lists thread-locals; so each thread reserves one -complete page at a time, reducing cross-thread synchronizations. - -TODO: The local collection would also be a good time to compress the -local list of all global reads done --- "compress" in the sense of -removing duplicates. - - -Global collections ------------------- - -| -| This needs to be done. -| - -We will sometimes need to do a "major" collection, called global -collection here. The issue with it is that there might be live -references to global objects in the local objects of any thread. 
The -problem becomes even harder as some threads may be currently blocked in -some system call. As an intermediate solution that should work well -enough, we could try to acquire a lock for every thread, a kind of LIL -(local interpreter lock). Every thread releases its LIL around -potentially-blocking system calls. At the end of a transaction and once -per local collection, we also do the equivalent of a -release-and-require-the-LIL. The point is that when a LIL is released, -another thread can acquire it temporarily and read the shadowstack of -that thread. - -The major collection is orchestrated by whichever thread noticed one -should start; let's call this thread tg. So tg first acquires all the -LILs. (A way to force another thread to "soon" release its LIL is to -artifically mark its nursery as exhausted.) For each thread t, tg -performs a local collection for t. This empties all the nurseries and -gives tg an up-to-date point of view on the liveness of the objects: the -various lists of old-but-local objects for all the t's. tg can use -these --- plus external roots like prebuilt objects --- as the roots of -a second-level, global mark-and-sweep. - -For now we release the LILs only when the major collection is finished. - -TODO: either release the LILs earlier, say after we processed the lists -of old-but-local objects but before we went on marking and sweeping --- -but we need support for detecting concurrent writes done by concurrent -commits; or, ask all threads currently waiting on the LIL to help with -doing the global mark-and-sweep in parallel. - -Note: standard terminology: - -* Concurrency: there is one thread that does something GC-related, - like scan the heap, and at the same time another thread changes - some object from the heap. - -* Parallelism: there are multiple threads all doing something GC-related, - like all scanning the heap together. 
- - -When not running transactively ------------------------------- - -The above describes the mode during which there is a main thread blocked -in transaction.run(). The other mode is mostly that of "start-up", -before we call transaction.run(). Of course no STM is needed in that -mode, but it's still running the same STM-enabled interpreter. - -| In this mode, we just have one nursery and the global area. When -| transaction.run() is called, we do a local collection to empty it, then -| make sure to flag all surviving objects as GC_GLOBAL in preparation for -| starting actual transactions. Then we can reuse the nursery itself for -| one of the threads. - - -Pointer equality ----------------- - -Another (traditionally messy) issue is that by having several copies of -the same object, we need to take care of all pointer comparisons too. -This is all llops of the form ``ptr_eq(x, y)`` or ``ptr_ne(x, y)``. - -If we know statically that both copies are local copies, then we can -just compare the pointers. Otherwise, we compare -``stm_normalize_global(x)`` with ``stm_normalize_global(y)``, where -``stm_normalize_global(obj)`` returns ``globalobj`` if ``obj`` is a -local, GC_WAS_COPIED object. Moreover the call to -``stm_normalize_global()`` can be omitted for constants. - - -JIT support ------------ - -TODO - - -notes ------ - -[1] this relies on a property guaranteed so far by the x86, but not, - say, by PowerPCs. (XXX find a reference again) From noreply at buildbot.pypy.org Wed Apr 2 11:52:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 11:52:49 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Starting on tmdonate2.txt. Message-ID: <20140402095249.3FB1A1C14E1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5168:8ddd33e59d71 Date: 2014-04-02 11:52 +0200 http://bitbucket.org/pypy/extradoc/changeset/8ddd33e59d71/ Log: Starting on tmdonate2.txt. 
diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt new file mode 100644 --- /dev/null +++ b/planning/tmdonate2.txt @@ -0,0 +1,60 @@ +--- +layout: page +title: 2nd Call for donations - Transactional Memory in PyPy +--- + +============================== +Transactional Memory, 2nd Call +============================== + + +This is the second call for donations on the topic of Transactional +Memory (TM) in PyPy, a way to run CPU-hungry Python programs in +multithreaded mode. It is a follow-up on our `first call`_. Two years +ago we suggested a single-threaded slow-down of somewhere between 2x and +5x. Our aim now is closer to 1.25x, i.e. running only 25% slower than +the regular PyPy. + +We achieved --or overachieved-- most goals laid out in the first call by +a large margin, while at the same time raising only about half the +money. The present proposal is thus about development of the second +half: starting from the various missing low-level optimizations, it will +most importantly focus on development of the Python-facing interface. +This includes both internal things (e.g. do dictionaries need to be more +TM-friendly in general?) as well as directly visible things (e.g. some +debugger-like interface to explore common conflicts in a program). It +also includes exploring and tweaking some existing libraries +(e.g. Twisted) to improve their TM-friendliness. + +See also the `update on HTM`_ below. 
+ + + +Introduction +============ + + + + + +In more details +=============== + + +Hardware Transactional Memory +----------------------------- + +More readings +------------- + + + + +Work plan +========= + + + + +Benefits of This Work to the Python Community and the General Public +==================================================================== From noreply at buildbot.pypy.org Wed Apr 2 12:07:04 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 2 Apr 2014 12:07:04 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Removed weird variable from interpreter. Message-ID: <20140402100704.F1D5B1C14E1@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r739:a4f5299baa72 Date: 2014-04-02 10:12 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a4f5299baa72/ Log: Removed weird variable from interpreter. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -27,7 +27,6 @@ "max_stack_depth", "interrupt_counter_size", "startup_time", "evented"] _w_last_active_context = None - cnt = 0 _last_indent = "" jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], From noreply at buildbot.pypy.org Wed Apr 2 12:07:21 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 2 Apr 2014 12:07:21 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed up the benchmark code in the noBitBlt image. Message-ID: <20140402100721.8C0281C14E1@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r740:bae299e5ce2f Date: 2014-04-02 12:06 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/bae299e5ce2f/ Log: Fixed up the benchmark code in the noBitBlt image. diff too long, truncating to 2000 out of 4946 lines diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12198,4 +12198,14 @@ ]. "self footer." - ^ self! ! 
SMarkRunner execute: CPBAStarBenchmark new with: 3 ! SMarkRunner execute: CPBAStarBenchmark new with: 3 ! SMarkRunner execute: CPBAStarBenchmark new with: 3 ! Object subclass: #Benchmarks instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Cross-Platform-Benchmarks'! !Benchmarks class methodsFor: 'no messages' stamp: 'ag 3/8/2014 23:21'! runAll: iterations ^ String streamContents: [ :str | self allBenchmarks do: [ :bench | str nextPutAll: (SMarkRunner execute: bench new with: iterations) ] ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:21' prior: 49281266! runAll: iterations ^ String streamContents: [ :str | self allBenchmarks do: [ :bench | str nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:22'! allBenchmarks ^ { CPBAStarBenchmark. }! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:26' prior: 49281795! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. CPBBlowfishSuite. CPBChameneosBenchmark. CPBDeltaBlueBenchmark. CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. CPBPolymorphyBenchmark. CPBRichardsBenchmark. CPBSplayTreeBenchmark. }! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:26' prior: 49281938! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. CPBBlowfishSuite. CPBChameneosBenchmark. CPBDeltaBlueBenchmark. CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. CPBPolymorphyBenchmark. CPBRichardsBenchmark. CPBSplayTreeBenchmark. }! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:27'! run: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | str cr; nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:27' prior: 49281534! 
runAll: iterations ^ self run: self allBenchmarks iterations: iterations! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:27'! runAll ^ self runAll: 5! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:29'! list ^ String streamContents: [ :str | self allBenchmarks do: [ :bench | str cr; nextPutAll: bench name asString ] ]! ! 'CPBBinaryTreeBenchmark' indexOf: 'Binary'! 'CPBBinaryTreeBenchmark' includesSubString: 'Binary'! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:30' prior: 49283190! list ^ String streamContents: [ :str | self allBenchmarkNames do: [ :benchName | str cr; nextPutAll: benchName ] ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:31'! allBenchmarkNames ^ self allBenchmarks collect: [ :bench | bench name asString ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:32'! selectBenchmarks: substring ^ self allBenchmarks select: [ :bench | bench name includesSubString: substring ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:35'! runMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarks: substring. benchmarks ifEmpty: [ ^ String streamContents: [ :str | str nextPutAll: 'No benchmarks matched "'; nextPutAll: substring; nextPutAll: '"'; cr; nextPutAll: 'Available benchmarks:'. self allBenchmarkNames do: [ :name | str cr; nextPutAll: name ] ] ]. ! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:35' prior: 49284077! runMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarks: substring. benchmarks ifEmpty: [ ^ String streamContents: [ :str | str nextPutAll: 'No benchmarks matched "'; nextPutAll: substring; nextPutAll: '"'; cr; nextPutAll: 'Available benchmarks:'. self allBenchmarkNames do: [ :name | str cr; nextPutAll: name ] ] ]. ^ self run: benchmarks iterations: iterations! ! 
Benchmarks class organization addCategory: #'category name'! Benchmarks class organization renameCategory: #'category name' toBe: #cli! !Benchmarks class methodsFor: 'cli' stamp: 'ag 3/8/2014 23:36' prior: 49283511! list ^ String streamContents: [ :str | self allBenchmarkNames do: [ :benchName | str cr; nextPutAll: benchName ] ]! ! Benchmarks class organization renameCategory: #'as yet unclassified' toBe: #benchmarks! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:31' prior: 49283714! allBenchmarkNames ^ self allBenchmarks collect: [ :bench | bench name asString ]! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:26' prior: 49282287! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. CPBBlowfishSuite. CPBChameneosBenchmark. CPBDeltaBlueBenchmark. CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. CPBPolymorphyBenchmark. CPBRichardsBenchmark. CPBSplayTreeBenchmark. }! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:27' prior: 49282628! run: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | str cr; nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:27' prior: 49283080! runAll ^ self runAll: 5! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:27' prior: 49282921! runAll: iterations ^ self run: self allBenchmarks iterations: iterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:35' prior: 49284576! runMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarks: substring. benchmarks ifEmpty: [ ^ String streamContents: [ :str | str nextPutAll: 'No benchmarks matched "'; nextPutAll: substring; nextPutAll: '"'; cr; nextPutAll: 'Available benchmarks:'. self allBenchmarkNames do: [ :name | str cr; nextPutAll: name ] ] ]. ^ self run: benchmarks iterations: iterations! ! 
!Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:32' prior: 49283881! selectBenchmarks: substring ^ self allBenchmarks select: [ :bench | bench name includesSubString: substring ]! ! Benchmarks class organization addCategory: #private! !Benchmarks class methodsFor: 'private' stamp: 'ag 3/8/2014 23:37' prior: 49286064! run: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | str cr; nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! !Benchmarks class methodsFor: 'private' stamp: 'ag 3/8/2014 23:37' prior: 49285542! allBenchmarkNames ^ self allBenchmarks collect: [ :bench | bench name asString ]! ! !Benchmarks class methodsFor: 'private' stamp: 'ag 3/8/2014 23:37' prior: 49287167! selectBenchmarks: substring ^ self allBenchmarks select: [ :bench | bench name includesSubString: substring ]! ! Benchmarks runMatching: 'Binary' iterations: 3! Benchmarks runMatching: 'Bin' iterations: 3! Benchmarks runMatching: 'Bidn' iterations: 3! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! 
Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! 
!Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! 
Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! 
!Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! 
Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! 
Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. 
self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! 
!Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! 
Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! Benchmarks runAll ! !Benchmarks class methodsFor: 'private' stamp: 'ag 3/8/2014 23:48' prior: 49287421! run: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | str nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! ----SNAPSHOT----{8 March 2014 . 11:49:02 pm} Squeak4.5-noBitBlt.image priorSource: 15724710! ----STARTUP----{11 March 2014 . 10:13:40 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! 
test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Canvas class', i asString! ! 
!Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! 
Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! 
test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! 
test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! 
Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. 
self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Stream class', i asString! ! 
!Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! 
test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! 
Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! From noreply at buildbot.pypy.org Wed Apr 2 13:05:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 13:05:35 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/ba29f5ab1dcd Message-ID: <20140402110535.223331D22DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70393:eec6a7eac1a7 Date: 2014-04-02 13:04 +0200 http://bitbucket.org/pypy/pypy/changeset/eec6a7eac1a7/ Log: import stmgc/ba29f5ab1dcd diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -a8d0ff724dea +ba29f5ab1dcd diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -684,6 +684,10 @@ STM_SEGMENT->jmpbuf_ptr = NULL; clear_callbacks_on_abort(); } + else { + assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE); + assert(STM_SEGMENT->jmpbuf_ptr == NULL); + } s_mutex_unlock(); } From noreply at buildbot.pypy.org Wed Apr 2 16:14:04 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 2 Apr 2014 16:14:04 +0200 (CEST) Subject: [pypy-commit] pypy default: set env in subprocess Message-ID: <20140402141404.BCC6B1C022D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70394:5ccb263575f6 Date: 2014-04-02 17:12 +0300 http://bitbucket.org/pypy/pypy/changeset/5ccb263575f6/ Log: set env in subprocess diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -414,7 +414,8 @@ try: returncode, stdout, stderr = _run_subprocess( 'nmake', - ['/nologo', '/f', str(path.join('Makefile'))] + extra_opts) + ['/nologo', '/f', 
str(path.join('Makefile'))] + extra_opts, + evn = self.c_environ) finally: oldcwd.chdir() From noreply at buildbot.pypy.org Wed Apr 2 17:42:55 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 2 Apr 2014 17:42:55 +0200 (CEST) Subject: [pypy-commit] pypy default: whoops Message-ID: <20140402154255.0D02E1C320C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70395:5f9dabe859e6 Date: 2014-04-02 18:41 +0300 http://bitbucket.org/pypy/pypy/changeset/5f9dabe859e6/ Log: whoops diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -415,7 +415,7 @@ returncode, stdout, stderr = _run_subprocess( 'nmake', ['/nologo', '/f', str(path.join('Makefile'))] + extra_opts, - evn = self.c_environ) + env = self.c_environ) finally: oldcwd.chdir() From noreply at buildbot.pypy.org Wed Apr 2 18:14:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 18:14:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a document about STM. Message-ID: <20140402161423.EF49C1C35EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70396:0d281ac894e2 Date: 2014-04-02 18:13 +0200 http://bitbucket.org/pypy/pypy/changeset/0d281ac894e2/ Log: Add a document about STM. diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/stm.rst @@ -0,0 +1,236 @@ +====================== +Transactional Memory +====================== + +.. contents:: + + +This page is about ``pypy-stm``, a special in-development version of +PyPy which can run multiple independent CPU-hungry threads in the same +process in parallel. It is side-stepping what is known in the Python +world as the "global interpreter lock (GIL)" problem. + +"STM" stands for Software Transactional Memory, the technique used +internally. 
This page describes ``pypy-stm`` from the perspective of a +user, describes work in progress, and finally gives references to more +implementation details. + +This work was done by Remi Meier and Armin Rigo. + + +Introduction and current status +=============================== + +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats +listed below, it should be in theory within 25%-50% of the speed of +PyPy, comparing the JITting version in both cases. It is called STM for +Software Transactional Memory, which is the internal technique used (see +`Reference to implementation details`_). + +**pypy-stm requires 64-bit Linux for now.** + +Development is done in the branch `stmgc-c7`_. If you are only +interested in trying it out, you can download a Ubuntu 12.04 binary +here__. The current version supports four "segments", which means that +it will run up to four threads in parallel (in other words, you get a +GIL effect again, but only if trying to execute more than 4 threads). + +To build a version from sources, you first need to compile a custom +version of clang; we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +for all checkouts). Then apply all the patches in `this directory`__: +they are fixes for the very extensive usage that pypy-stm does of a +clang-only feature (without them, you get crashes of clang). Then get +the branch `stmgc-c7`_ of PyPy and run:: + + rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py + +.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ +.. __: http://buildbot.pypy.org/nightly/stmgc-c7/ +.. __: http://clang.llvm.org/get_started.html +.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ + + +Caveats: + +* It should generally work. Please do `report bugs`_ that manifest as a + crash or wrong behavior (markedly different from the behavior of a + regular PyPy). 
Performance bugs are likely to be known issues; we're + working on them. + +* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, + which is "only" bad). Moreover, you should run it with a command like + ``pypy-stm --jit trace_limit=60000 args...``; the default value of + 6000 for ``trace_limit`` is currently too low (6000 should become + reasonable again as we improve). Also, in order to produce machine + code, the JIT needs to enter a special single-threaded mode for now. + This all means that you *will* get very bad performance results if + your program doesn't run for *many* seconds for now. + +* The GC is new; although clearly inspired by PyPy's regular GC, it + misses a number of optimizations for now. Programs allocating large + numbers of small objects that don't immediately die, as well as + programs that modify large lists or dicts, suffer from these missing + optimizations. + +* The GC has no support for destructors: the ``__del__`` method is + never called (including on file objects, which won't be closed for + you). This is of course temporary. + +* The STM system is based on very efficient read/write barriers, which + are mostly done (their placement could be improved a bit in + JIT-generated machine code). But the overall bookkeeping logic could + see more improvements (see Statistics_ below). + +* You can use `atomic sections`_, but the most visible missing thing is + that you don't get reports about the "conflicts" you get. This would + be the first thing that you need in order to start using atomic + sections more extensively. Also, for now: for better results, try to + explicitly force a transaction break just before (and possibly after) + each large atomic section, with ``time.sleep(0)``. + +.. 
_`report bugs`: https://bugs.pypy.org/ + + + +Statistics +========== + +When a non-main thread finishes, you get statistics printed to stderr, +looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + spin loop 129512 0.094 s + +The first number is a counter; the second number gives the associated +time (the amount of real time that the thread was in this state; the sum +of all the times should be equal to the total time between the thread's +start and the thread's end). The most important points are "run +committed", which gives the amount of useful work, and "outside +transaction", which should give the time spent e.g. in library calls +(right now it seems to be a bit larger than that; to investigate). +Everything else is overhead of various forms. (Short-, medium- and +long-term future work involves reducing this overhead :-) + +These statistics are not printed out for the main thread, for now. + + +Atomic sections +=============== + +While one of the goal of pypy-stm is to give a GIL-free but otherwise +unmodified Python, the other goal is to push for a better way to use +multithreading. For this, you (as the Python programmer) get an API +in the ``__pypy__.thread`` submodule: + +* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in + a ``with __pypy__.thread.atomic:`` statement). 
It runs the whole + block of code without breaking the current transaction --- from + the point of view of a regular CPython/PyPy, this is equivalent to + saying that the GIL will not be released at all between the start and + the end of this block of code. + +The obvious usage is to use atomic blocks in the same way as one would +use locks: to protect changes to some shared data, you do them in a +``with atomic`` block, just like you would otherwise do them in a ``with +mylock`` block after ``mylock = thread.allocate_lock()``. This allows +you not to care about acquiring the correct locks in the correct order; +it is equivalent to having only one global lock. This is how +transactional memory is `generally described`__: as a way to efficiently +execute such atomic blocks, running them in parallel while giving the +illusion that they run in some serial order. + +.. __: http://en.wikipedia.org/wiki/Transactional_memory + +However, the less obvious intended usage of atomic sections is as a +wide-ranging replacement of explicit threads. You can turn a program +that is not multi-threaded at all into a program that uses threads +internally, together with large atomic sections to keep the behavior +unchanged. This capability can be hidden in a library or in the +framework you use; the end user's code does not need to be explicitly +aware of using threads. For a simple example of this, see +`lib_pypy/transaction.py`_. The idea is that if you have a program +where the function ``f(key, value)`` runs on every item of some big +dictionary, you can replace the loop with:: + + for key, value in bigdict.items(): + transaction.add(f, key, value) + transaction.run() + +This code runs the various calls to ``f(key, value)`` using a thread +pool, but every single call is done in an atomic section. The end +result is that the behavior should be exactly equivalent: you don't get +any extra multithreading issue. + +.. 
_`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py + +================== + +Other APIs in pypy-stm: + +* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" + in this pypy-stm. This is the limit above which more threads will not + be able to execute on more cores. (Right now it is limited to 4 due + to inter-segment overhead, but should be increased in the future. It + should also be settable, and the default value should depend on the + number of actual CPUs.) + +* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but + raises an exception if you attempt to nest it inside another + ``atomic``. + +* ``__pypy__.thread.signals_enabled``: a context manager that runs + its block with signals enabled. By default, signals are only + enabled in the main thread; a non-main thread will not receive + signals (this is like CPython). Enabling signals in non-main threads + is useful for libraries where threads are hidden and the end user is + not expecting his code to run elsewhere than in the main thread. + +Note that all of this API is (or will be) implemented in a regular PyPy +too: for example, ``with atomic`` will simply mean "don't release the +GIL" and ``getsegmentlimit()`` will return 1. + +================== + + +Reference to implementation details +=================================== + +The core of the implementation is in a separate C library called stmgc_, +in the c7_ subdirectory. Please see the `README.txt`_ for more +information. + +.. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ +.. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ +.. 
_`README.txt`: https://bitbucket.org/pypy/stmgc/raw/default/c7/README.txt + +PyPy itself adds on top of it the automatic placement of read__ and write__ +barriers and of `"becomes-inevitable-now" barriers`__, the logic to +`start/stop transactions as an RPython transformation`__ and as +`supporting`__ `C code`__, and the support in the JIT (mostly as a +`transformation step on the trace`__ and generation of custom assembler +in `assembler.py`__). + +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/readbarrier.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/memory/gctransform/stmframework.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/inevitable.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/jitdriver.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.h +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py From noreply at buildbot.pypy.org Wed Apr 2 18:48:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 18:48:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Mention fork() Message-ID: <20140402164817.B29E21C320C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70397:832efcce27b1 Date: 2014-04-02 18:47 +0200 http://bitbucket.org/pypy/pypy/changeset/832efcce27b1/ Log: Mention fork() diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -89,6 +89,9 @@ explicitly force a transaction break just before (and possibly after) each large atomic section, with ``time.sleep(0)``. +* Forking the process is slow because the complete memory needs to be + copied manually right now. + .. 
_`report bugs`: https://bugs.pypy.org/ From noreply at buildbot.pypy.org Wed Apr 2 18:58:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 18:58:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Mention another known crash Message-ID: <20140402165813.921F31C320C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70398:6d82cad79057 Date: 2014-04-02 18:57 +0200 http://bitbucket.org/pypy/pypy/changeset/6d82cad79057/ Log: Mention another known crash diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -92,6 +92,11 @@ * Forking the process is slow because the complete memory needs to be copied manually right now. +* Very long-running processes should eventually crash on an assertion + error because of a non-implemented overflow of an internal 29-bit + number, but this requires at the very least ten hours --- more + probably, several days or more. + .. _`report bugs`: https://bugs.pypy.org/ From noreply at buildbot.pypy.org Wed Apr 2 19:06:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 19:06:48 +0200 (CEST) Subject: [pypy-commit] pypy default: hg backout 04acdc4163f4: revert the change to the docs that killed most Message-ID: <20140402170648.F19161C320C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70399:21d1db01f2d3 Date: 2014-04-02 19:04 +0200 http://bitbucket.org/pypy/pypy/changeset/21d1db01f2d3/ Log: hg backout 04acdc4163f4: revert the change to the docs that killed most of the reasoning for why we *still* think LLVM is unlikely to be a good idea. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -429,12 +429,25 @@ Could we use LLVM? ------------------ -There is a (static) translation backend using LLVM in the branch -``llvm-translation-backend``. It can translate PyPy with or without the JIT on -Linux. +In theory yes. 
But we tried to use it 5 or 6 times already, as a +translation backend or as a JIT backend --- and failed each time. -Using LLVM as our JIT backend looks interesting as well -- we made an attempt, -but it failed: LLVM has no way to patch the generated machine code. +In more details: using LLVM as a (static) translation backend is +pointless nowadays because you can generate C code and compile it with +clang. (Note that compiling PyPy with clang gives a result that is not +faster than compiling it with gcc.) We might in theory get extra +benefits from LLVM's GC integration, but this requires more work on the +LLVM side before it would be remotely useful. Anyway, it could be +interfaced via a custom primitive in the C code. + +On the other hand, using LLVM as our JIT backend looks interesting as +well --- but again we made an attempt, and it failed: LLVM has no way to +patch the generated machine code. + +So the position of the core PyPy developers is that if anyone wants to +make an N+1'th attempt with LLVM, they are welcome, and will be happy to +provide help in the IRC channel, but they are left with the burden of proof +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? From noreply at buildbot.pypy.org Wed Apr 2 19:06:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 19:06:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Mention llvm-translation-backend without killing the rest of the entry. Message-ID: <20140402170650.41BFA1C320C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70400:ac4800c0db8a Date: 2014-04-02 19:06 +0200 http://bitbucket.org/pypy/pypy/changeset/ac4800c0db8a/ Log: Mention llvm-translation-backend without killing the rest of the entry. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -438,7 +438,9 @@ faster than compiling it with gcc.) 
We might in theory get extra benefits from LLVM's GC integration, but this requires more work on the LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. +interfaced via a custom primitive in the C code. (The latest such +experimental backend is in the branch ``llvm-translation-backend``, +which can translate PyPy with or without the JIT on Linux.) On the other hand, using LLVM as our JIT backend looks interesting as well --- but again we made an attempt, and it failed: LLVM has no way to From noreply at buildbot.pypy.org Wed Apr 2 20:06:10 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 2 Apr 2014 20:06:10 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: exact error messages are an impl detail. we may revisit these in 3.4, when Message-ID: <20140402180610.0E0381D2732@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70401:fb6aa1ffe82b Date: 2014-04-01 16:43 -0700 http://bitbucket.org/pypy/pypy/changeset/fb6aa1ffe82b/ Log: exact error messages are an impl detail. 
we may revisit these in 3.4, when cpython changes many of these messages, anyway diff --git a/lib-python/3/test/test_io.py b/lib-python/3/test/test_io.py --- a/lib-python/3/test/test_io.py +++ b/lib-python/3/test/test_io.py @@ -1041,7 +1041,7 @@ def test_args_error(self): # Issue #17275 - with self.assertRaisesRegex(TypeError, "BufferedReader"): + with self.assertRaisesRegex(TypeError, "__init__()"): self.tp(io.BytesIO(), 1024, 1024, 1024) @@ -1329,7 +1329,7 @@ def test_args_error(self): # Issue #17275 - with self.assertRaisesRegex(TypeError, "BufferedWriter"): + with self.assertRaisesRegex(TypeError, "__init__()"): self.tp(io.BytesIO(), 1024, 1024, 1024) @@ -1705,7 +1705,7 @@ def test_args_error(self): # Issue #17275 - with self.assertRaisesRegex(TypeError, "BufferedRandom"): + with self.assertRaisesRegex(TypeError, "__init__()"): self.tp(io.BytesIO(), 1024, 1024, 1024) From noreply at buildbot.pypy.org Wed Apr 2 20:07:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 2 Apr 2014 20:07:11 +0200 (CEST) Subject: [pypy-commit] pypy py3k: move a misplaced test Message-ID: <20140402180711.C00B91D2732@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70402:4ed2c44cfd6d Date: 2014-04-01 16:56 -0700 http://bitbucket.org/pypy/pypy/changeset/4ed2c44cfd6d/ Log: move a misplaced test diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -369,6 +369,17 @@ t = _io.TextIOWrapper(NonbytesStream(u'a')) raises(TypeError, t.read) + def test_device_encoding(self): + import os + import sys + encoding = os.device_encoding(sys.stderr.fileno()) + if not encoding: + skip("Requires a result from " + "os.device_encoding(sys.stderr.fileno())") + import _io + f = _io.TextIOWrapper(sys.stderr.buffer) + assert f.encoding == encoding + class AppTestIncrementalNewlineDecoder: def test_newline_decoder(self): @@ -477,14 +488,3 @@ _check(dec) 
dec = _io.IncrementalNewlineDecoder(None, translate=True) _check(dec) - - def test_device_encoding(self): - import os - import sys - encoding = os.device_encoding(sys.stderr.fileno()) - if not encoding: - skip("Requires a result from " - "os.device_encoding(sys.stderr.fileno())") - import _io - f = _io.TextIOWrapper(sys.stderr.buffer) - assert f.encoding == encoding From noreply at buildbot.pypy.org Wed Apr 2 20:23:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 2 Apr 2014 20:23:12 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140402182312.3E6011D23F9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70403:10edbabbaae9 Date: 2014-04-02 11:22 -0700 http://bitbucket.org/pypy/pypy/changeset/10edbabbaae9/ Log: merge default diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -10,6 +10,7 @@ import tempfile import unittest import argparse +import gc from StringIO import StringIO @@ -47,7 +48,11 @@ def tearDown(self): os.chdir(self.old_dir) - shutil.rmtree(self.temp_dir, True) + gc.collect() + for root, dirs, files in os.walk(self.temp_dir, topdown=False): + for name in files: + os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -429,12 +429,27 @@ Could we use LLVM? ------------------ -There is a (static) translation backend using LLVM in the branch -``llvm-translation-backend``. It can translate PyPy with or without the JIT on -Linux. +In theory yes. But we tried to use it 5 or 6 times already, as a +translation backend or as a JIT backend --- and failed each time. 
-Using LLVM as our JIT backend looks interesting as well -- we made an attempt, -but it failed: LLVM has no way to patch the generated machine code. +In more details: using LLVM as a (static) translation backend is +pointless nowadays because you can generate C code and compile it with +clang. (Note that compiling PyPy with clang gives a result that is not +faster than compiling it with gcc.) We might in theory get extra +benefits from LLVM's GC integration, but this requires more work on the +LLVM side before it would be remotely useful. Anyway, it could be +interfaced via a custom primitive in the C code. (The latest such +experimental backend is in the branch ``llvm-translation-backend``, +which can translate PyPy with or without the JIT on Linux.) + +On the other hand, using LLVM as our JIT backend looks interesting as +well --- but again we made an attempt, and it failed: LLVM has no way to +patch the generated machine code. + +So the position of the core PyPy developers is that if anyone wants to +make an N+1'th attempt with LLVM, they are welcome, and will be happy to +provide help in the IRC channel, but they are left with the burden of proof +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/stm.rst @@ -0,0 +1,244 @@ +====================== +Transactional Memory +====================== + +.. contents:: + + +This page is about ``pypy-stm``, a special in-development version of +PyPy which can run multiple independent CPU-hungry threads in the same +process in parallel. It is side-stepping what is known in the Python +world as the "global interpreter lock (GIL)" problem. + +"STM" stands for Software Transactional Memory, the technique used +internally. This page describes ``pypy-stm`` from the perspective of a +user, describes work in progress, and finally gives references to more +implementation details. 
+ +This work was done by Remi Meier and Armin Rigo. + + +Introduction and current status +=============================== + +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats +listed below, it should be in theory within 25%-50% of the speed of +PyPy, comparing the JITting version in both cases. It is called STM for +Software Transactional Memory, which is the internal technique used (see +`Reference to implementation details`_). + +**pypy-stm requires 64-bit Linux for now.** + +Development is done in the branch `stmgc-c7`_. If you are only +interested in trying it out, you can download a Ubuntu 12.04 binary +here__. The current version supports four "segments", which means that +it will run up to four threads in parallel (in other words, you get a +GIL effect again, but only if trying to execute more than 4 threads). + +To build a version from sources, you first need to compile a custom +version of clang; we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +for all checkouts). Then apply all the patches in `this directory`__: +they are fixes for the very extensive usage that pypy-stm does of a +clang-only feature (without them, you get crashes of clang). Then get +the branch `stmgc-c7`_ of PyPy and run:: + + rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py + +.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ +.. __: http://buildbot.pypy.org/nightly/stmgc-c7/ +.. __: http://clang.llvm.org/get_started.html +.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ + + +Caveats: + +* It should generally work. Please do `report bugs`_ that manifest as a + crash or wrong behavior (markedly different from the behavior of a + regular PyPy). Performance bugs are likely to be known issues; we're + working on them. + +* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, + which is "only" bad). 
Moreover, you should run it with a command like + ``pypy-stm --jit trace_limit=60000 args...``; the default value of + 6000 for ``trace_limit`` is currently too low (6000 should become + reasonable again as we improve). Also, in order to produce machine + code, the JIT needs to enter a special single-threaded mode for now. + This all means that you *will* get very bad performance results if + your program doesn't run for *many* seconds for now. + +* The GC is new; although clearly inspired by PyPy's regular GC, it + misses a number of optimizations for now. Programs allocating large + numbers of small objects that don't immediately die, as well as + programs that modify large lists or dicts, suffer from these missing + optimizations. + +* The GC has no support for destructors: the ``__del__`` method is + never called (including on file objects, which won't be closed for + you). This is of course temporary. + +* The STM system is based on very efficient read/write barriers, which + are mostly done (their placement could be improved a bit in + JIT-generated machine code). But the overall bookkeeping logic could + see more improvements (see Statistics_ below). + +* You can use `atomic sections`_, but the most visible missing thing is + that you don't get reports about the "conflicts" you get. This would + be the first thing that you need in order to start using atomic + sections more extensively. Also, for now: for better results, try to + explicitly force a transaction break just before (and possibly after) + each large atomic section, with ``time.sleep(0)``. + +* Forking the process is slow because the complete memory needs to be + copied manually right now. + +* Very long-running processes should eventually crash on an assertion + error because of a non-implemented overflow of an internal 29-bit + number, but this requires at the very least ten hours --- more + probably, several days or more. + +.. 
_`report bugs`: https://bugs.pypy.org/ + + + +Statistics +========== + +When a non-main thread finishes, you get statistics printed to stderr, +looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + spin loop 129512 0.094 s + +The first number is a counter; the second number gives the associated +time (the amount of real time that the thread was in this state; the sum +of all the times should be equal to the total time between the thread's +start and the thread's end). The most important points are "run +committed", which gives the amount of useful work, and "outside +transaction", which should give the time spent e.g. in library calls +(right now it seems to be a bit larger than that; to investigate). +Everything else is overhead of various forms. (Short-, medium- and +long-term future work involves reducing this overhead :-) + +These statistics are not printed out for the main thread, for now. + + +Atomic sections +=============== + +While one of the goal of pypy-stm is to give a GIL-free but otherwise +unmodified Python, the other goal is to push for a better way to use +multithreading. For this, you (as the Python programmer) get an API +in the ``__pypy__.thread`` submodule: + +* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in + a ``with __pypy__.thread.atomic:`` statement). 
It runs the whole + block of code without breaking the current transaction --- from + the point of view of a regular CPython/PyPy, this is equivalent to + saying that the GIL will not be released at all between the start and + the end of this block of code. + +The obvious usage is to use atomic blocks in the same way as one would +use locks: to protect changes to some shared data, you do them in a +``with atomic`` block, just like you would otherwise do them in a ``with +mylock`` block after ``mylock = thread.allocate_lock()``. This allows +you not to care about acquiring the correct locks in the correct order; +it is equivalent to having only one global lock. This is how +transactional memory is `generally described`__: as a way to efficiently +execute such atomic blocks, running them in parallel while giving the +illusion that they run in some serial order. + +.. __: http://en.wikipedia.org/wiki/Transactional_memory + +However, the less obvious intended usage of atomic sections is as a +wide-ranging replacement of explicit threads. You can turn a program +that is not multi-threaded at all into a program that uses threads +internally, together with large atomic sections to keep the behavior +unchanged. This capability can be hidden in a library or in the +framework you use; the end user's code does not need to be explicitly +aware of using threads. For a simple example of this, see +`lib_pypy/transaction.py`_. The idea is that if you have a program +where the function ``f(key, value)`` runs on every item of some big +dictionary, you can replace the loop with:: + + for key, value in bigdict.items(): + transaction.add(f, key, value) + transaction.run() + +This code runs the various calls to ``f(key, value)`` using a thread +pool, but every single call is done in an atomic section. The end +result is that the behavior should be exactly equivalent: you don't get +any extra multithreading issue. + +.. 
_`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py + +================== + +Other APIs in pypy-stm: + +* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" + in this pypy-stm. This is the limit above which more threads will not + be able to execute on more cores. (Right now it is limited to 4 due + to inter-segment overhead, but should be increased in the future. It + should also be settable, and the default value should depend on the + number of actual CPUs.) + +* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but + raises an exception if you attempt to nest it inside another + ``atomic``. + +* ``__pypy__.thread.signals_enabled``: a context manager that runs + its block with signals enabled. By default, signals are only + enabled in the main thread; a non-main thread will not receive + signals (this is like CPython). Enabling signals in non-main threads + is useful for libraries where threads are hidden and the end user is + not expecting his code to run elsewhere than in the main thread. + +Note that all of this API is (or will be) implemented in a regular PyPy +too: for example, ``with atomic`` will simply mean "don't release the +GIL" and ``getsegmentlimit()`` will return 1. + +================== + + +Reference to implementation details +=================================== + +The core of the implementation is in a separate C library called stmgc_, +in the c7_ subdirectory. Please see the `README.txt`_ for more +information. + +.. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ +.. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ +.. 
_`README.txt`: https://bitbucket.org/pypy/stmgc/raw/default/c7/README.txt + +PyPy itself adds on top of it the automatic placement of read__ and write__ +barriers and of `"becomes-inevitable-now" barriers`__, the logic to +`start/stop transactions as an RPython transformation`__ and as +`supporting`__ `C code`__, and the support in the JIT (mostly as a +`transformation step on the trace`__ and generation of custom assembler +in `assembler.py`__). + +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/readbarrier.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/memory/gctransform/stmframework.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/inevitable.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/jitdriver.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.h +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -127,3 +127,10 @@ .. branch: win32-fixes4 fix more tests for win32 + +.. branch: latest-improve-doc +Fix broken links in documentation + +.. 
branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field \ No newline at end of file diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -49,13 +49,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) @@ -3090,7 +3096,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def stmt_del_lineno(space, w_self): @@ -3117,7 +3124,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def stmt_del_col_offset(space, w_self): @@ -3155,7 +3163,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def FunctionDef_del_name(space, w_self): @@ -3314,7 +3323,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - 
w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def ClassDef_del_name(space, w_self): @@ -4635,7 +4645,8 @@ w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'module') + # need to save the original object too + w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state |= 4 def ImportFrom_del_module(space, w_self): @@ -4684,7 +4695,8 @@ w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'level') + # need to save the original object too + w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state |= 16 def ImportFrom_del_level(space, w_self): @@ -4936,7 +4948,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def expr_del_lineno(space, w_self): @@ -4963,7 +4976,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def expr_del_col_offset(space, w_self): @@ -6237,7 +6251,8 @@ w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'n') + # need to save the original object too + w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state |= 4 def Num_del_n(space, w_self): @@ -6288,7 +6303,8 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 's') + # need to save the original object too + w_self.setdictvalue(space, 's', 
w_new_value) w_self.initialization_state |= 4 def Str_del_s(space, w_self): @@ -6339,7 +6355,8 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 's') + # need to save the original object too + w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state |= 4 def Bytes_del_s(space, w_self): @@ -6438,7 +6455,8 @@ w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'attr') + # need to save the original object too + w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state |= 8 def Attribute_del_attr(space, w_self): @@ -6718,7 +6736,8 @@ w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'id') + # need to save the original object too + w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state |= 4 def Name_del_id(space, w_self): @@ -6953,7 +6972,8 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + # need to save the original object too + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Const_del_value(space, w_self): @@ -7604,7 +7624,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def excepthandler_del_lineno(space, w_self): @@ -7631,7 +7652,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def excepthandler_del_col_offset(space, w_self): @@ -7701,7 +7723,8 @@ 
w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 8 def ExceptHandler_del_name(space, w_self): @@ -7804,7 +7827,8 @@ w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'vararg') + # need to save the original object too + w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state |= 2 def arguments_del_vararg(space, w_self): @@ -7887,7 +7911,8 @@ w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'kwarg') + # need to save the original object too + w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state |= 16 def arguments_del_kwarg(space, w_self): @@ -8024,7 +8049,8 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'arg') + # need to save the original object too + w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 def arg_del_arg(space, w_self): @@ -8107,7 +8133,8 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'arg') + # need to save the original object too + w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 def keyword_del_arg(space, w_self): @@ -8190,7 +8217,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 1 def alias_del_name(space, w_self): @@ -8222,7 +8250,8 @@ w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'asname') + # 
need to save the original object too + w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state |= 2 def alias_del_asname(space, w_self): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -470,6 +470,7 @@ self.emit("raise OperationError(space.w_TypeError, " "space.w_None)", 3) else: + save_original_object = True level = 2 if field.opt and field.type.value != "int": self.emit("if space.is_w(w_new_value, space.w_None):", 2) @@ -607,13 +608,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -403,3 +403,40 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") + + def test_dict_astNode(self): + import ast + num_node = ast.Num(n=2, lineno=2, col_offset=3) + dict_res = num_node.__dict__ + + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Num_notfullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2) + assert num_node.n == 2 + assert num_node.lineno == 2 + num_node2 = copy.deepcopy(num_node) + + def test_issue1673_Num_fullinit(self): + import ast + import copy + num_node = 
ast.Num(n=2,lineno=2,col_offset=3) + num_node2 = copy.deepcopy(num_node) + assert num_node.n == num_node2.n + assert num_node.lineno == num_node2.lineno + assert num_node.col_offset == num_node2.col_offset + dict_res = num_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Str(self): + import ast + import copy + str_node = ast.Str(n=2,lineno=2) + assert str_node.n == 2 + assert str_node.lineno == 2 + str_node2 = copy.deepcopy(str_node) + dict_res = str_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2} + \ No newline at end of file diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -11,6 +11,9 @@ def run_subprocess(executable, args, env=None, cwd=None): return _run(executable, args, env, cwd) +shell_default = False +if sys.platform == 'win32': + shell_default = True def _run(executable, args, env, cwd): # unless overridden below if isinstance(args, str): @@ -21,7 +24,9 @@ args = [str(executable)] else: args = [str(executable)] + args - shell = False + # shell=True on unix-like is a known security vulnerability, but + # on windows shell=True does not properly propogate the env dict + shell = shell_default # Just before spawning the subprocess, do a gc.collect(). 
This # should help if we are running on top of PyPy, if the subprocess diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -414,7 +414,8 @@ try: returncode, stdout, stderr = _run_subprocess( 'nmake', - ['/nologo', '/f', str(path.join('Makefile'))] + extra_opts) + ['/nologo', '/f', str(path.join('Makefile'))] + extra_opts, + env = self.c_environ) finally: oldcwd.chdir() From noreply at buildbot.pypy.org Wed Apr 2 20:23:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 2 Apr 2014 20:23:13 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: merge py3k Message-ID: <20140402182313.AE9401D23F9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70404:b7947bb0d943 Date: 2014-04-02 11:22 -0700 http://bitbucket.org/pypy/pypy/changeset/b7947bb0d943/ Log: merge py3k diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -10,6 +10,7 @@ import tempfile import unittest import argparse +import gc from StringIO import StringIO @@ -47,7 +48,11 @@ def tearDown(self): os.chdir(self.old_dir) - shutil.rmtree(self.temp_dir, True) + gc.collect() + for root, dirs, files in os.walk(self.temp_dir, topdown=False): + for name in files: + os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -429,12 +429,27 @@ Could we use LLVM? ------------------ -There is a (static) translation backend using LLVM in the branch -``llvm-translation-backend``. It can translate PyPy with or without the JIT on -Linux. +In theory yes. 
But we tried to use it 5 or 6 times already, as a +translation backend or as a JIT backend --- and failed each time. -Using LLVM as our JIT backend looks interesting as well -- we made an attempt, -but it failed: LLVM has no way to patch the generated machine code. +In more details: using LLVM as a (static) translation backend is +pointless nowadays because you can generate C code and compile it with +clang. (Note that compiling PyPy with clang gives a result that is not +faster than compiling it with gcc.) We might in theory get extra +benefits from LLVM's GC integration, but this requires more work on the +LLVM side before it would be remotely useful. Anyway, it could be +interfaced via a custom primitive in the C code. (The latest such +experimental backend is in the branch ``llvm-translation-backend``, +which can translate PyPy with or without the JIT on Linux.) + +On the other hand, using LLVM as our JIT backend looks interesting as +well --- but again we made an attempt, and it failed: LLVM has no way to +patch the generated machine code. + +So the position of the core PyPy developers is that if anyone wants to +make an N+1'th attempt with LLVM, they are welcome, and will be happy to +provide help in the IRC channel, but they are left with the burden of proof +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/stm.rst @@ -0,0 +1,244 @@ +====================== +Transactional Memory +====================== + +.. contents:: + + +This page is about ``pypy-stm``, a special in-development version of +PyPy which can run multiple independent CPU-hungry threads in the same +process in parallel. It is side-stepping what is known in the Python +world as the "global interpreter lock (GIL)" problem. + +"STM" stands for Software Transactional Memory, the technique used +internally. 
This page describes ``pypy-stm`` from the perspective of a +user, describes work in progress, and finally gives references to more +implementation details. + +This work was done by Remi Meier and Armin Rigo. + + +Introduction and current status +=============================== + +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats +listed below, it should be in theory within 25%-50% of the speed of +PyPy, comparing the JITting version in both cases. It is called STM for +Software Transactional Memory, which is the internal technique used (see +`Reference to implementation details`_). + +**pypy-stm requires 64-bit Linux for now.** + +Development is done in the branch `stmgc-c7`_. If you are only +interested in trying it out, you can download a Ubuntu 12.04 binary +here__. The current version supports four "segments", which means that +it will run up to four threads in parallel (in other words, you get a +GIL effect again, but only if trying to execute more than 4 threads). + +To build a version from sources, you first need to compile a custom +version of clang; we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +for all checkouts). Then apply all the patches in `this directory`__: +they are fixes for the very extensive usage that pypy-stm does of a +clang-only feature (without them, you get crashes of clang). Then get +the branch `stmgc-c7`_ of PyPy and run:: + + rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py + +.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ +.. __: http://buildbot.pypy.org/nightly/stmgc-c7/ +.. __: http://clang.llvm.org/get_started.html +.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ + + +Caveats: + +* It should generally work. Please do `report bugs`_ that manifest as a + crash or wrong behavior (markedly different from the behavior of a + regular PyPy). 
Performance bugs are likely to be known issues; we're + working on them. + +* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, + which is "only" bad). Moreover, you should run it with a command like + ``pypy-stm --jit trace_limit=60000 args...``; the default value of + 6000 for ``trace_limit`` is currently too low (6000 should become + reasonable again as we improve). Also, in order to produce machine + code, the JIT needs to enter a special single-threaded mode for now. + This all means that you *will* get very bad performance results if + your program doesn't run for *many* seconds for now. + +* The GC is new; although clearly inspired by PyPy's regular GC, it + misses a number of optimizations for now. Programs allocating large + numbers of small objects that don't immediately die, as well as + programs that modify large lists or dicts, suffer from these missing + optimizations. + +* The GC has no support for destructors: the ``__del__`` method is + never called (including on file objects, which won't be closed for + you). This is of course temporary. + +* The STM system is based on very efficient read/write barriers, which + are mostly done (their placement could be improved a bit in + JIT-generated machine code). But the overall bookkeeping logic could + see more improvements (see Statistics_ below). + +* You can use `atomic sections`_, but the most visible missing thing is + that you don't get reports about the "conflicts" you get. This would + be the first thing that you need in order to start using atomic + sections more extensively. Also, for now: for better results, try to + explicitly force a transaction break just before (and possibly after) + each large atomic section, with ``time.sleep(0)``. + +* Forking the process is slow because the complete memory needs to be + copied manually right now. 
+ +* Very long-running processes should eventually crash on an assertion + error because of a non-implemented overflow of an internal 29-bit + number, but this requires at the very least ten hours --- more + probably, several days or more. + +.. _`report bugs`: https://bugs.pypy.org/ + + + +Statistics +========== + +When a non-main thread finishes, you get statistics printed to stderr, +looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + spin loop 129512 0.094 s + +The first number is a counter; the second number gives the associated +time (the amount of real time that the thread was in this state; the sum +of all the times should be equal to the total time between the thread's +start and the thread's end). The most important points are "run +committed", which gives the amount of useful work, and "outside +transaction", which should give the time spent e.g. in library calls +(right now it seems to be a bit larger than that; to investigate). +Everything else is overhead of various forms. (Short-, medium- and +long-term future work involves reducing this overhead :-) + +These statistics are not printed out for the main thread, for now. + + +Atomic sections +=============== + +While one of the goal of pypy-stm is to give a GIL-free but otherwise +unmodified Python, the other goal is to push for a better way to use +multithreading. For this, you (as the Python programmer) get an API +in the ``__pypy__.thread`` submodule: + +* ``__pypy__.thread.atomic``: a context manager (i.e. 
you use it in + a ``with __pypy__.thread.atomic:`` statement). It runs the whole + block of code without breaking the current transaction --- from + the point of view of a regular CPython/PyPy, this is equivalent to + saying that the GIL will not be released at all between the start and + the end of this block of code. + +The obvious usage is to use atomic blocks in the same way as one would +use locks: to protect changes to some shared data, you do them in a +``with atomic`` block, just like you would otherwise do them in a ``with +mylock`` block after ``mylock = thread.allocate_lock()``. This allows +you not to care about acquiring the correct locks in the correct order; +it is equivalent to having only one global lock. This is how +transactional memory is `generally described`__: as a way to efficiently +execute such atomic blocks, running them in parallel while giving the +illusion that they run in some serial order. + +.. __: http://en.wikipedia.org/wiki/Transactional_memory + +However, the less obvious intended usage of atomic sections is as a +wide-ranging replacement of explicit threads. You can turn a program +that is not multi-threaded at all into a program that uses threads +internally, together with large atomic sections to keep the behavior +unchanged. This capability can be hidden in a library or in the +framework you use; the end user's code does not need to be explicitly +aware of using threads. For a simple example of this, see +`lib_pypy/transaction.py`_. The idea is that if you have a program +where the function ``f(key, value)`` runs on every item of some big +dictionary, you can replace the loop with:: + + for key, value in bigdict.items(): + transaction.add(f, key, value) + transaction.run() + +This code runs the various calls to ``f(key, value)`` using a thread +pool, but every single call is done in an atomic section. The end +result is that the behavior should be exactly equivalent: you don't get +any extra multithreading issue. + +.. 
_`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py + +================== + +Other APIs in pypy-stm: + +* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" + in this pypy-stm. This is the limit above which more threads will not + be able to execute on more cores. (Right now it is limited to 4 due + to inter-segment overhead, but should be increased in the future. It + should also be settable, and the default value should depend on the + number of actual CPUs.) + +* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but + raises an exception if you attempt to nest it inside another + ``atomic``. + +* ``__pypy__.thread.signals_enabled``: a context manager that runs + its block with signals enabled. By default, signals are only + enabled in the main thread; a non-main thread will not receive + signals (this is like CPython). Enabling signals in non-main threads + is useful for libraries where threads are hidden and the end user is + not expecting his code to run elsewhere than in the main thread. + +Note that all of this API is (or will be) implemented in a regular PyPy +too: for example, ``with atomic`` will simply mean "don't release the +GIL" and ``getsegmentlimit()`` will return 1. + +================== + + +Reference to implementation details +=================================== + +The core of the implementation is in a separate C library called stmgc_, +in the c7_ subdirectory. Please see the `README.txt`_ for more +information. + +.. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ +.. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ +.. 
_`README.txt`: https://bitbucket.org/pypy/stmgc/raw/default/c7/README.txt + +PyPy itself adds on top of it the automatic placement of read__ and write__ +barriers and of `"becomes-inevitable-now" barriers`__, the logic to +`start/stop transactions as an RPython transformation`__ and as +`supporting`__ `C code`__, and the support in the JIT (mostly as a +`transformation step on the trace`__ and generation of custom assembler +in `assembler.py`__). + +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/readbarrier.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/memory/gctransform/stmframework.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/inevitable.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/jitdriver.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.h +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -127,3 +127,10 @@ .. branch: win32-fixes4 fix more tests for win32 + +.. branch: latest-improve-doc +Fix broken links in documentation + +.. 
branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field \ No newline at end of file diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -49,13 +49,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) @@ -3090,7 +3096,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def stmt_del_lineno(space, w_self): @@ -3117,7 +3124,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def stmt_del_col_offset(space, w_self): @@ -3155,7 +3163,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def FunctionDef_del_name(space, w_self): @@ -3314,7 +3323,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - 
w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def ClassDef_del_name(space, w_self): @@ -4635,7 +4645,8 @@ w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'module') + # need to save the original object too + w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state |= 4 def ImportFrom_del_module(space, w_self): @@ -4684,7 +4695,8 @@ w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'level') + # need to save the original object too + w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state |= 16 def ImportFrom_del_level(space, w_self): @@ -4936,7 +4948,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def expr_del_lineno(space, w_self): @@ -4963,7 +4976,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def expr_del_col_offset(space, w_self): @@ -6237,7 +6251,8 @@ w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'n') + # need to save the original object too + w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state |= 4 def Num_del_n(space, w_self): @@ -6288,7 +6303,8 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 's') + # need to save the original object too + w_self.setdictvalue(space, 's', 
w_new_value) w_self.initialization_state |= 4 def Str_del_s(space, w_self): @@ -6339,7 +6355,8 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 's') + # need to save the original object too + w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state |= 4 def Bytes_del_s(space, w_self): @@ -6438,7 +6455,8 @@ w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'attr') + # need to save the original object too + w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state |= 8 def Attribute_del_attr(space, w_self): @@ -6718,7 +6736,8 @@ w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'id') + # need to save the original object too + w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state |= 4 def Name_del_id(space, w_self): @@ -6953,7 +6972,8 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + # need to save the original object too + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Const_del_value(space, w_self): @@ -7604,7 +7624,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def excepthandler_del_lineno(space, w_self): @@ -7631,7 +7652,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def excepthandler_del_col_offset(space, w_self): @@ -7701,7 +7723,8 @@ 
w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 8 def ExceptHandler_del_name(space, w_self): @@ -7804,7 +7827,8 @@ w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'vararg') + # need to save the original object too + w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state |= 2 def arguments_del_vararg(space, w_self): @@ -7887,7 +7911,8 @@ w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'kwarg') + # need to save the original object too + w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state |= 16 def arguments_del_kwarg(space, w_self): @@ -8024,7 +8049,8 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'arg') + # need to save the original object too + w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 def arg_del_arg(space, w_self): @@ -8107,7 +8133,8 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'arg') + # need to save the original object too + w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 def keyword_del_arg(space, w_self): @@ -8190,7 +8217,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 1 def alias_del_name(space, w_self): @@ -8222,7 +8250,8 @@ w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'asname') + # 
need to save the original object too + w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state |= 2 def alias_del_asname(space, w_self): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -470,6 +470,7 @@ self.emit("raise OperationError(space.w_TypeError, " "space.w_None)", 3) else: + save_original_object = True level = 2 if field.opt and field.type.value != "int": self.emit("if space.is_w(w_new_value, space.w_None):", 2) @@ -607,13 +608,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -403,3 +403,40 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") + + def test_dict_astNode(self): + import ast + num_node = ast.Num(n=2, lineno=2, col_offset=3) + dict_res = num_node.__dict__ + + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Num_notfullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2) + assert num_node.n == 2 + assert num_node.lineno == 2 + num_node2 = copy.deepcopy(num_node) + + def test_issue1673_Num_fullinit(self): + import ast + import copy + num_node = 
ast.Num(n=2,lineno=2,col_offset=3) + num_node2 = copy.deepcopy(num_node) + assert num_node.n == num_node2.n + assert num_node.lineno == num_node2.lineno + assert num_node.col_offset == num_node2.col_offset + dict_res = num_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Str(self): + import ast + import copy + str_node = ast.Str(n=2,lineno=2) + assert str_node.n == 2 + assert str_node.lineno == 2 + str_node2 = copy.deepcopy(str_node) + dict_res = str_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2} + \ No newline at end of file diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -369,6 +369,17 @@ t = _io.TextIOWrapper(NonbytesStream(u'a')) raises(TypeError, t.read) + def test_device_encoding(self): + import os + import sys + encoding = os.device_encoding(sys.stderr.fileno()) + if not encoding: + skip("Requires a result from " + "os.device_encoding(sys.stderr.fileno())") + import _io + f = _io.TextIOWrapper(sys.stderr.buffer) + assert f.encoding == encoding + class AppTestIncrementalNewlineDecoder: def test_newline_decoder(self): @@ -477,14 +488,3 @@ _check(dec) dec = _io.IncrementalNewlineDecoder(None, translate=True) _check(dec) - - def test_device_encoding(self): - import os - import sys - encoding = os.device_encoding(sys.stderr.fileno()) - if not encoding: - skip("Requires a result from " - "os.device_encoding(sys.stderr.fileno())") - import _io - f = _io.TextIOWrapper(sys.stderr.buffer) - assert f.encoding == encoding diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -11,6 +11,9 @@ def run_subprocess(executable, args, env=None, cwd=None): return _run(executable, args, env, cwd) +shell_default = False +if sys.platform == 'win32': + shell_default = True def _run(executable, args, env, cwd): # 
unless overridden below if isinstance(args, str): @@ -21,7 +24,9 @@ args = [str(executable)] else: args = [str(executable)] + args - shell = False + # shell=True on unix-like is a known security vulnerability, but + # on windows shell=True does not properly propogate the env dict + shell = shell_default # Just before spawning the subprocess, do a gc.collect(). This # should help if we are running on top of PyPy, if the subprocess diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -414,7 +414,8 @@ try: returncode, stdout, stderr = _run_subprocess( 'nmake', - ['/nologo', '/f', str(path.join('Makefile'))] + extra_opts) + ['/nologo', '/f', str(path.join('Makefile'))] + extra_opts, + env = self.c_environ) finally: oldcwd.chdir() From noreply at buildbot.pypy.org Wed Apr 2 21:51:50 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 2 Apr 2014 21:51:50 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Fix BytesSetStrategy: bytes items would be converted to unicode! Message-ID: <20140402195150.D8DFC1D2A9A@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r70405:7c594170c5be Date: 2014-04-02 21:44 +0200 http://bitbucket.org/pypy/pypy/changeset/7c594170c5be/ Log: Fix BytesSetStrategy: bytes items would be converted to unicode! 
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1202,10 +1202,10 @@ return True def unwrap(self, w_item): - return self.space.str_w(w_item) + return self.space.bytes_w(w_item) def wrap(self, item): - return self.space.wrap(item) + return self.space.wrapbytes(item) def iter(self, w_set): return BytesIteratorImplementation(self.space, self, w_set) diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -348,6 +348,11 @@ s = set([b'hello']) assert s.pop() == b'hello' + def test_set_literal(self): + """ + assert {b'a'}.pop() == b'a' + """ + def test_compare(self): assert set('abc') != 'abc' raises(TypeError, "set('abc') < 42") From noreply at buildbot.pypy.org Wed Apr 2 22:00:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 2 Apr 2014 22:00:40 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: fix a c int overflow. refs cpython issue15989 Message-ID: <20140402200040.5D3891D2A9B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70406:66760d2d4d71 Date: 2014-04-02 12:59 -0700 http://bitbucket.org/pypy/pypy/changeset/66760d2d4d71/ Log: fix a c int overflow. 
refs cpython issue15989 diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -1,6 +1,12 @@ class AppTestTextIO: spaceconfig = dict(usemodules=['_io', '_locale']) + def setup_class(cls): + from rpython.rlib.rarithmetic import INT_MAX, UINT_MAX + space = cls.space + cls.w_INT_MAX = space.wrap(INT_MAX) + cls.w_UINT_MAX = space.wrap(UINT_MAX) + def test_constructor(self): import _io r = _io.BytesIO(b"\xc3\xa9\n\n") @@ -380,6 +386,14 @@ f = _io.TextIOWrapper(sys.stderr.buffer) assert f.encoding == encoding + def test_device_encoding_ovf(self): + import _io + b = _io.BytesIO() + b.fileno = lambda: self.INT_MAX + 1 + raises(OverflowError, _io.TextIOWrapper, b) + b.fileno = lambda: self.UINT_MAX + 1 + raises(OverflowError, _io.TextIOWrapper, b) + class AppTestIncrementalNewlineDecoder: def test_newline_decoder(self): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1353,7 +1353,7 @@ """ return space.wrap(os.ctermid()) - at unwrap_spec(fd=int) + at unwrap_spec(fd=c_int) def device_encoding(space, fd): """device_encoding(fd) -> str From noreply at buildbot.pypy.org Wed Apr 2 22:24:41 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 2 Apr 2014 22:24:41 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: CPython 3.2.5 seems to be more careful when unwrapping file descriptors. Message-ID: <20140402202441.F2D8C1D23F9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70407:c3d1b11d8ccd Date: 2014-04-02 22:20 +0200 http://bitbucket.org/pypy/pypy/changeset/c3d1b11d8ccd/ Log: CPython 3.2.5 seems to be more careful when unwrapping file descriptors. Do the same in pypy, and don't turn OverflowErrors into ValueErrors. This will fix a failure in test_fcntl, but will probably break other tests. 
Need to watch buildbot. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1606,13 +1606,7 @@ raise OperationError(self.w_TypeError, self.wrap("fileno() returned a non-integer") ) - try: - fd = self.c_int_w(w_fd) - except OperationError, e: - if e.match(self, self.w_OverflowError): - fd = -1 - else: - raise + fd = self.c_int_w(w_fd) # Can raise w_OverflowError if fd < 0: raise oefmt(self.w_ValueError, "file descriptor cannot be a negative integer (%d)", fd) From noreply at buildbot.pypy.org Wed Apr 2 22:25:32 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 2 Apr 2014 22:25:32 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: hg merge py3k Message-ID: <20140402202532.226EE1D23F9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70408:1984a39792cd Date: 2014-04-02 22:24 +0200 http://bitbucket.org/pypy/pypy/changeset/1984a39792cd/ Log: hg merge py3k diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1202,10 +1202,10 @@ return True def unwrap(self, w_item): - return self.space.str_w(w_item) + return self.space.bytes_w(w_item) def wrap(self, item): - return self.space.wrap(item) + return self.space.wrapbytes(item) def iter(self, w_set): return BytesIteratorImplementation(self.space, self, w_set) diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -348,6 +348,11 @@ s = set([b'hello']) assert s.pop() == b'hello' + def test_set_literal(self): + """ + assert {b'a'}.pop() == b'a' + """ + def test_compare(self): assert set('abc') != 'abc' raises(TypeError, "set('abc') < 42") From noreply at buildbot.pypy.org Wed Apr 2 22:49:27 2014 From: noreply at buildbot.pypy.org 
(arigo) Date: Wed, 2 Apr 2014 22:49:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Update Message-ID: <20140402204927.CF7151D25BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70409:ec66fcad5d69 Date: 2014-04-02 22:43 +0200 http://bitbucket.org/pypy/pypy/changeset/ec66fcad5d69/ Log: Update diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py --- a/lib_pypy/transaction.py +++ b/lib_pypy/transaction.py @@ -105,7 +105,11 @@ class _ThreadPool(object): def __init__(self): - self.num_threads = 4 # XXX default value, tweak + try: + from __pypy__.thread import getsegmentlimit + self.num_threads = getsegmentlimit() + except ImportError: + self.num_threads = 4 self.in_transaction = False self.transactions_run = None From noreply at buildbot.pypy.org Wed Apr 2 22:49:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 22:49:29 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Add an XXX: the -Ojit option currently implies --stm in the stm branch, but it shouldn't be the case in general Message-ID: <20140402204929.306671D25BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70410:c0394cc70841 Date: 2014-04-02 22:44 +0200 http://bitbucket.org/pypy/pypy/changeset/c0394cc70841/ Log: Add an XXX: the -Ojit option currently implies --stm in the stm branch, but it shouldn't be the case in general diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -331,7 +331,7 @@ 'mem': DEFL_GC + ' lowinline remove_asserts removetypeptr', '2': DEFL_GC + ' extraopts', '3': DEFL_GC + ' extraopts remove_asserts', - 'jit': 'stmgc extraopts jit stm', + 'jit': 'stmgc extraopts jit stm', # XXX STM TEMPORARY } def set_opt_level(config, level): From noreply at buildbot.pypy.org Wed Apr 2 22:49:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 22:49:30 +0200 (CEST) 
Subject: [pypy-commit] pypy stmgc-c7: Argh argh argh. Message-ID: <20140402204930.A7ECE1D25BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70411:fa48683fb7c7 Date: 2014-04-02 22:46 +0200 http://bitbucket.org/pypy/pypy/changeset/fa48683fb7c7/ Log: Argh argh argh. diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2463,7 +2463,12 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_adr1-1, chr(offset)) # write down the tid, but not if it's the result of the CALL - self.mc.MOV(mem(self.SEGMENT_GC, eax, 0), imm(arraydescr.tid)) + if self.cpu.gc_ll_descr.stm: + assert IS_X86_64 + self.mc.MOV32(mem(self.SEGMENT_GC, eax, rstm.tid_offset), + imm(arraydescr.tid)) + else: + self.mc.MOV(mem(self.SEGMENT_GC, eax, 0), imm(arraydescr.tid)) # while we're at it, this line is not needed if we've done the CALL self.mc.MOV(heap(self.SEGMENT_GC, nursery_free_adr), edi) # From noreply at buildbot.pypy.org Wed Apr 2 22:49:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Apr 2014 22:49:32 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: fixes on 32-bit Message-ID: <20140402204932.1161D1D25BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70412:907b74dcb682 Date: 2014-04-02 22:48 +0200 http://bitbucket.org/pypy/pypy/changeset/907b74dcb682/ Log: fixes on 32-bit diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -84,7 +84,7 @@ return self.type == FLOAT def add_offset(self, ofs): - return RawEbpLoc(self.value + ofs) + return RawEbpLoc(self.segment, self.value + ofs) def is_stack(self): return True @@ -283,7 +283,7 @@ result = instantiate(AddressLoc) result._location_code = self._location_code if self._location_code == 'm': - result.loc_m = self.loc_m[:2] + (self.loc_m[2] + ofs) 
+ result.loc_m = self.loc_m[:2] + (self.loc_m[2] + ofs,) elif self._location_code == 'a': result.loc_a = self.loc_a[:4] + (self.loc_a[4] + ofs,) elif self._location_code == 'j': From noreply at buildbot.pypy.org Wed Apr 2 23:49:02 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 2 Apr 2014 23:49:02 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: Fix translation Message-ID: <20140402214902.E5D891D25BB@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70413:a2f27f0864c5 Date: 2014-04-02 23:47 +0200 http://bitbucket.org/pypy/pypy/changeset/a2f27f0864c5/ Log: Fix translation diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -306,7 +306,7 @@ for i, default in enumerate(args.kw_defaults): if default: kwonly = args.kwonlyargs[i] - mangled = self.scope.mangle(kwonly.arg.decode('utf-8')) + mangled = self.scope.mangle(kwonly.arg).decode('utf-8') self.load_const(self.space.wrap(mangled)) default.walkabout(self) defaults += 1 From noreply at buildbot.pypy.org Thu Apr 3 01:23:25 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 3 Apr 2014 01:23:25 +0200 (CEST) Subject: [pypy-commit] pypy default: py3k compat Message-ID: <20140402232325.16F951C320C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70414:9745f0e775c3 Date: 2014-04-02 16:22 -0700 http://bitbucket.org/pypy/pypy/changeset/9745f0e775c3/ Log: py3k compat diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -19,5 +19,5 @@ fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) imp.load_module('_ctypes_test', fp, filename, description) except ImportError: - print 'could not find _ctypes_test in',output_dir + print('could not find _ctypes_test in %s' % output_dir) 
_pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) From noreply at buildbot.pypy.org Thu Apr 3 01:42:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 3 Apr 2014 01:42:45 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: reapply 4899736f152f to lib-python/3/ (no sys.getsizeof on pypy) Message-ID: <20140402234245.A73FA1C320C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70415:e1402dbb190e Date: 2014-04-02 16:41 -0700 http://bitbucket.org/pypy/pypy/changeset/e1402dbb190e/ Log: reapply 4899736f152f to lib-python/3/ (no sys.getsizeof on pypy) diff --git a/lib-python/3/test/test_marshal.py b/lib-python/3/test/test_marshal.py --- a/lib-python/3/test/test_marshal.py +++ b/lib-python/3/test/test_marshal.py @@ -293,13 +293,13 @@ self.check_unmarshallable([None] * size) @support.bigmemtest(size=LARGE_SIZE, - memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1), + memuse=pointer_size*12, # + sys.getsizeof(LARGE_SIZE-1), dry_run=False) def test_set(self, size): self.check_unmarshallable(set(range(size))) @support.bigmemtest(size=LARGE_SIZE, - memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1), + memuse=pointer_size*12, # + sys.getsizeof(LARGE_SIZE-1), dry_run=False) def test_frozenset(self, size): self.check_unmarshallable(frozenset(range(size))) From noreply at buildbot.pypy.org Thu Apr 3 01:46:23 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 3 Apr 2014 01:46:23 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: we need a collect here Message-ID: <20140402234623.A053A1C320C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70416:ebd9a9125c8c Date: 2014-04-02 16:45 -0700 http://bitbucket.org/pypy/pypy/changeset/ebd9a9125c8c/ Log: we need a collect here diff --git a/lib-python/3/test/test_weakref.py b/lib-python/3/test/test_weakref.py --- a/lib-python/3/test/test_weakref.py +++ b/lib-python/3/test/test_weakref.py @@ -1153,6 +1153,7 @@ 
self.assertEqual(dict, ddict) with testcontext() as (k, v): dict.clear() + gc_collect() self.assertEqual(len(dict), 0) def test_weak_keys_destroy_while_iterating(self): From noreply at buildbot.pypy.org Thu Apr 3 11:31:08 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:31:08 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Enabled parallel forking using new Primitive 787 and rthread. Race conditions out there! Message-ID: <20140403093108.58E3A1C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r741:a4ae5038270e Date: 2013-12-18 15:38 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a4ae5038270e/ Log: Enabled parallel forking using new Primitive 787 and rthread. Race conditions out there! diff too long, truncating to 2000 out of 81690 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! 
!ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. 
Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. 
[(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). 
If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. ^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. 
"If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. "Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! 
compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! 
I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. 
The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! 
init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! 
context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! 
source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! 
evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. 
"Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. 
Make sure current locale corresponds my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself.
"Copy every ancestor version the release repository is missing; warn when trunk no longer holds it." MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ "Use the repository's description: #, requires a String argument, not an MCRepository." Warning signal: eachVersionInfo name , ' not found in ', trunkRep description ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces "Open the staggered cascade of welcome workspaces shown on first startup." TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ "Print the Point: #, requires a String argument." Warning signal: 'Display extent not set to ', extent printString ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'!
setPreferences "Install the release's default look-and-feel and UI preference settings." Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! switchToNewRepository "Make the release repository the default update source and drop any leftover squeak44 repository." | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock "Parse only a method pattern from aString, handing the result to aBlock; answers nil on failure." | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471!
parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock "Bridge the legacy parse signature onto the CompilationCue-based protocol." | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req "Remember the source and build the cue used by the subsequent compilation steps." self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req "As above, but without a category." self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506!
init: aClass context: aContext notifying: anObject "Wrap the legacy arguments in a CompilationCue and initialize from it." | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | "NOTE(review): the test uses self, not the block argument subclass, so every iteration repeats the same check -- confirm whether subclass was intended." (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver.
First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: ["Drop class variables no longer listed." (classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable'].
self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ "During a fileIn, silently move the still-referenced variable to Undeclared instead of prompting." InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. "Adopt the new name before asking the environment to re-key us." self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]].
oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. "All mutation happens inside this block; the ensure: below always detaches the copy and flushes obsolete subclasses." [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol].
organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables."
| oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. "Keep the #compiledMethod format when the class being redefined already uses it." actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields'].
(aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. cue requestor select.
"Build the menu with alternatives" correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset. correctedSelector ifNil: [^self fail]. "Execute the selected action" offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset. token := (correctedSelector readStream upTo: Character space) asSymbol! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136! collectTemporaryDeclarationsFrom: methodNode | tempsMarks str | tempsMarks := OrderedCollection new. str := cue requestor text asString. methodNode accept: (ParseNodeEnumerator ofBlock: [ :aNode | | mark | (aNode class canUnderstand: #tempsMark) ifTrue: [mark := aNode tempsMark. (mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ]) ifTrue: [ tempsMarks addLast: aNode ]]]). (tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ]) ifTrue: [ tempsMarks addLast: self ]. ^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606! correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction "Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts." | correctSelector userSelection | "If we can't ask the user, assume that the keyword will be defined later" self interactive ifFalse: [^proposedKeyword asSymbol]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spots first first to: spots last last. cue requestor select. 
correctSelector := UnknownSelector name: proposedKeyword. correctSelector ifNil: [^abortAction value]. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. self substituteSelector: correctSelector keywords wordIntervals: spots. ^(proposedKeyword last ~~ $: and: [correctSelector last == $:]) ifTrue: [abortAction value] ifFalse: [correctSelector]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242! correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. "First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. 
^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. 
d2 ifEmpty: [d2 := def]. "Evaluate the (possibly edited) definition, then answer a global node for the newly created class." Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." | descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance.
self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223! notify: string at: location "Raise a syntax error notification, or route the message to the requestor when one is present; always fails the parse." cue requestor isNil ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"]. SyntaxErrorNotification inClass: encoder classEncoding category: cue category withCode: (source contents asText copyReplaceFrom: location to: location - 1 with: ((string , ' ->') asText allBold addAttribute: TextColor red; yourself)) doitFlag: doitFlag errorMessage: string location: location] ifFalse: [cue requestor notify: string , ' ->' at: location in: source]. ^self fail! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108! pasteTempAtMethodLevel: name | insertion delta theTextString characterBeforeMark | theTextString := cue requestor text string. characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ]. (theTextString at: tempsMark) = $| ifTrue: [ "Paste it before the second vertical bar" insertion := name, ' '. characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion]. delta := 0. ] ifFalse: [ "No bars - insert some with CR, tab" insertion := '| ' , name , ' |',String cr. delta := 2. "the bar and CR" characterBeforeMark = Character tab ifTrue: [ insertion := insertion , String tab. delta := delta + 1. "the tab" ]. ]. tempsMark := tempsMark + (self substituteWord: insertion wordInterval: (tempsMark to: tempsMark-1) offset: 0) - delta! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305!
queryUndefined "Highlight the undefined variable in the requestor and ask the user whether to proceed; fail the parse if they decline." | varStart varName | varName := parseNode key. varStart := self endOfLastToken + requestorOffset - varName size + 1. cue requestor selectFrom: varStart to: varStart + varName size - 1; select. (UndefinedVariable name: varName) ifFalse: [^ self fail]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341! removeEmptyTempDeclarationsFrom: methodNode "Remove now-empty '| |' (or '||') temp declarations from the requestor's text, widening each deletion to the whole line when nothing else is on it." | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..."
start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. (end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset."
cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863!
compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! 
parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! 
classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. 
"Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. 
"This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. 
"was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). 
"53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). "Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. 
SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! 
I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! !Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Thu Apr 3 11:31:09 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:31:09 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Spinlock as RThread_Lock replacement. Continuation of parent thread still not working. Message-ID: <20140403093109.82C1F1C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r742:d6f88032bd75 Date: 2014-01-06 15:15 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d6f88032bd75/ Log: Spinlock as RThread_Lock replacement. Continuation of parent thread still not working. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -37,6 +37,7 @@ def __init__(self): #self.lock = rthread.allocate_lock() + self.lock = 0 # critical values, only modify under lock: self.interp = None @@ -46,6 +47,11 @@ # wait for previous thread to start, then set global state def acquire(interp, w_frame): #bootstrapper.lock.acquire(True) + while bootstrapper.lock: + rstm.should_break_transaction() + rstm.jit_stm_transaction_break_point(True) + bootstrapper.lock = 1 + bootstrapper.interp = interp bootstrapper.w_frame = w_frame @@ -56,6 +62,7 @@ bootstrapper.interp = None bootstrapper.w_frame = None #bootstrapper.lock.release() + bootstrapper.lock = 0 release = staticmethod(release) @@ -65,8 +72,8 @@ #rthread.gc_thread_start() interp = bootstrapper.interp w_frame = bootstrapper.w_frame - assert isinstance(interp, Interpreter) - assert isinstance(w_frame, model.W_PointersObject) + assert isinstance(interp, Interpreter), "Race-condition exploded!" 
+ assert isinstance(w_frame, model.W_PointersObject), "Race-condition exploded!" bootstrapper.num_threads += 1 bootstrapper.release() From noreply at buildbot.pypy.org Thu Apr 3 11:31:10 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:31:10 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Add build script vm.py | disabled some weakrefs/virtualizables for later JIT usage Message-ID: <20140403093110.8C52B1C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r743:d3452dae8498 Date: 2014-01-07 15:12 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d3452dae8498/ Log: Add build script vm.py | disabled some weakrefs/virtualizables for later JIT usage diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,5 +1,6 @@ import py import os +import time from spyvm.shadow import ContextPartShadow, MethodContextShadow, BlockContextShadow, MethodNotFound from spyvm import model, constants, primitives, conftest, wrapper from spyvm.tool.bitmanipulation import splitter @@ -36,8 +37,8 @@ """ def __init__(self): - #self.lock = rthread.allocate_lock() - self.lock = 0 + self.lock = rthread.allocate_lock() + #self.lock = 0 # critical values, only modify under lock: self.interp = None @@ -46,11 +47,16 @@ # wait for previous thread to start, then set global state def acquire(interp, w_frame): - #bootstrapper.lock.acquire(True) - while bootstrapper.lock: - rstm.should_break_transaction() - rstm.jit_stm_transaction_break_point(True) - bootstrapper.lock = 1 + bootstrapper.lock.acquire(True) + #while bootstrapper.lock: + # rstm.should_break_transaction() + # rstm.jit_stm_transaction_break_point(True) + # if bootstrapper.lock: + # print "Waiting for lock" + # time.sleep(100) + # if bootstrapper.lock: + # print "Overriding lock!" 
+ #bootstrapper.lock = 1 bootstrapper.interp = interp bootstrapper.w_frame = w_frame @@ -61,8 +67,8 @@ def release(): bootstrapper.interp = None bootstrapper.w_frame = None - #bootstrapper.lock.release() - bootstrapper.lock = 0 + bootstrapper.lock.release() + #bootstrapper.lock = 0 release = staticmethod(release) @@ -73,7 +79,7 @@ interp = bootstrapper.interp w_frame = bootstrapper.w_frame assert isinstance(interp, Interpreter), "Race-condition exploded!" - assert isinstance(w_frame, model.W_PointersObject), "Race-condition exploded!" + assert isinstance(w_frame, model.W_PointersObject), "Race-condition exploded!l" bootstrapper.num_threads += 1 bootstrapper.release() @@ -97,7 +103,7 @@ jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], reds=['s_context'], - virtualizables=['s_context'], + # virtualizables=['s_context'], get_printable_location=get_printable_location ) @@ -185,7 +191,6 @@ def c_loop(self, s_context, may_context_switch=True): #rstm.should_break_transaction() old_pc = 0 - last_breakpoint = 0 if not jit.we_are_jitted() and may_context_switch: self.quick_check_for_interrupt(s_context) @@ -204,7 +209,6 @@ pc=pc, self=self, method=method, s_context=s_context) try: - last_breakpoint += 1 self.step(s_context) except Return, nlr: if nlr.s_target_context is not s_context: @@ -220,10 +224,6 @@ print "Interpreter loop about to fork" self.fork(f.w_frame) - if last_breakpoint >= 1000: - rstm.jit_stm_transaction_break_point(True) - last_breakpoint = 0 - def _get_adapted_tick_counter(self): # Normally, the tick counter is decremented by 1 for every message send. 
From noreply at buildbot.pypy.org Thu Apr 3 11:31:20 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:31:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Build script++; Virtualizables--; JIT kaputt Message-ID: <20140403093120.021DF1C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r744:040feeb63de9 Date: 2014-01-07 15:16 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/040feeb63de9/ Log: Build script++; Virtualizables--; JIT kaputt diff too long, truncating to 2000 out of 82237 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! 
bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. 
Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. 
[(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). 
If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. ^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. 
"If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. "Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! 
compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! 
I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. 
The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! 
init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! 
context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! 
source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! 
evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. 
"Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. 
Make sure current locale corresponds my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. 
MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! 
setPreferences Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! switchToNewRepository | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! 
parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! 
init: aClass context: aContext notifying: anObject | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. 
First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. 
self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. 
oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. 
organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." 
| oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. 
(aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. cue requestor select. 
"Build the menu with alternatives"
correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset.
correctedSelector ifNil: [^self fail].
"Execute the selected action"
offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0.
cue requestor deselect.
cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset.
"The corrected source is substituted; re-read the selector token up to the first space."
token := (correctedSelector readStream upTo: Character space) asSymbol! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136!
collectTemporaryDeclarationsFrom: methodNode
	"Answer the nodes of methodNode (and possibly the parser itself) whose tempsMark is a valid index into the requestor's text and points at a $| character, sorted by descending tempsMark."
	| tempsMarks str |
	tempsMarks := OrderedCollection new.
	str := cue requestor text asString.
	methodNode accept: (ParseNodeEnumerator ofBlock:
		[ :aNode | | mark |
		(aNode class canUnderstand: #tempsMark) ifTrue:
			[mark := aNode tempsMark.
			(mark notNil
				and: [ mark between: 1 and: str size ]
				and: [ (str at: mark) = $| ]) ifTrue:
					[ tempsMarks addLast: aNode ]]]).
	"The parser's own tempsMark covers the method-level temp declaration."
	(tempsMark notNil
		and: [ tempsMark between: 1 and: str size ]
		and: [ (str at: tempsMark) = $| ]) ifTrue:
			[ tempsMarks addLast: self ].
	^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606!
correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction
	"Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the text stream, one for each of the keyword parts."
	| correctSelector userSelection |
	"If we can't ask the user, assume that the keyword will be defined later"
	self interactive ifFalse: [^proposedKeyword asSymbol].
	userSelection := cue requestor selectionInterval.
	cue requestor selectFrom: spots first first to: spots last last.
	cue requestor select.
correctSelector := UnknownSelector name: proposedKeyword. correctSelector ifNil: [^abortAction value]. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. self substituteSelector: correctSelector keywords wordIntervals: spots. ^(proposedKeyword last ~~ $: and: [correctSelector last == $:]) ifTrue: [abortAction value] ifFalse: [correctSelector]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242! correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. "First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. 
^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. 
d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." | descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. 
self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223! notify: string at: location cue requestor isNil ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"]. SyntaxErrorNotification inClass: encoder classEncoding category: cue category withCode: (source contents asText copyReplaceFrom: location to: location - 1 with: ((string , ' ->') asText allBold addAttribute: TextColor red; yourself)) doitFlag: doitFlag errorMessage: string location: location] ifFalse: [cue requestor notify: string , ' ->' at: location in: source]. ^self fail! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108! pasteTempAtMethodLevel: name | insertion delta theTextString characterBeforeMark | theTextString := cue requestor text string. characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ]. (theTextString at: tempsMark) = $| ifTrue: [ "Paste it before the second vertical bar" insertion := name, ' '. characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion]. delta := 0. ] ifFalse: [ "No bars - insert some with CR, tab" insertion := '| ' , name , ' |',String cr. delta := 2. "the bar and CR" characterBeforeMark = Character tab ifTrue: [ insertion := insertion , String tab. delta := delta + 1. "the tab" ]. ]. tempsMark := tempsMark + (self substituteWord: insertion wordInterval: (tempsMark to: tempsMark-1) offset: 0) - delta! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305! 
queryUndefined | varStart varName | varName := parseNode key. varStart := self endOfLastToken + requestorOffset - varName size + 1. cue requestor selectFrom: varStart to: varStart + varName size - 1; select. (UndefinedVariable name: varName) ifFalse: [^ self fail]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341! removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." 
start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. (end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." 
cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! 
compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag
	"Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted."
	| methodNode method theClass |
	theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class.
	self from: textOrStream class: theClass context: aContext notifying: aRequestor.
	methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value].
	"When interactive, generate the method with its temp names retained."
	method := self interactive
		ifTrue: [ methodNode generateWithTempNames ]
		ifFalse: [methodNode generate].
	logFlag ifTrue:
		[SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext].
	^method! !

!Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593!
format: aStream noPattern: noPattern ifFail: failBlock
	"Parse aStream via the compilation cue and answer the result of sending #preen to the parse tree; failBlock is evaluated on parse failure."
	^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! !

!Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325!
interactive
	"Answer true if compilation is interactive"
	^ cue requestor notNil! !

!Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387!
notify: aString at: location
	"Refer to the comment in Object|notify:."
	"With no requestor, raise a SyntaxErrorNotification carrying the source annotated at location; otherwise forward the notification to the requestor."
	^ cue requestor == nil
		ifTrue: [SyntaxErrorNotification
					inClass: cue getClass
					category: cue category
					withCode: (sourceStream contents
						copyReplaceFrom: location
						to: location - 1
						with: aString)
					doitFlag: false
					errorMessage: aString
					location: location]
		ifFalse: [cue requestor notify: aString at: location in: sourceStream]! !

!Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201!
parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! 
classEncoding
	"This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view."
	^ cue getClass! !

!Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819!
encodeLiteral: object
	"Answer the LiteralNode for object, keyed by the value the cue reports via #literalScannedAs:notifying: and cached in litSet."
	^self name: object
		key: (cue literalScannedAs: object notifying: self)
		class: LiteralNode
		type: LdLitType
		set: litSet! !

!Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010!
encodeSelector: aSelector
	"Answer the SelectorNode for aSelector, cached in selectorSet."
	^self name: aSelector
		key: aSelector
		class: SelectorNode
		type: SendType
		set: selectorSet! !

!Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123!
environment
	"Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)"
	^cue environment! !

!Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497!
lookupInPools: varName ifFound: assocBlock
	"If varName is an interned Symbol, look up its binding via the cue: with no binding answer false, otherwise evaluate assocBlock with the association."
	^Symbol hasInterned: varName ifTrue:
		[:sym|
		(cue bindingOf: sym)
			ifNil: [^false]
			ifNotNil: [:assoc| assocBlock value: assoc]]! !

!Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306!
possibleNamesFor: proposedName
	"Answer spelling-correction candidates for proposedName drawn from the cue's class."
	| results |
	results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil.
	^ proposedName correctAgainst: nil continuedFrom: results.
! !

!Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012!
possibleVariablesFor: proposedVariable
	"Answer correction candidates for proposedVariable: first from scopeTable, then — when the first character can begin a global variable name — from variables visible in the cue's class."
	| results |
	results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil.
	proposedVariable first canBeGlobalVarInitial ifTrue:
		[ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ].
	^ proposedVariable correctAgainst: nil continuedFrom: results.
! !

!Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830!
undeclared: name
	"NOTE(review): this method continues past this line of the fileout."
	| sym |
	requestor interactive ifTrue:
		[requestor requestor == #error: ifTrue: [requestor error: 'Undeclared'].
		^self notify: 'Undeclared'].
"Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. 
"This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. 
"was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). 
"53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). "Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. 
SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! 
I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! !Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Thu Apr 3 11:31:21 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:31:21 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: STM-Aware lock replacement in place (atomic test_and_set) Message-ID: <20140403093121.353211C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r745:3bbfc63068b5 Date: 2014-01-07 18:27 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/3bbfc63068b5/ Log: STM-Aware lock replacement in place (atomic test_and_set) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -28,6 +28,30 @@ return '%d: [%s]%s (%s)' % (pc, hex(bc), BYTECODE_NAMES[bc], name) +class SpinLock(object): + """Replacement when rthread.Lock is not working""" + + def __init__(self, initial=0): + assert initial == 0 or initial == 1 + self._value = initial + + def _test_and_set(self): + rstm.increment_atomic() + old_value = self._value + self._value = 1 + rstm.decrement_atomic() + return old_value + + def wait(self): + while self._test_and_set(): + time.sleep(0.001) + rstm.should_break_transaction() + + def signal(self): + self._value = 0 + rstm.should_break_transaction() + + class Bootstrapper(object): """ @@ -37,8 +61,8 @@ """ def __init__(self): - self.lock = rthread.allocate_lock() - #self.lock = 0 + #self.lock = rthread.allocate_lock() + self.lock = SpinLock() # critical values, only modify under lock: self.interp = None @@ -47,16 +71,8 @@ # wait for previous thread to start, then set global state def acquire(interp, w_frame): - bootstrapper.lock.acquire(True) - #while bootstrapper.lock: 
- # rstm.should_break_transaction() - # rstm.jit_stm_transaction_break_point(True) - # if bootstrapper.lock: - # print "Waiting for lock" - # time.sleep(100) - # if bootstrapper.lock: - # print "Overriding lock!" - #bootstrapper.lock = 1 + #bootstrapper.lock.acquire(True) + bootstrapper.lock.wait() bootstrapper.interp = interp bootstrapper.w_frame = w_frame @@ -67,15 +83,14 @@ def release(): bootstrapper.interp = None bootstrapper.w_frame = None - bootstrapper.lock.release() - #bootstrapper.lock = 0 + bootstrapper.lock.signal() + #bootstrapper.lock.release() release = staticmethod(release) # HUGE RACE CONDITON!!! def bootstrap(): print "New thread reporting" - #rthread.gc_thread_start() interp = bootstrapper.interp w_frame = bootstrapper.w_frame assert isinstance(interp, Interpreter), "Race-condition exploded!" @@ -88,7 +103,6 @@ # cleanup bootstrapper.num_threads -= 1 - #rthread.gc_thread_die() bootstrap = staticmethod(bootstrap) From noreply at buildbot.pypy.org Thu Apr 3 11:31:30 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:31:30 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: BROKEN: implemented wait on STMProcess. Segfaults at _sendSelfSelector. Message-ID: <20140403093130.B24C31C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r746:eb047d8a7d72 Date: 2014-01-13 00:12 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/eb047d8a7d72/ Log: BROKEN: implemented wait on STMProcess. Segfaults at _sendSelfSelector. diff too long, truncating to 2000 out of 82654 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! 
!Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! 
!LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." 
| mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. [(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! 
asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. 
^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. "If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. "Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. 
"Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! 
bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! 
Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. 
cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. 
[repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! 
source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! 
!CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. 
^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. "Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! 
!ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'!
scanFrom: aStream environment: anEnvironment
	"Environment-aware variant; delegates to the legacy entry point,
	 ignoring anEnvironment."
	^ self scanFrom: aStream! !

!PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'!
literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor
	"PseudoClasses perform no literal rebinding; answer the literal unchanged."
	^ scannedLiteral! !

!InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'!
scanFrom: aStream environment: anEnvironment
	"Read a definition of dictionary.
	 Make sure current locale corresponds my locale id"
	| aString newTranslations assoc currentPlatform |
	newTranslations := Dictionary new.
	currentPlatform := Locale currentPlatform.
	["Temporarily switch to this translator's locale while reading chunks."
	Locale currentPlatform: (Locale localeID: id).
	[aString := aStream nextChunk withSqueakLineEndings.
	aString size > 0] whileTrue:
		[assoc := Compiler evaluate: aString environment: anEnvironment.
		assoc value = ''
			ifTrue: [self class registerPhrase: assoc key]
			ifFalse: [newTranslations add: assoc]]]
		ensure: [Locale currentPlatform: currentPlatform].
	self mergeTranslations: newTranslations! !

!NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'!
scanFrom: aStream environment: anEnvironment
	"Read a definition of dictionary.
	 Make sure current locale corresponds my locale id"
	| newTranslations currentPlatform |
	newTranslations := Dictionary new.
	currentPlatform := Locale currentPlatform.
	[| aString assoc |
	"Temporarily switch to this translator's locale while reading chunks."
	Locale currentPlatform: (Locale localeID: id).
	[aString := aStream nextChunk withSqueakLineEndings.
	aString size > 0] whileTrue:
		[assoc := Compiler evaluate: aString environment: anEnvironment.
		assoc value = ''
			ifTrue: [self class registerPhrase: assoc key]
			ifFalse: [newTranslations add: assoc]]]
		ensure: [Locale currentPlatform: currentPlatform].
	self mergeTranslations: newTranslations! !

!ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'!
scanFrom: aByteStream environment: anEnvironment
	"This should probably be reimplemented using an environment
	 for compilation. For now, don't change anything"
	^ self scanFrom: aByteStream! !

!SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'!
bindingOf: varName ifAbsent: aBlock
	"SystemDictionary includes Symbols only"
	^super bindingOf: varName asSymbol ifAbsent: aBlock! !

!SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'!
undeclared
	"Answer the Undeclared pool registered in this dictionary."
	^ self at: #Undeclared! !
"System"!

!ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955!
expectedFailures
	"No expected failures in this test case."
	^ #().! !
"Tests"!

ReleaseBuilder subclass: #ReleaseBuilderFor4dot5
	instanceVariableNames: ''
	classVariableNames: ''
	poolDictionaries: ''
	category: 'ReleaseBuilder'!

!ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0!
The release builder for Squeak 4.5!

!ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'!
transferCurrentPackagesAsUser: username password: password
	"Copy the packages currently loaded in the image from the trunk
	 repository to my releaseRepository."
	| trunkRep releaseRep |
	trunkRep := self trunkRepository.
	releaseRep := self releaseRepository
		user: username;
		password: password;
		yourself.
	MCWorkingCopy allManagers do:
		[ : eachWorkingCopy | eachWorkingCopy ancestors do:
			[ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse:
				[ (trunkRep versionWithInfo: eachVersionInfo)
					ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ]
					ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! !

!ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'!
openWelcomeWorkspaces
	"Open the standard set of welcome workspaces, staggered across the screen."
	TheWorldMainDockingBar instance
		showWelcomeText: #squeakUserInterface
		label: 'Squeak User Interface'
		in: (40 @ 40 extent: 500 @ 300).
	TheWorldMainDockingBar instance
		showWelcomeText: #workingWithSqueak
		label: 'Working With Squeak'
		in: (80 @ 80 extent: 500 @ 300).
	TheWorldMainDockingBar instance
		showWelcomeText: #licenseInformation
		label: 'License Information'
		in: (120 @ 120 extent: 500 @ 300).
	TheWorldMainDockingBar instance
		showWelcomeText: #welcomeFutureDirections
		label: 'Future Directions'
		in: (160 @ 160 extent: 500 @ 300).
	TheWorldMainDockingBar instance
		showWelcomeText: #welcomeToSqueak
		label: 'Welcome to Squeak 4.5'
		in: (200 @ 200 extent: 500 @ 300)! !

!ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'!
prepareNewBuild
	"Run the generic preparation, then reinitialize the mock package info."
	super prepareNewBuild.
	MCMockPackageInfo initialize.! !

!ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'!
releaseRepository
	"At release time, change 'trunk' to 'squeak45'."
	^ MCHttpRepository
		location: 'http://source.squeak.org/trunk'
		user: 'squeak'
		password: 'squeak'! !

!ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'!
setDisplayExtent: extent
	"Uncomment next line when the primitives become available in the Squeak VM."
	" DisplayScreen hostWindowSize: extent."
	Display extent = extent ifFalse:
		[ Warning signal: 'Display extent not set to ', extent ]! !

!ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'!
setPreferences
	"Install the UI preferences shipped with this release."
	Preferences
		installBrightWindowColors ;
		setPreference: #scrollBarsWithoutMenuButton toValue: true ;
		setPreference: #swapMouseButtons toValue: true ;
		setPreference: #annotationPanes toValue: true ;
		setPreference: #showSplitterHandles toValue: false ;
		setPreference: #showBoundsInHalo toValue: true ;
		setPreference: #alternateHandlesLook toValue: false ;
		setPreference: #roundedMenuCorners toValue: false ;
		setPreference: #roundedWindowCorners toValue: false.
	PluggableButtonMorph roundedButtonCorners: false.
	FillInTheBlankMorph roundedDialogCorners: false.
	Workspace shouldStyle: false.
	NetNameResolver enableIPv6: true.! !

!ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'!
switchToNewRepository
	"Point the updater at the release repository and swap out any
	 registered squeak44 repository for it."
	| old44Repository |
	MCMcmUpdater defaultUpdateURL: self releaseRepository description.
	old44Repository := MCRepositoryGroup default repositories
		detect: [:each | each description includesSubString: 'squeak44']
		ifNone: [nil].
	old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository].
	MCRepositoryGroup default addRepository: self releaseRepository! !

!ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'!
versionString
	"Answer the marketing version string for this release."
	^ 'Squeak4.5'.! !

ReleaseBuilder class removeSelector: #transferCurrentPackages!
"ReleaseBuilder"!

!Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114!
initialize
	"Class-side initialization: install the environment support."
	self install! !
"Environments"!

!Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878!
initPattern: aString notifying: req return: aBlock
	"Parse just the message pattern of aString and hand the resulting
	 pattern to aBlock; answer its value, or nil if parsing fails."
	| result |
	self
		init: (ReadStream on: aString asString)
		cue: (CompilationCue source: aString requestor: req)
		failBlock: [^nil].
	encoder := self.
	result := aBlock value: (self pattern: false inContext: nil).
	encoder := failBlock := nil. "break cycles"
	^result! !

!Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471!
parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock
	"Legacy entry point: bundle the arguments into a CompilationCue and
	 delegate to the cue-based parse."
	| c |
	c := CompilationCue
		source: sourceStream
		context: aContext
		class: class
		category: aCategory
		requestor: req.
	^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! !

!Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963!
evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag
	"Compiles the sourceStream into a parse tree, then generates code into
	 a method. If aContext is not nil, the text can refer to temporaries in
	 that context (the Debugger uses this). If aRequestor is not nil, then
	 it will receive a notify:at: message before the attempt to evaluate is
	 aborted. Finally, the compiled method is invoked from here via
	 withArgs:executeMethod:, hence the system no longer creates Doit method
	 litter on errors."
	| theClass |
	"When a context is supplied, compile against the context receiver's class."
	theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class).
	self setCue: (CompilationCue
		source: textOrStream
		context: aContext
		receiver: receiver
		class: theClass
		environment: theClass environment
		category: nil
		requestor: aRequestor).
	^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! !

!Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488!
from: textOrStream class: aClass classified: aCategory context: aContext notifying: req
	"Legacy initializer: record the source and build the equivalent cue."
	self source: textOrStream.
	self setCue:
		(CompilationCue
			source: textOrStream
			context: aContext
			class: aClass
			category: aCategory
			requestor: req)! !

!Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309!
from: textOrStream class: aClass context: aContext notifying: req
	"Legacy initializer without a category: record the source and build the cue."
	self source: textOrStream.
	self setCue:
		(CompilationCue
			source: textOrStream
			context: aContext
			class: aClass
			requestor: req)
! !

!Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506!
init: aClass context: aContext notifying: anObject
	"Legacy initializer: wrap the class and context in a cue."
	| c |
	c := CompilationCue
		context: aContext
		class: aClass
		requestor: nil.
	self init: c notifying: anObject! !

!Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698!
temps: tempVars literals: lits class: cl
	"Initialize this encoder for decompilation."
	self setCue: (CompilationCue class: cl).
	supered := false.
	nTemps := tempVars size.
	tempVars do: [:node | scopeTable at: node name put: node].
	literalStream := WriteStream on: (Array new: lits size).
	literalStream nextPutAll: lits.
	sourceRanges := Dictionary new: 32.
	globalSourceRanges := OrderedCollection new: 32.! !
"Compiler"!
!Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010!
addClassVarName: aString
	"Add the argument, aString, as a class variable of the receiver.
	 Signal an error if the first character of aString is not capitalized,
	 or if it is already a variable named in the class."
	| symbol oldState |
	oldState := self copy.
	aString first canBeGlobalVarInitial
		ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.'].
	symbol := aString asSymbol.
	self withAllSubclasses do:
		[:subclass |
		"NOTE(review): the check uses self, not subclass — looks like it
		 tests the same class repeatedly; confirm against the original."
		(self canFindWithoutEnvironment: symbol) ifTrue: [
			(DuplicateVariableError new)
				superclass: superclass; "fake!!!!!!"
				variable: aString;
				signal: aString, ' is already defined']].
	classPool == nil ifTrue: [classPool := Dictionary new].
	(classPool includesKey: symbol) ifFalse:
		["Pick up any refs in Undeclared"
		classPool declare: symbol from: environment undeclared.
		SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! !

!Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024!
bindingOf: varName
	"Look the variable up in the receiver's own environment."
	^ self bindingOf: varName environment: self environment! !

!Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804!
category
	"Answer the system organization category for the receiver. First check
	 whether the category name stored in the ivar is still correct and only
	 if this fails look it up (latter is much more expensive)"
	category ifNotNil: [ :symbol |
		((self environment organization listAtCategoryNamed: symbol) includes: self name)
			ifTrue: [ ^symbol ] ].
	category := self environment organization categoryOfElement: self name.
	^category! !

!Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730!
declare: varString
	"Declare class variables common to all instances. Answer whether
	 recompilation is advisable."
	| newVars conflicts |
	newVars := (Scanner new scanFieldNames: varString)
		collect: [:x | x asSymbol].
	newVars do: [:var |
		var first canBeGlobalVarInitial
			ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']].
	conflicts := false.
	classPool == nil ifFalse:
		["Remove variables that are no longer declared."
		(classPool keys reject: [:x | newVars includes: x])
			do: [:var | self removeClassVarName: var]].
	(newVars reject: [:var | self classPool includesKey: var])
		do: [:var | "adding"
			"check if new vars defined elsewhere"
			(self canFindWithoutEnvironment: var) ifTrue: [
				(DuplicateVariableError new)
					superclass: superclass; "fake!!!!!!"
					variable: var;
					signal: var, ' is already defined'.
				conflicts := true]].
	newVars size > 0 ifTrue:
		[classPool := self classPool. "in case it was nil"
		newVars do: [:var | classPool declare: var from: environment undeclared]].
	^conflicts! !

!Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475!
removeClassVarName: aString
	"Remove the class variable whose name is the argument, aString, from
	 the names defined in the receiver, a class. Create an error notification
	 if aString is not a class variable or if it is still being used in the
	 code of the class."
	| aSymbol |
	aSymbol := aString asSymbol.
	(classPool includesKey: aSymbol) ifFalse:
		[^self error: aString, ' is not a class variable'].
	self withAllSubclasses do:[:subclass |
		(Array with: subclass with: subclass class) do:[:classOrMeta |
			(classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty
				ifFalse: [
					"During a fileIn, silently park the variable in Undeclared."
					InMidstOfFileinNotification signal ifTrue: [
						Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '.
						^ environment undeclared declare: aSymbol from: classPool].
					(self confirm: (aString,' is still used in code of class ', classOrMeta name,
						'.\Is it okay to move it to Undeclared?') withCRs)
						ifTrue:[^Undeclared declare: aSymbol from: classPool]
						ifFalse:[^self]]]].
	classPool removeKey: aSymbol.
	classPool isEmpty ifTrue: [classPool := nil].
! !

!Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206!
rename: aString
	"The new name of the receiver is the argument, aString."
	| oldName newName |
	(newName := aString asSymbol) = (oldName := self name)
		ifTrue: [^ self].
	(self environment includesKey: newName)
		ifTrue: [^ self error: newName , ' already exists'].
	(environment undeclared includesKey: newName)
		ifTrue: [self inform: 'There are references to, ' , aString printString , '
from Undeclared. Check them after this change.'].
	name := newName.
	self environment renameClass: self from: oldName! !

!ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430!
name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe
	"Define a new class in the given environment.
	 If unsafe is true do not run any validation checks. This facility is
	 provided to implement important system changes."
	| oldClass instVars classVars copyOfOldClass newClass |
	environ := env.
	instVars := Scanner new scanFieldNames: instVarString.
	classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol].
	"Validate the proposed name"
	unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]].
	oldClass := env at: className ifAbsent:[nil].
	oldClass isBehavior
		ifFalse: [oldClass := nil] "Already checked in #validateClassName:"
		ifTrue: [
			"Keep a copy of the old class so change notification can diff it."
			copyOfOldClass := oldClass copy.
			copyOfOldClass superclass addSubclass: copyOfOldClass].
	[ | newCategory needNew force organization oldCategory |
	unsafe ifFalse:[
		"Run validation checks so we know that we have a good chance for recompilation"
		(self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil].
		(self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil].
		(self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil].
		(self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]].
	"See if we need a new subclass"
	needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass.
	needNew == nil ifTrue:[^nil]. "some error"
	(needNew and:[unsafe not]) ifTrue:[
		"Make sure we don't redefine any dangerous classes"
		(self tooDangerousClasses includes: oldClass name) ifTrue:[
			self error: oldClass name, ' cannot be changed'.
		].
		"Check if the receiver should not be redefined"
		(oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[
			self notify: oldClass name asText allBold,
				' should not be redefined. \Proceed to store over it.' withCRs]].
	needNew ifTrue:[
		"Create the new class"
		newClass := self
			newSubclassOf: newSuper
			type: type
			instanceVariables: instVars
			from: oldClass.
		newClass == nil ifTrue:[^nil]. "Some error"
		newClass setName: className.
		newClass environment: environ.
	] ifFalse:[
		"Reuse the old class"
		newClass := oldClass.
	].
	"Install the class variables and pool dictionaries... "
	force := (newClass declare: classVarString) | (newClass sharing: poolString).
	"... classify ..."
	newCategory := category asSymbol.
	organization := environ ifNotNil:[environ organization].
	oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol].
	organization classify: newClass name under: newCategory suppressIfDefault: true.
	"... recompile ..."
	newClass := self recompile: force from: oldClass to: newClass mutate: false.
	"... export if not yet done ..."
	(environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[
		[environ at: newClass name put: newClass]
			on: AttemptToWriteReadOnlyGlobal
			do:[:ex| ex resume: true].
		environ flushClassNameCache.
	].
	newClass doneCompiling.
	"... notify interested clients ..."
	oldClass isNil ifTrue: [
		SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory.
		^ newClass].
	newCategory ~= oldCategory
		ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category]
		ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.].
	] ensure: [
		"Always detach the temporary copy and flush obsolete subclasses."
		copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass].
		Behavior flushObsoleteSubclasses.
	].
	^newClass! !

!ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019!
superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat
	"This is the standard initialization message for creating a new class
	 as a subclass of an existing class."
	| env |
	env := EnvironmentRequest signal ifNil: [newSuper environment].
	^self
		name: t
		inEnvironment: env
		subclassOf: newSuper
		type: newSuper typeOfClass
		instanceVariableNames: f
		classVariableNames: d
		poolDictionaries: s
		category: cat! !

!ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912!
superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat
	"This is the standard initialization message for creating a new class
	 as a subclass of an existing class in which the subclass is to have
	 indexable byte-sized nonpointer variables."
	| oldClassOrNil actualType env |
	(aClass instSize > 0)
		ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields'].
	(aClass isVariable and: [aClass isWords])
		ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields'].
	(aClass isVariable and: [aClass isPointers])
		ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields'].
	"Preserve the #compiledMethod format if the class being redefined has it."
	oldClassOrNil := aClass environment at: t ifAbsent:[nil].
	actualType := (oldClassOrNil notNil
				   and: [oldClassOrNil typeOfClass == #compiledMethod])
		ifTrue: [#compiledMethod]
		ifFalse: [#bytes].
	env := EnvironmentRequest signal ifNil: [aClass environment].
	^self
		name: t
		inEnvironment: env
		subclassOf: aClass
		type: actualType
		instanceVariableNames: f
		classVariableNames: d
		poolDictionaries: s
		category: cat! !

!ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442!
superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat
	"This is the standard initialization message for creating a new class
	 as a subclass of an existing class in which the subclass is to have
	 indexable pointer variables."
	| env |
	aClass isBits
		ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields'].
	env := EnvironmentRequest signal ifNil: [aClass environment].
	^self
		name: t
		inEnvironment: env
		subclassOf: aClass
		type: #variable
		instanceVariableNames: f
		classVariableNames: d
		poolDictionaries: s
		category: cat! !

!ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098!
superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat
	"This is the standard initialization message for creating a new class
	 as a subclass of an existing class in which the subclass is to have
	 indexable word-sized nonpointer variables."
	| env |
	(aClass instSize > 0)
		ifTrue: [^self error: 'cannot make a word subclass of a class with named fields'].
	(aClass isVariable and: [aClass isBytes])
		ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields'].
	(aClass isVariable and: [aClass isPointers])
		ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields'].
	env := EnvironmentRequest signal ifNil: [aClass environment].
	^self
		name: t
		inEnvironment: env
		subclassOf: aClass
		type: #words
		instanceVariableNames: f
		classVariableNames: d
		poolDictionaries: s
		category: cat! !

!ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028!
superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat
	"This is the standard initialization message for creating a new class
	 as a subclass of an existing class (the receiver) in which the subclass
	 is to have weak indexable pointer variables."
	| env |
	aClass isBits
		ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields'].
	env := EnvironmentRequest signal ifNil: [aClass environment].
	^self
		name: t
		inEnvironment: env
		subclassOf: aClass
		type: #weak
		instanceVariableNames: f
		classVariableNames: d
		poolDictionaries: s
		category: cat! !
"Kernel"!

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029!
ambiguousSelector: aString inRange: anInterval
	"Interactively disambiguate a selector containing $-; fall back to the
	 legacy interpretation when there is no requestor to ask."
	| correctedSelector userSelection offset intervalWithOffset |
	self interactive ifFalse: [
		"In non interactive mode, compile with backward compatibility:
		 $- is part of literal argument"
		Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'.
		^super ambiguousSelector: aString inRange: anInterval].
	"handle the text selection"
	userSelection := cue requestor selectionInterval.
	intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset.
	cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last.
	cue requestor select.
	"Build the menu with alternatives"
	correctedSelector := AmbiguousSelector
		signalName: aString
		inRange: intervalWithOffset.
	correctedSelector ifNil: [^self fail].
	"Execute the selected action"
	offset := self substituteWord: correctedSelector
		wordInterval: intervalWithOffset
		offset: 0.
	cue requestor deselect.
	cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset.
	token := (correctedSelector readStream upTo: Character space) asSymbol! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136!
collectTemporaryDeclarationsFrom: methodNode
	"Collect all nodes (and possibly the receiver) whose tempsMark points
	 at a $| in the requestor's text, sorted by descending tempsMark."
	| tempsMarks str |
	tempsMarks := OrderedCollection new.
	str := cue requestor text asString.
	methodNode accept: (ParseNodeEnumerator
		ofBlock: [ :aNode | | mark |
			(aNode class canUnderstand: #tempsMark) ifTrue:
				[mark := aNode tempsMark.
				(mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ])
					ifTrue: [ tempsMarks addLast: aNode ]]]).
	(tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ])
		ifTrue: [ tempsMarks addLast: self ].
	^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606!
correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction
	"Correct the proposedKeyword to some selector symbol, correcting the
	 original text if such action is indicated. abortAction is invoked if the
	 proposedKeyword couldn't be converted into a valid selector. Spots is
	 an ordered collection of intervals within the test stream of the for
	 each of the keyword parts."
	| correctSelector userSelection |
	"If we can't ask the user, assume that the keyword will be defined later"
	self interactive ifFalse: [^proposedKeyword asSymbol].
	userSelection := cue requestor selectionInterval.
	cue requestor selectFrom: spots first first to: spots last last.
	cue requestor select.
	correctSelector := UnknownSelector name: proposedKeyword.
	correctSelector ifNil: [^abortAction value].
	cue requestor deselect.
	cue requestor selectInvisiblyFrom: userSelection first to: userSelection last.
	self substituteSelector: correctSelector keywords wordIntervals: spots.
	^(proposedKeyword last ~~ $: and: [correctSelector last == $:])
		ifTrue: [abortAction value]
		ifFalse: [correctSelector]! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242!
correctVariable: proposedVariable interval: spot
	"Correct the proposedVariable to a known variable, or declare it as a
	 new variable if such action is requested. We support declaring lowercase
	 variables as temps or inst-vars, and uppercase variables as Globals or
	 ClassVars, depending on whether the context is nil (class=UndefinedObject).
	 Spot is the interval within the test stream of the variable.
	 rr 3/4/2004 10:26 : adds the option to define a new class. "
	"Check if this is an i-var, that has been corrected already (ugly)"
	"Display the pop-up menu"
	| binding userSelection action |
	(encoder classEncoding instVarNames includes: proposedVariable) ifTrue:
		[^InstanceVariableNode new
			name: proposedVariable
			index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)].
	"If we can't ask the user for correction, make it undeclared"
	self interactive ifFalse: [^encoder undeclared: proposedVariable].
	"First check to see if the requestor knows anything about the variable"
	(binding := cue requestor bindingOf: proposedVariable)
		ifNotNil: [^encoder global: binding name: proposedVariable].
	userSelection := cue requestor selectionInterval.
	cue requestor selectFrom: spot first to: spot last.
	cue requestor select.
	"Build the menu with alternatives"
	action := UndeclaredVariable
		signalFor: self
		name: proposedVariable
		inRange: spot.
	action ifNil: [^self fail].
	"Execute the selected action"
	cue requestor deselect.
	cue requestor selectInvisiblyFrom: userSelection first to: userSelection last.
	^action value! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921!
declareUndeclaredTemps: methodNode
	"Declare any undeclared temps, declaring them at the smallest enclosing scope."
	| undeclared userSelection blocksToVars |
	(undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self].
	userSelection := cue requestor selectionInterval.
	blocksToVars := IdentityDictionary new.
	undeclared do:
		[:var|
		"Group each variable under the block that should declare it."
		(blocksToVars
			at: (var tag == #method
					ifTrue: [methodNode block]
					ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)])
			ifAbsentPut: [SortedCollection new]) add: var name].
	(blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil:
		[:rootVars|
		rootVars do: [:varName| self pasteTempAtMethodLevel: varName]].
	(blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do:
		[:block| | decl |
		decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b].
		block temporaries isEmpty
			ifTrue:
				[self substituteWord: ' | ', decl, ' |'
					wordInterval: (block tempsMark + 1 to: block tempsMark)
					offset: requestorOffset]
			ifFalse:
				[self substituteWord: decl, ' '
					wordInterval: (block tempsMark to: block tempsMark - 1)
					offset: requestorOffset]].
	cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset.
	ReparseAfterSourceEditing signal! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770!
defineClass: className
	"prompts the user to define a new class, asks for it's category, and
	 lets the users edit further the definition"
	| sym cat def d2 |
	sym := className asSymbol.
	cat := UIManager default
		request: 'Enter class category : '
		initialAnswer: self encoder classEncoding theNonMetaClass category.
	cat ifEmpty: [cat := 'Unknown'].
	def := 'Object subclass: #' , sym ,
		'
	instanceVariableNames: ''''
	classVariableNames: ''''
	poolDictionaries: ''''
	category: ''' , cat , ''''.
	d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def.
	d2 ifEmpty: [d2 := def].
	Compiler evaluate: d2.
	^ encoder global: (cue environment bindingOf: sym) name: sym! !

!Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567!
externalFunctionDeclaration
	"Parse the function declaration for a call to an external library."
	| descriptorClass callType modifier retType externalName args argType module fn |
	descriptorClass := cue environment
		valueOf: #ExternalFunction
		ifAbsent: [^ false].
	callType := descriptorClass callingConventionFor: here.
	callType == nil ifTrue:[^false].
	[modifier := descriptorClass callingConventionModifierFor: token.
	 modifier notNil] whileTrue:
		[self advance.
		 callType := callType bitOr: modifier].
	"Parse return type"
	self advance.
	retType := self externalType: descriptorClass.
	retType == nil ifTrue:[^self expected:'return type'].
	"Parse function name or index"
	externalName := here.
	(self match: #string)
		ifTrue:[externalName := externalName asSymbol]
		ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']].
	(self matchToken: #'(') ifFalse:[^self expected:'argument list'].
	args := WriteStream on: Array new.
	[here == #')'] whileFalse:[
		argType := self externalType: descriptorClass.
		argType == nil ifTrue:[^self expected:'argument'].
		argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType].
	].
	(self matchToken: #')') ifFalse:[^self expected:')'].
	(self matchToken: 'module:') ifTrue:[
		module := here.
		(self match: #string) ifFalse:[^self expected: 'String'].
		module := module asSymbol].
	Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn|
		fn := xfn name: externalName
			module: module
			callType: callType
			returnType: retType
			argumentTypes: args contents.
		self allocateLiteral: fn.
	].
	(self matchToken: 'error:')
		ifTrue:
			[| errorCodeVariable |
			 errorCodeVariable := here.
			 (hereType == #string
			  or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)'].
			 self advance.
			 self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)).
			 fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]]
		ifFalse:
			[self addPragma: (Pragma keyword: #primitive: arguments: #(120))].
	^true
! !

!Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169!
interactive
	"Answer true if compilation is interactive"
	^ cue requestor notNil! !

!Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223!
notify: string at: location
	"Report a syntax error: raise a SyntaxErrorNotification when there is
	 no requestor, otherwise notify the requestor in place; always fail."
	cue requestor isNil
		ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"].
			SyntaxErrorNotification
				inClass: encoder classEncoding
				category: cue category
				withCode:
					(source contents asText
						copyReplaceFrom: location
						to: location - 1
						with: ((string , ' ->') asText allBold
							addAttribute: TextColor red; yourself))
				doitFlag: doitFlag
				errorMessage: string
				location: location]
		ifFalse: [cue requestor
					notify: string , ' ->'
					at: location
					in: source].
	^self fail! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108!
pasteTempAtMethodLevel: name
	"Insert a temp declaration for name at the method-level temps mark,
	 creating the | | pair if the method has none."
	| insertion delta theTextString characterBeforeMark |
	theTextString := cue requestor text string.
	characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ].
	(theTextString at: tempsMark) = $|
		ifTrue: [
			"Paste it before the second vertical bar"
			insertion := name, ' '.
			characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion].
			delta := 0.
		] ifFalse: [
			"No bars - insert some with CR, tab"
			insertion := '| ' , name , ' |',String cr.
			delta := 2. "the bar and CR"
			characterBeforeMark = Character tab ifTrue: [
				insertion := insertion , String tab.
				delta := delta + 1. "the tab"
			].
		].
	tempsMark := tempsMark
		+ (self substituteWord: insertion
			wordInterval: (tempsMark to: tempsMark-1)
			offset: 0)
		- delta! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305!
queryUndefined
	"Select the undefined variable in the requestor and ask whether to proceed."
	| varStart varName |
	varName := parseNode key.
	varStart := self endOfLastToken + requestorOffset - varName size + 1.
	cue requestor selectFrom: varStart to: varStart + varName size - 1; select.
	(UndefinedVariable name: varName) ifFalse: [^ self fail]! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341!
removeEmptyTempDeclarationsFrom: methodNode
	"Remove ||-style and | |-style empty temp declarations from the source,
	 widening each removal to the whole line when the line holds nothing else."
	| sourceCode madeChanges tempsMarkHolder |
	sourceCode := cue requestor text asString.
	tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode.
	madeChanges := false.
	tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start |
		tempsMarkChar0 := (sourceCode at: currentBlock tempsMark).
		tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1).
		tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2).
		tempsMarkChar0 = $| & tempsMarkChar1 = $|
			ifTrue: [ end := currentBlock tempsMark.
				start := end - 1].
		tempsMarkChar0 = $| & tempsMarkChar1 = $  & tempsMarkChar2 = $|
			ifTrue: [ end := currentBlock tempsMark.
				start := end - 2].
		start notNil & end notNil ifTrue: [ | lineStart lineEnd |
			lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]).
			lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ].
			((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start
				and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ])
				ifTrue: [ start := lineStart.
					end := lineEnd ].
			cue requestor correctFrom: start to: end with: ''.
			madeChanges := true.
			currentBlock tempsMark: nil ] ].
	madeChanges ifTrue: [ReparseAfterSourceEditing signal]! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281!
removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks
	"Remove the declaration of temp from str near currentBlock's temps mark;
	 shift the temps marks of someBlocks left by the removed width.
	 Answer whether a removal was made."
	| start end |
	end := currentBlock tempsMark - 1.
	["Beginning at right temp marker..."
	start := end - temp size + 1.
	end < temp size or: [ (str at: start) = $| ]
		or: [ temp = (str copyFrom: start to: end)
			and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator)
				& ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]]
		whileFalse: [
			"Search left for the unused temp"
			end := cue requestor nextTokenFrom: end direction: -1 ].
	(end < temp size or: [ (str at: start) = $| ])
		ifFalse: [(str at: start - 1) = $
			ifTrue: [ start := start - 1 ].
			cue requestor correctFrom: start to: end with: ''.
			someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)].
			^true ].
	^false! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194!
removeUnusedTemps: methodNode
	"Scan for unused temp names, and prompt the user about the prospect
	 of removing each one found"
	| madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange |
	madeChanges := false.
	tempMarkHoldersToChange := OrderedCollection new.
	tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode.
	unusedTempNames := encoder unusedTempNames select:
		[ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp
			and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]].
	tempsMarkHolder do: [ :currentBlock |
		tempMarkHoldersToChange add: currentBlock.
		unusedTempNames do:
			[ :temp |
			(self
				removeUnusedTemporaryNamed: temp
				from: cue requestor text asString
				lookingAt: currentBlock
				movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]].
	madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode.
		ReparseAfterSourceEditing signal ]! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326!
substituteWord: correctWord wordInterval: spot offset: o
	"Substitute the correctSelector into the (presumed interactive) receiver.
	 Update requestorOffset based on the delta size and answer the updated offset."
	cue requestor correctFrom: spot first + o to: spot last + o with: correctWord.
	requestorOffset := requestorOffset + correctWord size - spot size.
	^o + correctWord size - spot size! !

!Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807!
temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). 
If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." 
self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! 
encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. "Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. 
To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. "This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. 
newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. "was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." 
newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). "53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). 
"Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. 
ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! 
!Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Thu Apr 3 11:31:40 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:31:40 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: BROKEN: Added breakpoint in c_loop. No jit. Message-ID: <20140403093140.2DC041C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r747:70acd7f429ae Date: 2014-01-14 12:56 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/70acd7f429ae/ Log: BROKEN: Added breakpoint in c_loop. No jit. diff too long, truncating to 2000 out of 82703 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! 
!Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. 
Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. 
[(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). 
If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. ^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. 
"If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. "Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! 
compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! 
I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. 
The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! 
init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! 
context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! 
source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! 
evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. 
"Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. 
Make sure current locale corresponds my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. 
MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! 
setPreferences Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! switchToNewRepository | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! 
parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! 
init: aClass context: aContext notifying: anObject | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. 
First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. 
self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. 
oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. 
organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." 
| oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. 
(aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. cue requestor select. 
"Build the menu with alternatives" correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset. correctedSelector ifNil: [^self fail]. "Execute the selected action" offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset. token := (correctedSelector readStream upTo: Character space) asSymbol! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136! collectTemporaryDeclarationsFrom: methodNode | tempsMarks str | tempsMarks := OrderedCollection new. str := cue requestor text asString. methodNode accept: (ParseNodeEnumerator ofBlock: [ :aNode | | mark | (aNode class canUnderstand: #tempsMark) ifTrue: [mark := aNode tempsMark. (mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ]) ifTrue: [ tempsMarks addLast: aNode ]]]). (tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ]) ifTrue: [ tempsMarks addLast: self ]. ^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606! correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction "Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts." | correctSelector userSelection | "If we can't ask the user, assume that the keyword will be defined later" self interactive ifFalse: [^proposedKeyword asSymbol]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spots first first to: spots last last. cue requestor select. 
correctSelector := UnknownSelector name: proposedKeyword. correctSelector ifNil: [^abortAction value]. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. self substituteSelector: correctSelector keywords wordIntervals: spots. ^(proposedKeyword last ~~ $: and: [correctSelector last == $:]) ifTrue: [abortAction value] ifFalse: [correctSelector]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242! correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. "First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. 
^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. 
d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." | descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. 
self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223! notify: string at: location cue requestor isNil ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"]. SyntaxErrorNotification inClass: encoder classEncoding category: cue category withCode: (source contents asText copyReplaceFrom: location to: location - 1 with: ((string , ' ->') asText allBold addAttribute: TextColor red; yourself)) doitFlag: doitFlag errorMessage: string location: location] ifFalse: [cue requestor notify: string , ' ->' at: location in: source]. ^self fail! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108! pasteTempAtMethodLevel: name | insertion delta theTextString characterBeforeMark | theTextString := cue requestor text string. characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ]. (theTextString at: tempsMark) = $| ifTrue: [ "Paste it before the second vertical bar" insertion := name, ' '. characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion]. delta := 0. ] ifFalse: [ "No bars - insert some with CR, tab" insertion := '| ' , name , ' |',String cr. delta := 2. "the bar and CR" characterBeforeMark = Character tab ifTrue: [ insertion := insertion , String tab. delta := delta + 1. "the tab" ]. ]. tempsMark := tempsMark + (self substituteWord: insertion wordInterval: (tempsMark to: tempsMark-1) offset: 0) - delta! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305! 
queryUndefined | varStart varName | varName := parseNode key. varStart := self endOfLastToken + requestorOffset - varName size + 1. cue requestor selectFrom: varStart to: varStart + varName size - 1; select. (UndefinedVariable name: varName) ifFalse: [^ self fail]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341! removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." 
start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. (end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." 
cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! 
compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! 
parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! 
classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. 
"Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. 
"This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. 
"was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). 
"53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). "Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. 
SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! 
I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! !Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Thu Apr 3 11:31:41 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:31:41 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: new try Message-ID: <20140403093141.5CBFC1C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r748:83d30531d7fb Date: 2014-01-14 13:24 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/83d30531d7fb/ Log: new try diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -133,6 +133,7 @@ greens=[], reds=['pc', 's_context', 'self', 'method'], virtualizables=['s_context'], + stm_do_transaction_breaks=True # get_printable_location=get_printable_location ) @@ -226,7 +227,7 @@ if not jit.we_are_jitted() and may_context_switch: self.quick_check_for_interrupt(s_context) method = s_context.s_method() - while not rstm.should_break_transaction(): + while True: pc = s_context.pc() if pc < old_pc: if jit.we_are_jitted(): @@ -236,11 +237,14 @@ pc=pc, self=self, method=method, s_context=s_context) old_pc = pc + + # STM-ONLY JITDRIVER! self.jit_driver.jit_merge_point( pc=pc, self=self, method=method, s_context=s_context) + if rstm.should_break_transaction(False): + rstm.jit_stm_transaction_break_point() try: - rstm.jit_stm_transaction_break_point(False) self.step(s_context) except Return, nlr: From noreply at buildbot.pypy.org Thu Apr 3 11:31:42 2014 From: noreply at buildbot.pypy.org (eseckler) Date: Thu, 3 Apr 2014 11:31:42 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: interpreter break stm compilation fix. added stm hints. 
Message-ID: <20140403093142.7737D1C022D@cobra.cs.uni-duesseldorf.de> Author: Eric Seckler Branch: Changeset: r749:5cca4057febd Date: 2014-01-14 14:23 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/5cca4057febd/ Log: interpreter break stm compilation fix. added stm hints. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -6,6 +6,7 @@ from spyvm.tool.bitmanipulation import splitter from rpython.rlib import jit +from rpython.rlib.jit import hint from rpython.rlib import objectmodel, unroll try: from rpython.rlib import rstm @@ -242,8 +243,9 @@ self.jit_driver.jit_merge_point( pc=pc, self=self, method=method, s_context=s_context) - if rstm.should_break_transaction(False): + if rstm.jit_stm_should_break_transaction(False): rstm.jit_stm_transaction_break_point() + self = self._hints_for_stm() try: self.step(s_context) except Return, nlr: @@ -262,6 +264,10 @@ self.fork(f.w_frame, f.w_stm_process) + def _hints_for_stm(self): + self = hint(self, stm_write=True) + self = hint(self, access_directly=True) + return self def _get_adapted_tick_counter(self): # Normally, the tick counter is decremented by 1 for every message send. 
From noreply at buildbot.pypy.org Thu Apr 3 11:31:44 2014 From: noreply at buildbot.pypy.org (eseckler) Date: Thu, 3 Apr 2014 11:31:44 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: merge Message-ID: <20140403093144.03CE91C022D@cobra.cs.uni-duesseldorf.de> Author: Eric Seckler Branch: Changeset: r750:bead6833aef1 Date: 2014-01-14 14:28 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/bead6833aef1/ Log: merge diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -1,8 +1,12 @@ syntax: glob *.py[co] *~ -pypy-c-jit-62116-b027d4428675-linux +pypy-c-jit-* images/Squeak* +images/resources* +*package-cache/ +Squeak* +*TAGS targetimageloadingsmalltalk-*c images/package-cache versions diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -146,6 +146,8 @@ TAGGED_MAXINT = 2 ** (LONG_BIT - 2) - 1 TAGGED_MININT = -2 ** (LONG_BIT - 2) +TAGGED_MASK = int(2 ** (LONG_BIT - 1) - 1) + # Entries into SO_SPECIAL_SELECTORS_ARRAY: #(#+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -126,7 +126,9 @@ class Interpreter(object): - + _immutable_fields_ = ["space", "image", "image_name", + "max_stack_depth", "interrupt_counter_size", + "startup_time"] _w_last_active_context = None cnt = 0 _last_indent = "" @@ -140,9 +142,11 @@ def __init__(self, space, image=None, image_name="", trace=False, max_stack_depth=constants.MAX_LOOP_DEPTH): + import time self.space = space self.image = image self.image_name = image_name + self.startup_time = time.time() self.max_stack_depth = max_stack_depth self.remaining_stack_depth = max_stack_depth self._loop = False @@ -325,13 +329,12 @@ def check_for_interrupts(self, s_frame): 
# parallel to Interpreter>>#checkForInterrupts - import time, math # Profiling is skipped # We don't adjust the check counter size # use the same time value as the primitive MILLISECOND_CLOCK - now = int(math.fmod(time.time()*1000, constants.TAGGED_MAXINT/2)) + now = self.time_now() # XXX the low space semaphore may be signaled here # Process inputs @@ -345,6 +348,11 @@ # We do not support external semaphores. # In cog, the method to add such a semaphore is only called in GC. + def time_now(self): + import time + from rpython.rlib.rarithmetic import intmask + return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) + def padding(self, symbol=' '): return symbol * (self.max_stack_depth - self.remaining_stack_depth) diff --git a/spyvm/plugins/fileplugin.py b/spyvm/plugins/fileplugin.py --- a/spyvm/plugins/fileplugin.py +++ b/spyvm/plugins/fileplugin.py @@ -40,7 +40,10 @@ file_path = os.path.join(full_path, py_name) except OSError: raise PrimitiveFailedError - file_info = os.stat(file_path) + try: + file_info = os.stat(file_path) + except OSError: + raise PrimitiveFailedError w_name = space.wrap_string(py_name) w_creationTime = smalltalk_timestamp(space, file_info.st_ctime) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1019,8 +1019,7 @@ @expose_primitive(MILLISECOND_CLOCK, unwrap_spec=[object]) def func(interp, s_frame, w_arg): - import time, math - return interp.space.wrap_int(int(math.fmod(time.time()*1000, constants.TAGGED_MAXINT/2))) + return interp.space.wrap_int(interp.time_now()) @expose_primitive(SIGNAL_AT_MILLISECONDS, unwrap_spec=[object, object, int]) def func(interp, s_frame, w_delay, w_semaphore, timestamp): diff --git a/spyvm/test/jittest/__init__.py b/spyvm/test/jittest/__init__.py new file mode 100644 diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py new file mode 100644 --- /dev/null +++ b/spyvm/test/jittest/base.py @@ -0,0 +1,121 @@ 
+import subprocess +import os + +# TODO: +from pypy.tool.jitlogparser.parser import SimpleParser, Op +from pypy.tool.jitlogparser.storage import LoopStorage + +from rpython.jit.metainterp.resoperation import opname +from rpython.jit.tool import oparser +from rpython.tool import logparser + + +BasePath = os.path.abspath( + os.path.join( + os.path.join(os.path.dirname(__file__), os.path.pardir), + os.path.pardir, + os.path.pardir + ) +) +BenchmarkImage = os.path.join(os.path.dirname(__file__), "benchmark.image") + +class BaseJITTest(object): + def run(self, spy, tmpdir, code): + proc = subprocess.Popen( + [str(spy), "-r", code.replace("\n", "\r\n"), BenchmarkImage], + cwd=str(tmpdir), + env={"PYPYLOG": "jit-log-opt:%s" % tmpdir.join("x.pypylog")} + ) + proc.wait() + data = logparser.parse_log_file(str(tmpdir.join("x.pypylog")), verbose=False) + data = logparser.extract_category(data, "jit-log-opt-") + + storage = LoopStorage() + traces = [SimpleParser.parse_from_input(t) for t in data] + main_loops = storage.reconnect_loops(traces) + traces_w = [] + for trace in traces: + if trace in main_loops: + traces_w.append(Trace(trace)) + else: + traces_w[len(traces_w) - 1].addbridge(trace) + return traces_w + + def assert_matches(self, trace, expected): + expected_lines = [ + line.strip() + for line in expected.splitlines() + if line and not line.isspace() + ] + parser = Parser(None, None, {}, "lltype", None, invent_fail_descr=None, nonstrict=True) + expected_ops = [parser.parse_next_op(l) for l in expected_lines] + aliases = {} + assert len(trace) == len(expected_ops) + for op, expected in zip(trace, expected_ops): + self._assert_ops_equal(aliases, op, expected) + + def _assert_ops_equal(self, aliases, op, expected): + assert op.name == expected.name + assert len(op.args) == len(expected.args) + for arg, expected_arg in zip(op.args, expected.args): + if arg in aliases: + arg = aliases[arg] + elif arg != expected_arg and expected_arg not in aliases.viewvalues(): + 
aliases[arg] = arg = expected_arg + assert arg == expected_arg + + +class Parser(oparser.OpParser): + def get_descr(self, poss_descr, allow_invent): + if poss_descr.startswith(("TargetToken", ") + i60 = int_le(i49, 10000) + guard_true(i60, descr=) + i61 = int_add(i49, 1) + i62 = int_sub(i61, -1073741824) + i63 = uint_lt(i62, -2147483648) + guard_true(i63, descr=) + i64 = int_sub(i57, 1) + setfield_gc(ConstPtr(ptr54), i64, descr=) + i65 = int_le(i64, 0) + guard_false(i65, descr=) + jump(p0, p3, i61, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i64, descr=TargetToken(169145008)) + """) + self.assert_matches(traces[0].bridges[0], """ + f18 = call(ConstClass(ll_time.ll_time_time), descr=) + setfield_gc(ConstPtr(ptr19), 10000, descr=) + guard_no_exception(descr=) + f22 = float_sub(f18, 1387380038.806162) + f24 = float_mul(f22, 1000.000000) + i25 = cast_float_to_int(f24) + i27 = int_and(i25, 2147483647) + i28 = getfield_gc(ConstPtr(ptr19), descr=) + i29 = int_is_zero(i28) + guard_true(i29, descr=) + label(p0, p1, i16, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, descr=TargetToken(158475216)) + guard_class(p0, ConstClass(MethodContextShadow), descr=) + p31 = getfield_gc(p0, descr=) + guard_value(p31, ConstPtr(ptr32), descr=) + guard_not_invalidated(descr=) + i34 = int_le(i16, 1000000000) + guard_true(i34, descr=) + i36 = int_add(i16, 1) + i38 = int_sub(i36, -1073741824) + i40 = uint_lt(i38, -2147483648) + guard_true(i40, descr=) + setfield_gc(ConstPtr(ptr19), 9999, descr=) + jump(p0, p1, i36, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, 9999, descr=TargetToken(158474976)) + """) + + def test_constant_string(self, spy, tmpdir): + traces = self.run(spy, tmpdir, """ + | i | + i := 0. + [i <= 10000] whileTrue: [ i := i + 'a' size]. 
+ ^ i + """) + self.assert_matches(traces[0].loop, """ + label(p0, p3, i58, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i65, descr=TargetToken(153187472)) + debug_merge_point(0, 0, '2: [0x10]pushTemporaryVariableBytecode (codeTest1387373494)') + guard_not_invalidated(descr=) + debug_merge_point(0, 0, '3: [0x21]pushLiteralConstantBytecode (codeTest1387373494)') + debug_merge_point(0, 0, '4: [0xb4]bytecodePrimLessOrEqual (codeTest1387373494)') + i68 = int_le(i58, 10000) + guard_true(i68, descr=) + debug_merge_point(0, 0, '5: [0x9e]shortConditionalJump (codeTest1387373494)') + debug_merge_point(0, 0, '6: [0x10]pushTemporaryVariableBytecode (codeTest1387373494)') + debug_merge_point(0, 0, '7: [0x20]pushLiteralConstantBytecode (codeTest1387373494)') + debug_merge_point(0, 0, '8: [0xc2]bytecodePrimSize (codeTest1387373494)') + debug_merge_point(0, 0, '9: [0xb0]bytecodePrimAdd (codeTest1387373494)') + i69 = int_add(i58, 1) + i70 = int_sub(i69, -1073741824) + i71 = uint_lt(i70, -2147483648) + guard_true(i71, descr=) + debug_merge_point(0, 0, '10: [0x68]storeAndPopTemporaryVariableBytecode (codeTest1387373494)') + debug_merge_point(0, 0, '11: [0xa3]longUnconditionalJump (codeTest1387373494)') + i72 = int_sub(i65, 1) + setfield_gc(ConstPtr(ptr55), i72, descr=) + i73 = int_le(i72, 0) + guard_false(i73, descr=) + debug_merge_point(0, 0, '2: [0x10]pushTemporaryVariableBytecode (codeTest1387373494)') + jump(p0, p3, i69, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i72, descr=TargetToken(153187472)) + """) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -59,6 +59,42 @@ except error.Exit, e: print e.msg +def _run_code(interp, code, as_benchmark=False): + import time + selector = "codeTest%d" % int(time.time()) + try: + w_result = interp.perform( + interp.space.w_SmallInteger, + "compile:classified:notifying:", + 
space.wrap_string("%s\r\n%s" % (selector, code)), + space.wrap_string("spy-run-code"), + space.w_nil + ) + except interpreter.ReturnFromTopLevel, e: + print e.object + return 1 + except error.Exit, e: + print e.msg + return 1 + + if not as_benchmark: + try: + w_result = interp.perform(space.wrap_int(0), selector) + except interpreter.ReturnFromTopLevel, e: + print e.object + return 1 + except error.Exit, e: + print e.msg + return 1 + if w_result: + if isinstance(w_result, model.W_BytesObject): + print w_result.as_string().replace('\r', '\n') + else: + print w_result.as_repr_string().replace('\r', '\n') + return 0 + else: + return _run_benchmark(interp, 0, selector, "") + space = objspace.ObjSpace() @@ -85,6 +121,8 @@ -m|--method [benchmark on smallint] -a|--arg [string argument to #method] --stm + -r|--run [code string] + -b|--benchmark [code string] [image path, default: Squeak.image] """ % argv[0] @@ -102,6 +140,8 @@ trace = False use_stm = False stringarg = "" + code = None + as_benchmark = False while idx < len(argv): arg = argv[idx] @@ -129,6 +169,16 @@ idx += 1 elif arg in ["--stm"]: use_stm = True + elif arg in ["-r", "--run"]: + _arg_missing(argv, idx, arg) + code = argv[idx + 1] + as_benchmark = False + idx += 1 + elif arg in ["-b", "--benchmark"]: + _arg_missing(argv, idx, arg) + code = argv[idx + 1] + as_benchmark = True + idx += 1 elif path is None: path = argv[idx] else: @@ -157,6 +207,8 @@ if benchmark is not None: print "Running Benchmark" return _run_benchmark(interp, number, benchmark, stringarg, use_stm) + elif code is not None: + return _run_code(interp, code, as_benchmark=as_benchmark) else: print "Running Image" _run_image(interp) From noreply at buildbot.pypy.org Thu Apr 3 11:31:45 2014 From: noreply at buildbot.pypy.org (eseckler) Date: Thu, 3 Apr 2014 11:31:45 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: moar stm Message-ID: <20140403093145.247711C022D@cobra.cs.uni-duesseldorf.de> Author: Eric Seckler Branch: Changeset: 
r751:81165d5663ee Date: 2014-01-14 16:13 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/81165d5663ee/ Log: moar stm diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -135,7 +135,7 @@ jit_driver = jit.JitDriver( greens=[], reds=['pc', 's_context', 'self', 'method'], - virtualizables=['s_context'], + # virtualizables=['s_context'], stm_do_transaction_breaks=True # get_printable_location=get_printable_location ) @@ -238,9 +238,9 @@ if jit.we_are_jitted(): self.quick_check_for_interrupt(s_context, dec=self._get_adapted_tick_counter()) - self.jit_driver.can_enter_jit( - pc=pc, self=self, method=method, - s_context=s_context) + #self.jit_driver.can_enter_jit( + # pc=pc, self=self, method=method, + # s_context=s_context) old_pc = pc # STM-ONLY JITDRIVER! diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -522,38 +522,38 @@ return hasattr(rgc, "stm_is_enabled") and rgc.stm_is_enabled() USES_STM = stm_enabled() -if USES_STM: - def get_instances_array(space, s_frame, w_class): - return [] -else: - def get_instances_array(space, s_frame, w_class): - # This primitive returns some instance of the class on the stack. - # Not sure quite how to do this; maintain a weak list of all - # existing instances or something? 
- match_w = s_frame.instances_array(w_class) - if match_w is None: - match_w = [] - from rpython.rlib import rgc - - roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] - pending = roots[:] - while pending: - gcref = pending.pop() - if not rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - w_obj = rgc.try_cast_gcref_to_instance(model.W_Object, gcref) - if (w_obj is not None and w_obj.has_class() - and w_obj.getclass(space) is w_class): - match_w.append(w_obj) - pending.extend(rgc.get_rpy_referents(gcref)) - - while roots: - gcref = roots.pop() - if rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - roots.extend(rgc.get_rpy_referents(gcref)) - s_frame.store_instances_array(w_class, match_w) - return match_w +#if USES_STM: +def get_instances_array(space, s_frame, w_class): + return [] +#else: +# def get_instances_array(space, s_frame, w_class): +# # This primitive returns some instance of the class on the stack. +# # Not sure quite how to do this; maintain a weak list of all +# # existing instances or something? 
+# match_w = s_frame.instances_array(w_class) +# if match_w is None: +# match_w = [] +# from rpython.rlib import rgc +# +# roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] +# pending = roots[:] +# while pending: +# gcref = pending.pop() +# if not rgc.get_gcflag_extra(gcref): +# rgc.toggle_gcflag_extra(gcref) +# w_obj = rgc.try_cast_gcref_to_instance(model.W_Object, gcref) +# if (w_obj is not None and w_obj.has_class() +# and w_obj.getclass(space) is w_class): +# match_w.append(w_obj) +# pending.extend(rgc.get_rpy_referents(gcref)) +# +# while roots: +# gcref = roots.pop() +# if rgc.get_gcflag_extra(gcref): +# rgc.toggle_gcflag_extra(gcref) +# roots.extend(rgc.get_rpy_referents(gcref)) +# s_frame.store_instances_array(w_class, match_w) +# return match_w @expose_primitive(SOME_INSTANCE, unwrap_spec=[object]) def func(interp, s_frame, w_class): diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -59,7 +59,7 @@ except error.Exit, e: print e.msg -def _run_code(interp, code, as_benchmark=False): +def _run_code(interp, code, as_benchmark=False, use_stm=False): import time selector = "codeTest%d" % int(time.time()) try: @@ -93,7 +93,7 @@ print w_result.as_repr_string().replace('\r', '\n') return 0 else: - return _run_benchmark(interp, 0, selector, "") + return _run_benchmark(interp, 0, selector, "", use_stm) space = objspace.ObjSpace() @@ -208,7 +208,7 @@ print "Running Benchmark" return _run_benchmark(interp, number, benchmark, stringarg, use_stm) elif code is not None: - return _run_code(interp, code, as_benchmark=as_benchmark) + return _run_code(interp, code, as_benchmark=as_benchmark, use_stm=use_stm) else: print "Running Image" _run_image(interp) From noreply at buildbot.pypy.org Thu Apr 3 11:31:46 2014 From: noreply at buildbot.pypy.org (eseckler) Date: Thu, 3 Apr 2014 11:31:46 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: move stm breakpoint 
to trampoline loop Message-ID: <20140403093146.3EE461C022D@cobra.cs.uni-duesseldorf.de> Author: Eric Seckler Branch: Changeset: r752:1a5c22d99f3f Date: 2014-01-14 17:04 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/1a5c22d99f3f/ Log: move stm breakpoint to trampoline loop diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -134,7 +134,7 @@ _last_indent = "" jit_driver = jit.JitDriver( greens=[], - reds=['pc', 's_context', 'self', 'method'], + reds=['self', 'w_active_context', 's_new_context', 's_sender'], # virtualizables=['s_context'], stm_do_transaction_breaks=True # get_printable_location=get_printable_location @@ -205,7 +205,15 @@ #assert self.remaining_stack_depth == self.max_stack_depth # Need to save s_sender, c_loop will nil this on return s_sender = s_new_context.s_sender() + try: + # STM-ONLY JITDRIVER! + self.jit_driver.jit_merge_point( + self=self, w_active_context=w_active_context, s_new_context=s_new_context, s_sender=s_sender) + if rstm.jit_stm_should_break_transaction(False): + rstm.jit_stm_transaction_break_point() + self = self._hints_for_stm() + s_new_context = self.c_loop(s_new_context) except StackOverflow, e: s_new_context = e.s_context @@ -243,15 +251,12 @@ # s_context=s_context) old_pc = pc - # STM-ONLY JITDRIVER! - self.jit_driver.jit_merge_point( - pc=pc, self=self, method=method, - s_context=s_context) - if rstm.jit_stm_should_break_transaction(False): - rstm.jit_stm_transaction_break_point() - self = self._hints_for_stm() try: self.step(s_context) + if pc % 2 == 0: + return s_context + if rstm.should_break_transaction(): + return s_context except Return, nlr: if nlr.s_target_context is not s_context: From noreply at buildbot.pypy.org Thu Apr 3 11:31:49 2014 From: noreply at buildbot.pypy.org (eseckler) Date: Thu, 3 Apr 2014 11:31:49 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: merge with rbitblt. moved stm merge/break points to separate loop. 
Message-ID: <20140403093149.DCF2A1C022D@cobra.cs.uni-duesseldorf.de> Author: Eric Seckler Branch: Changeset: r753:96455b41dc0c Date: 2014-01-14 18:16 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/96455b41dc0c/ Log: merge with rbitblt. moved stm merge/break points to separate loop. diff too long, truncating to 2000 out of 196343 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,5 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. 
The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. 
An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. [(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. 
See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. 
key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. ^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. "If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. 
"Check for gradual underflow, in which case the mantissa will lose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version takes care of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor "Compile text in anEnvironment, using the receiver's default source-logging policy." ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. 
]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! 
Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. 
It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock "Answer the association whose key is varName, or the value of aBlock if there is none." ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! init: sourceStream cue: aCue failBlock: aBlock "Initialize scanning state from sourceStream and remember aBlock for failure exits." self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. 
The argument noPattern is a Boolean that is true if the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! 
!CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject "Full-argument constructor; all other instance-creation methods funnel through here." ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! 
getClass "Answer the class in whose scope compilation happens." ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the context. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. 
self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. "Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream "Normalize textOrStream into a positionable read stream held in sourceStream." sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment "The environment is ignored here; defer to the plain scan." ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment "The environment is ignored here; defer to the plain scan." ^ self scanFrom: aByteStream! 
! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment "The environment is ignored here; defer to the plain scan." ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Answer the literal unchanged." ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds to my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds to my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. 
For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared "Answer the Undeclared dictionary registered in the receiver." ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures "No expected failures in this suite." ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). 
TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! setPreferences "Configure the default look and behavior for the release image." Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! 
switchToNewRepository "Point the updater and the default repository group at the new release repository." | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString "Answer the version string for this release." ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock "Legacy entry point: build a CompilationCue from the loose arguments and delegate." | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). 
If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! init: aClass context: aContext notifying: anObject "Legacy entry point: wrap the arguments in a CompilationCue and delegate." | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! 
!Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | "NOTE(review): this tests self, not the subclass block argument — presumably each subclass was intended; verify." (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName "Answer the binding of varName resolved against the receiver's own environment." ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. 
newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! 
rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. "Remember the target environment; the classification and export steps below read it." instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. 
(self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. 
newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." | oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. 
^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. (aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! 
superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. cue requestor select. "Build the menu with alternatives" correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset. correctedSelector ifNil: [^self fail]. "Execute the selected action" offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset. token := (correctedSelector readStream upTo: Character space) asSymbol! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136! 
collectTemporaryDeclarationsFrom: methodNode
	"Walk methodNode collecting every node (plus the receiver itself) whose
	tempsMark points at a vertical bar inside the requestor's source text,
	and answer them sorted by descending tempsMark."
	| declarations src |
	declarations := OrderedCollection new.
	src := cue requestor text asString.
	methodNode accept: (ParseNodeEnumerator ofBlock:
		[:aNode | | mark |
		(aNode class canUnderstand: #tempsMark) ifTrue:
			[mark := aNode tempsMark.
			(mark notNil
				and: [(mark between: 1 and: src size)
				and: [(src at: mark) = $|]])
					ifTrue: [declarations addLast: aNode]]]).
	(tempsMark notNil
		and: [(tempsMark between: 1 and: src size)
		and: [(src at: tempsMark) = $|]])
			ifTrue: [declarations addLast: self].
	^declarations sorted: [:a :b | a tempsMark > b tempsMark]! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606!
correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction
	"Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts."
	| chosenSelector savedSelection |
	"Without a user to ask, assume the keyword will be defined later"
	self interactive ifFalse: [^proposedKeyword asSymbol].
	savedSelection := cue requestor selectionInterval.
	cue requestor selectFrom: spots first first to: spots last last.
	cue requestor select.
	chosenSelector := UnknownSelector name: proposedKeyword.
	chosenSelector ifNil: [^abortAction value].
	cue requestor deselect.
	cue requestor selectInvisiblyFrom: savedSelection first to: savedSelection last.
	self substituteSelector: chosenSelector keywords wordIntervals: spots.
	^(proposedKeyword last ~~ $: and: [chosenSelector last == $:])
		ifTrue: [abortAction value]
		ifFalse: [chosenSelector]! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242!
correctVariable: proposedVariable interval: spot
	"Resolve proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class."
	| binding savedSelection chosenAction |
	"An i-var that has been corrected already (ugly)"
	(encoder classEncoding instVarNames includes: proposedVariable) ifTrue:
		[^InstanceVariableNode new
			name: proposedVariable
			index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)].
	"If we can't ask the user for correction, make it undeclared"
	self interactive ifFalse: [^encoder undeclared: proposedVariable].
	"First check to see if the requestor knows anything about the variable"
	(binding := cue requestor bindingOf: proposedVariable)
		ifNotNil: [^encoder global: binding name: proposedVariable].
	savedSelection := cue requestor selectionInterval.
	cue requestor selectFrom: spot first to: spot last.
	cue requestor select.
	"Display the pop-up menu with alternatives"
	chosenAction := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot.
	chosenAction ifNil: [^self fail].
	"Execute the selected action"
	cue requestor deselect.
	cue requestor selectInvisiblyFrom: savedSelection first to: savedSelection last.
	^chosenAction value! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921!
declareUndeclaredTemps: methodNode
	"Declare any undeclared temps, declaring them at the smallest enclosing scope."
	| undeclared userSelection blocksToVars |
	(undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self].
	userSelection := cue requestor selectionInterval.
	blocksToVars := IdentityDictionary new.
undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." 
| descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! 
interactive
	"Answer true if compilation is interactive"
	^cue requestor notNil! !

!Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223!
notify: string at: location
	"Report a parse error: raise a SyntaxErrorNotification when there is no
	requestor, otherwise forward the message to it. Either way the parse fails."
	cue requestor isNil
		ifTrue:
			[(encoder == self or: [encoder isNil])
				ifTrue: [^self fail "failure setting up syntax error"].
			SyntaxErrorNotification
				inClass: encoder classEncoding
				category: cue category
				withCode: (source contents asText
					copyReplaceFrom: location
					to: location - 1
					with: ((string , ' ->') asText allBold
						addAttribute: TextColor red;
						yourself))
				doitFlag: doitFlag
				errorMessage: string
				location: location]
		ifFalse: [cue requestor notify: string , ' ->' at: location in: source].
	^self fail! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108!
pasteTempAtMethodLevel: name
	"Insert the declaration for name into the method-level temporaries,
	creating the pair of vertical bars when none exists yet, and advance
	tempsMark past the insertion."
	| toInsert delta text prevChar |
	text := cue requestor text string.
	prevChar := text at: tempsMark - 1 ifAbsent: [$ ].
	(text at: tempsMark) = $|
		ifTrue:
			["Paste it before the second vertical bar"
			toInsert := name , ' '.
			prevChar isSeparator ifFalse: [toInsert := ' ' , toInsert].
			delta := 0]
		ifFalse:
			["No bars - insert some with CR, tab"
			toInsert := '| ' , name , ' |' , String cr.
			delta := 2.	"the bar and CR"
			prevChar = Character tab
				ifTrue:
					[toInsert := toInsert , String tab.
					delta := delta + 1	"the tab"]].
	tempsMark := tempsMark
		+ (self substituteWord: toInsert wordInterval: (tempsMark to: tempsMark - 1) offset: 0)
		- delta! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305!
queryUndefined
	"Select the undefined variable in the requestor and ask the user about it;
	fail the parse unless the UndefinedVariable query answers true."
	| varName varStart |
	varName := parseNode key.
	varStart := self endOfLastToken + requestorOffset - varName size + 1.
	cue requestor
		selectFrom: varStart to: varStart + varName size - 1;
		select.
	(UndefinedVariable name: varName) ifFalse: [^self fail]! !

!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341!
removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. 
(end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! 
temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). 
If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." 
self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! 
encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. "Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. 
To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. "This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. 
newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. "was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." 
newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). "53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). 
"Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. 
ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! 
!Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Thu Apr 3 11:32:00 2014 From: noreply at buildbot.pypy.org (eseckler) Date: Thu, 3 Apr 2014 11:32:00 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: other changes for stm debugging.. Message-ID: <20140403093200.526791C022D@cobra.cs.uni-duesseldorf.de> Author: Eric Seckler Branch: Changeset: r754:1bb50712c8fe Date: 2014-01-15 15:55 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/1bb50712c8fe/ Log: other changes for stm debugging.. diff too long, truncating to 2000 out of 359951 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -1,40 +1,2 @@ -'From Squeak4.1 of 17 April 2010 [latest update: #9957] on 17 April 2010 at 5:22:05 pm'! ----STARTUP----{17 April 2010 . 5:21:54 pm} as C:\Squeak\4.0\4.1-final\Squeak4.1.image! Smalltalk appendChangesTo: 'SqueakV41.sources'.! ----QUIT----{17 April 2010 . 5:22:11 pm} Squeak4.1.image priorSource: 89! ----STARTUP----{24 May 2010 . 8:07:26 pm} as C:\Squeak\4.2\Squeak4.1.image! ----SNAPSHOT----{24 May 2010 . 8:08:14 pm} Squeak4.2.image priorSource: 229! !HashedCollection commentStamp: 'ul 4/12/2010 22:37' prior: 0! I am an abstract collection of objects that implement hash and equality in a consitent way. This means that whenever two objects are equal, their hashes have to be equal too. If two objects are equal then I can only store one of them. Hashes are expected to be integers (preferably SmallIntegers). I also expect that the objects contained by me do not change their hashes. If that happens, hash invariants have to be re-established, which can be done by #rehash. Since I'm abstract, no instances of me should exist. My subclasses should implement #scanFor:, #fixCollisionsFrom: and #noCheckNoGrowFillFrom:. 
Instance Variables array: (typically Array or WeakArray) tally: (non-negative) array - An array whose size is a prime number, it's non-nil elements are the elements of the collection, and whose nil elements are empty slots. There is always at least one nil. In fact I try to keep my "load" at 75% or less so that hashing will work well. tally - The number of elements in the collection. The array size is always greater than this. Implementation details: I implement a hash table which uses open addressing with linear probing as the method of collision resolution. Searching for an element or a free slot for an element is done by #scanFor: which should return the index of the slot in array corresponding to it's argument. When an element is removed #fixCollisionsFrom: should rehash all elements in array between the original index of the removed element, wrapping around after the last slot until reaching an empty slot. My maximum load factor (75%) is hardcoded in #atNewIndex:put:, so it can only be changed by overriding that method. When my load factor reaches this limit I replace my array with a larger one (see #grow) ensuring that my load factor will be less than or equal to 50%. The new array is filled by #noCheckNoGrowFillFrom: which should use #scanForEmptySlotFor: instead of #scanFor: for better performance. I do not shrink. ! !WeakKeyDictionary methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Collection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:33' prior: 18816249! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger timesRepeat: [self add: newObject]. ^ newObject! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! 
compactAll "HashedCollection compactAll" self allSubclassesDo: #compactAllInstances! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! compactAllInstances "Do not use #allInstancesDo: because compact may create new instances." self allInstances do: #compact! ! !HashedCollection class methodsFor: 'sizing' stamp: 'ul 4/7/2010 00:17' prior: 55063414! goodPrimes "Answer a sorted array of prime numbers less than one billion that make good hash table sizes. Should be expanded as needed. See comments below code" ^#( 5 11 17 23 31 43 59 79 107 149 199 269 359 479 641 857 1151 1549 2069 2237 2423 2617 2797 2999 3167 3359 3539 3727 3911 4441 4787 5119 5471 5801 6143 6521 6827 7177 7517 7853 8783 9601 10243 10867 11549 12239 12919 13679 14293 15013 15731 17569 19051 20443 21767 23159 24611 25847 27397 28571 30047 31397 35771 38201 40841 43973 46633 48989 51631 54371 57349 60139 62969 70589 76091 80347 85843 90697 95791 101051 106261 111143 115777 120691 126311 140863 150523 160969 170557 181243 190717 201653 211891 221251 232591 242873 251443 282089 300869 321949 341227 362353 383681 401411 422927 443231 464951 482033 504011 562621 605779 647659 681607 723623 763307 808261 844709 886163 926623 967229 1014617 1121987 1201469 1268789 1345651 1429531 1492177 1577839 1651547 1722601 1800377 1878623 1942141 2028401 2242727 2399581 2559173 2686813 2836357 3005579 3144971 3283993 3460133 3582923 3757093 3903769 4061261 4455361 4783837 5068529 5418079 5680243 6000023 6292981 6611497 6884641 7211599 7514189 7798313 8077189 9031853 9612721 10226107 10745291 11338417 11939203 12567671 13212697 13816333 14337529 14938571 15595673 16147291 17851577 18993941 20180239 21228533 22375079 23450491 24635579 25683871 26850101 27921689 29090911 30153841 31292507 32467307 35817611 37983761 40234253 42457253 44750177 46957969 49175831 51442639 53726417 55954637 58126987 60365939 62666977 64826669 71582779 76039231 80534381 84995153 89500331 93956777 98470819 
102879613 107400389 111856841 116365721 120819287 125246581 129732203 143163379 152076289 161031319 169981667 179000669 187913573 196826447 205826729 214748357 223713691 232679021 241591901 250504801 259470131 285162679 301939921 318717121 335494331 352271573 369148753 385926017 402603193 419480419 436157621 453034849 469712051 486589307 503366497 520043707 570475349 603929813 637584271 671138659 704693081 738247541 771801929 805356457 838910803 872365267 905919671 939574117 973128521 1006682977 1040137411 1073741833) "The above primes past 2069 were chosen carefully so that they do not interact badly with 1664525 (used by hashMultiply), and so that gcd(p, (256^k) +/- a) = 1, for 0 cost ifTrue: [ cost := newCost ] ]. cost ]."! ! !HashedCollection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:38' prior: 53647096! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger < 1 ifTrue: [ ^newObject ]. ^self add: newObject "I can only store an object once." ! ! !HashedCollection methodsFor: 'private' stamp: 'ul 4/12/2010 22:53'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: tally * 4 // 3. self growTo: newCapacity! ! !WeakSet methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Symbol class methodsFor: 'class initialization' stamp: 'ul 4/13/2010 00:00' prior: 30357901! compactSymbolTable "Reduce the size of the symbol table so that it holds all existing symbols with 25% free space." | oldSize | Smalltalk garbageCollect. oldSize := SymbolTable capacity. SymbolTable compact. ^(oldSize - SymbolTable capacity) printString, ' slot(s) reclaimed'! ! KeyedIdentitySet class removeSelector: #goodPrimes! 
WeakIdentityKeyDictionary class removeSelector: #goodPrimes! IdentitySet class removeSelector: #goodPrimes! IdentityDictionary class removeSelector: #goodPrimes! "Collections"! !HashedCollectionTest methodsFor: 'test - class - sizing' stamp: 'ul 4/7/2010 00:18' prior: 58761579! testPrimes: primes | badPrimes | badPrimes := #(3 5 71 139 479 5861 277421). "These primes are less than the hashMultiply constant (1664525) and 1664525 \\ prime is close to 0 (mod prime). The following snippet reproduces these numbers: | hashMultiplyConstant | hashMultiplyConstant := 1 hashMultiply. (Integer primesUpTo: hashMultiplyConstant) select: [ :each | | remainder | remainder := hashMultiplyConstant \\ each. remainder <= 1 or: [ remainder + 1 = each ] ]." self assert: primes isSorted. primes do: [ :each | self assert: each isPrime. self deny: (each > 2069 and: [ badPrimes includes: each ]) ]. self assert: ( primes select: [ :p | | result | result := false. p > 2069 ifTrue: [ 1 to: 8 do: [ :k | 1 to: 32 do: [ :a | (p gcd: (256 raisedTo: k) + a) = 1 ifFalse: [ result := true ]. (p gcd: (256 raisedTo: k) - a) = 1 ifFalse: [ result := true ] ] ] ]. result ]) isEmpty.! ! HashedCollectionTest removeSelector: #testGoodPrimesForIdentityBasedHashedCollections! "CollectionsTests"! !MCMczReader methodsFor: 'as yet unclassified' stamp: 'bf 4/18/2010 18:38' prior: 22938947! extractInfoFrom: dict ^MCWorkingCopy infoFromDictionary: dict cache: self infoCache! ! !MCWorkingCopy class methodsFor: 'as yet unclassified' stamp: 'bf 4/19/2010 00:39' prior: 23215403! infoFromDictionary: aDictionary cache: cache | id | id := (aDictionary at: #id) asString. 
^ cache at: id ifAbsentPut: [MCVersionInfo name: (aDictionary at: #name ifAbsent: ['']) id: (UUID fromString: id) message: (aDictionary at: #message ifAbsent: ['']) date: ([Date fromString: (aDictionary at: #date)] ifError: [nil]) time: ([Time fromString: (aDictionary at: #time)] ifError: [nil]) author: (aDictionary at: #author ifAbsent: ['']) ancestors: (self ancestorsFromArray: (aDictionary at: #ancestors ifAbsent: []) cache: cache) stepChildren: (self ancestorsFromArray: (aDictionary at: #stepChildren ifAbsent: []) cache: cache)]! ! !MCVersionInfo methodsFor: 'converting' stamp: 'bf 4/18/2010 23:25' prior: 23175569! asDictionary ^ Dictionary new at: #name put: name; at: #id put: id asString; at: #message put: message; at: #date put: date; at: #time put: time; at: #author put: author; at: #ancestors put: (self ancestors collect: [:a | a asDictionary]); yourself! ! "Monticello"! !BlockContextTest methodsFor: 'running' stamp: 'md 9/6/2005 19:56' prior: 50431957! setUp super setUp. aBlockContext := [100 at 100 corner: 200 at 200]. contextOfaBlockContext := thisContext.! ! !BehaviorTest methodsFor: 'tests' stamp: 'md 2/18/2006 16:42' prior: 17365994! testBinding self assert: Object binding value = Object. self assert: Object binding key = #Object. self assert: Object class binding value = Object class. "returns nil for Metaclasses... like Encoder>>#associationFor:" self assert: Object class binding key = nil.! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53956757! testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #EmbeddedSourceQCompress ). self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. 
trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer sourceCode: code. self assert: (trailer kind == #EmbeddedSourceZip ). newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53957691! testEmbeddingTempNames | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer tempNames: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #TempsNamesQCompress ). self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer tempNames: code. self assert: (trailer kind == #TempsNamesZip ). newTrailer := trailer testEncoding. self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:17' prior: 53958613! testEncodingNoTrailer | trailer | trailer := CompiledMethodTrailer new. "by default it should be a no-trailer" self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:14' prior: 53959109! testEncodingSourcePointer | trailer | trailer := CompiledMethodTrailer new. CompiledMethod allInstancesDo: [:method | | ptr | trailer method: method. 
self assert: ( (ptr := method sourcePointer) == trailer sourcePointer). "the last bytecode index must be at 0" ptr ~= 0 ifTrue: [ self assert: (method endPC = trailer endPC) ]. ].! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53959564! testEncodingVarLengthSourcePointer | trailer newTrailer | trailer := CompiledMethodTrailer new. trailer sourcePointer: 1. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 1). trailer sourcePointer: 16r100000000000000. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 16r100000000000000). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53960108! testSourceByIdentifierEncoding | trailer id | trailer := CompiledMethodTrailer new. id := UUID new asString. trailer sourceIdentifier: id. self assert: (trailer kind == #SourceByStringIdentifier ). trailer := trailer testEncoding. self assert: (trailer kind == #SourceByStringIdentifier ). self assert: (trailer sourceIdentifier = id). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:49' prior: 53960643! testSourceBySelectorEncoding | trailer | trailer := CompiledMethodTrailer new. trailer setSourceBySelector. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CategorizerTest methodsFor: 'running' stamp: 'mtf 9/10/2007 10:10' prior: 18074036! setUp categorizer := Categorizer defaultList: #(a b c d e). categorizer classifyAll: #(a b c) under: 'abc'. categorizer addCategory: 'unreal'.! ! 
!CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074267! testClassifyNewElementNewCategory categorizer classify: #f under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') (''nice'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:18' prior: 18074541! testClassifyNewElementOldCategory categorizer classify: #f under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074806! testClassifyOldElementNewCategory categorizer classify: #e under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'') (''nice'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:54' prior: 18075078! testClassifyOldElementOldCategory categorizer classify: #e under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:22' prior: 18075341! testDefaultCategoryIsTransient "Test that category 'as yet unclassified' disapears when all it's elements are removed'" categorizer classifyAll: #(d e) under: #abc. self assert: categorizer printString = '(''abc'' a b c d e) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 15:15' prior: 18075669! testNullCategory "Test that category 'as yet unclassified' disapears when all it's elements are removed'" | aCategorizer | aCategorizer := Categorizer defaultList: #(). self assert: aCategorizer printString = '(''as yet unclassified'') '. self assert: aCategorizer categories = #('no messages'). aCategorizer classify: #a under: #b. self assert: aCategorizer printString = '(''b'' a) '. self assert: aCategorizer categories = #(b).! ! 
!CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18076194! testRemoveEmptyCategory categorizer removeCategory: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:55' prior: 18076430! testRemoveExistingElement categorizer removeElement: #a. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076673! testRemoveNonEmptyCategory self should: [categorizer removeCategory: #abc] raise: Error. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076950! testRemoveNonExistingCategory categorizer removeCategory: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18077203! testRemoveNonExistingElement categorizer removeElement: #f. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 14:49' prior: 18077451! testRemoveThenRename categorizer removeCategory: #unreal. categorizer renameCategory: #abc toBe: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''unreal'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:14' prior: 18077736! testUnchanged self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! "KernelTests"! !SmalltalkImage methodsFor: 'accessing' stamp: 'ul 4/18/2010 22:22'! at: key ifPresentAndInMemory: aBlock "Lookup the given key in the receiver. 
If it is present, answer the value of evaluating the given block with the value associated with the key. Otherwise, answer nil." ^globals at: key ifPresentAndInMemory: aBlock! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:45'! image "Answer the object to query about the current object memory and execution environment." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:47'! imageFormatVersion "Answer an integer identifying the type of image. The image version number may identify the format of the image (e.g. 32 or 64-bit word size) or specific requirements of the image (e.g. block closure support required). This invokes an optional primitive that may not be available on all virtual machines." "Smalltalk image imageFormatVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveImageFormatVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:38'! interpreterSourceVersion "Answer a string corresponding to the version of the interpreter source. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, as distinct from the external platform source code, typically written in C and managed separately for each platform. An optional primitive is invoked that may not be available on all virtual machines." "Smalltalk vm interpreterSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveInterpreterSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! platformSourceVersion "Answer a string corresponding to the version of the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines." 
"Smalltalk vm platformSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitivePlatformSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'image' stamp: 'md 5/16/2006 12:34' prior: 58536670! version "Answer the version of this release." ^SystemVersion current version! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! versionLabel "Answer a string corresponding to the version of virtual machine. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, in addition to the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines. See also vmVersion, which answers a string identifying the image from which virtual machine sources were generated." "Smalltalk vm versionLabel" self notify: 'This virtual machine does not support the optional primitive #primitiveVMVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:15'! vm "Answer the object to query about virtual machine." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 1/4/2010 21:40' prior: 58537225! wordSize "Answer the size in bytes of an object pointer or word in the object memory. The value does not change for a given image, but may be modified by a SystemTracer when converting the image to another format. The value is cached in WordSize to avoid the performance overhead of repeatedly consulting the VM." "Smalltalk wordSize" ^ WordSize ifNil: [WordSize := [SmalltalkImage current vmParameterAt: 40] on: Error do: [4]]! ! "System"! !SMLoaderPlus commentStamp: 'btr 12/1/2006 15:16' prior: 0! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). It uses ToolBuilder to construct its window. 
You can open one with: SMLoaderPlus open Instance Variables categoriesToFilterIds: The set of categories to filter the packages list. filters: The set of filters to apply to the packages list. map: The model SqueakMap. packagesList: The list of packages from the map. selectedCategory: The current category. selectedItem: The selected package or release. window: The window, held only so we can reOpen.! !SMLoaderCategoricalPlus commentStamp: 'btr 12/4/2006 15:47' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategoricalPlus open! !SMLoader commentStamp: 'btr 11/30/2006 18:00' prior: 27913009! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). You can open one with: SMLoader open! !SMLoaderCategorical commentStamp: 'btr 12/1/2006 15:16' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategorical open! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 18:06'! initialize Smalltalk at: #ToolBuilder ifPresent: [:tb | (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! openMenuString ^ 'SqueakMap Categories'! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! removeFromSystem (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self removeFromSystem: true! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! 
unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString].! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:50'! buildFancyWith: aBuilder "Creates a variant of the window where the package pane is split between installed and uninstalled packages." | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight)); yourself); add: ((self buildNotInstalledPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ (horizDivide / 2)); yourself); add: ((self buildInstalledPackagesListWith: builder) frame: (vertDivide @ (horizDivide / 2) corner: 1 @ horizDivide); yourself); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1); yourself); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. self setUpdatablePanesFrom: #(#installedPackageList #notInstalledPackageList ). currentPackageList := #notInstalled. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:56'! 
buildInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #installedPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:52'! buildNotInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #notInstalledPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:55'! buildWith: aBuilder | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight))); add: ((self buildPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ horizDivide)); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1)); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList ^currentPackageList! ! 
!SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'!
currentPackageList: aSymbol
	"Record which list (#installed or #notInstalled) is current; the install
	button label follows this choice."
	currentPackageList := aSymbol.
	self changed: #installButtonLabel.! !

!SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:55'!
defaultLabel
	"Prefix the inherited window label."
	^ 'Categorical ' , super defaultLabel! !

!SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:58'!
installButtonLabel
	"The action button installs from the not-installed pane, removes from the installed pane."
	^ self currentPackageList = #notInstalled
		ifTrue: ['Install the above package']
		ifFalse: ['Remove the above package']! !

!SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:52'!
installedPackageList
	"Subset of the filtered package list that is installed."
	^self packageList select: [:e | e isInstalled]! !

!SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'!
installedPackagesListIndex
	"Answer the shared index only when the installed pane is current, else 0 (no selection)."
	^ self currentPackageList = #installed
		ifTrue: [self packagesListIndex]
		ifFalse: [0]! !

!SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'!
installedPackagesListIndex: anObject
	"Selection made in the installed pane; switch the current list if needed."
	packagesListIndex := anObject.
	self currentPackageList ~= #installed
		ifTrue: [self currentPackageList: #installed.
			self changed: #currentPackageList].
	self noteChanged! !

!SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'!
isOn
	^false! !

!SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:53'!
notInstalledPackageList
	"Subset of the filtered package list that is not installed."
	^self packageList reject: [:e | e isInstalled]! !

!SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'!
notInstalledPackagesListIndex
	"Answer the shared index only when the not-installed pane is current, else 0."
	^ self currentPackageList = #notInstalled
		ifTrue: [self packagesListIndex]
		ifFalse: [0]! !

!SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:03'!
notInstalledPackagesListIndex: anObject
	"Selection made in the not-installed pane; switch the current list if needed."
	packagesListIndex := anObject.
	self currentPackageList ~= #notInstalled
		ifTrue: [self currentPackageList: #notInstalled.
			self changed: #currentPackageList].
	self changed: #packagesListIndex.	"update my selection"
	self noteChanged.
	self contentsChanged! !
!SMLoaderCategoricalPlus methodsFor: 'private' stamp: 'btr 12/1/2006 17:53'!
noteChanged
	"Refresh both categorical panes, then let the superclass refresh the rest."
	self changed: #installedPackageList.
	self changed: #notInstalledPackageList.
	super noteChanged." self changed: #packageNameList. self changed: #packagesListIndex. self changed: #categoriesForPackage. self contentsChanged."! !

!SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:34'!
packageList
	"Packages belonging to the selected category that also pass every enabled filter."
	^ self packages select: [:e |
		(e categories anySatisfy: [:cat | cat = self selectedCategory])
			and: [(filters ifNil: [#()]) allSatisfy: [:currFilter | (self perform: currFilter) value: e]]]! !

!SMLoaderPlus class methodsFor: 'parts bin' stamp: 'btr 11/22/2006 15:02'!
descriptionForPartsBin
	"Describe the tool for the objects/parts bin."
	^self partName: 'Package Loader'
		categories: #(Tools)
		documentation: 'SqueakMap UI'
! !

!SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47'!
initialize
	"Hook us up in the world menu."
	"self initialize"
	Smalltalk at: #ToolBuilder ifPresent: [:tb |
		self registerInFlapsRegistry.
		(Preferences windowColorFor: #SMLoader) = Color white "not set"
			ifTrue: [ Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor) ].
		(TheWorldMenu respondsTo: #registerOpenCommand:)
			ifTrue: [| oldCmds |
				"Drop any stale 'Package Loader' entries before registering ours."
				oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader'].
				oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first].
				TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]].
	DefaultFilters := OrderedCollection new.
	DefaultCategoriesToFilterIds := OrderedCollection new! !

!SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'!
initializedInstance
	"Answer an opened loader window for the parts bin.
	(Fix: the mail archive had mangled the Point literal '400@400' into
	'400 at 400', which does not parse.)"
	^ (ToolBuilder open: self new) extent: 400@400! !

!SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'!
new
	"Create a SqueakMap loader on the default map."
	^self newOn: SMSqueakMap default! !
!SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'!
newOn: aMap
	"Create a SqueakMap loader on given map."
	^super new on: aMap; yourself! !

!SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'!
newStandAlone
	"Answer a new loader opened as a stand-alone tool window."
	^ ToolBuilder open: self new! !

!SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:13'!
open
	"Create and open a SqueakMap Loader."
	"SMLoaderPlus open"
	^ (Smalltalk at: #ToolBuilder) open: self new! !

!SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'!
openMenuString
	"Label used for the world-menu open command."
	^ 'SqueakMap Catalog'! !

!SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:21'!
openOn: aSqueakMap
	"Create and open a SqueakMap Loader on a given map."
	"self openOn: SqueakMap default"
	^ (Smalltalk at: #ToolBuilder) open: (self newOn: aSqueakMap)! !

!SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:18'!
prototypicalToolWindow
	"Answer a window for flaps/parts-bin use.
	NOTE(review): the cascade sends #applyModelExtent and #yourself to
	ToolBuilder, not to the opened window - looks suspicious but kept as-is;
	verify against the ToolBuilder protocol."
	^ ToolBuilder open: self new; applyModelExtent; yourself! !

!SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:02'!
registerInFlapsRegistry
	"Register the receiver in the system's flaps registry."
	self environment
		at: #Flaps
		ifPresent: [:cl | (cl respondsTo: #registerQuad:forFlapNamed:)
			ifTrue: [cl registerQuad: #(#SMLoader #prototypicalToolWindow 'Package Loader' 'The SqueakMap Package Loader' ) forFlapNamed: 'Tools']]! !

!SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'!
unload
	"Unhook from the world menu and the flaps registry when this package is unloaded."
	(TheWorldMenu respondsTo: #registerOpenCommand:)
		ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString].
	self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! !

!SMLoaderPlus class methodsFor: 'window color' stamp: 'btr 11/22/2006 15:02'!
windowColorSpecification
	"Answer a WindowColorSpec object that declares my preference."
	^WindowColorSpec
		classSymbol: self name
		wording: 'Package Loader'
		brightColor: Color yellow muchLighter duller
		pastelColor: Color yellow veryMuchLighter duller
		helpMessage: 'The SqueakMap Package Loader'! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'!
addFiltersToMenu: aMenu
	"Append one updating check-item per filter spec, plus a 'clear all' entry."
	| filterSymbol help |
	self filterSpecs do: [:filterArray |
		filterSymbol := filterArray second.
		help := filterArray third.
		aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol).
		aMenu balloonTextForLastItem: help].
	aMenu addLine;
		addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages'))
! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
addSelectedCategoryAsFilter
	"Add a new filter that filters on the currently selected category. Make it enabled as default."
	categoriesToFilterIds add: self selectedCategory id! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 16:11'!
askToLoadUpdates
	"Check how old the map is and ask to update it if it is older than 10 days
	or if there is no map on disk.
	NOTE(review): the code compares the checkpoint age against 3 (days) while
	both this comment and the confirmation text say 10 - confirm which
	threshold is intended before changing either."
	| available |
	available := map isCheckpointAvailable.
	(available not
		or: [(Date today subtractDate: (Date fromSeconds: (map directory directoryEntryFor: map lastCheckpointFilename) modificationTime)) > 3])
		ifTrue: [
			(self confirm: (available
				ifTrue: ['The map on disk is more than 10 days old, update it from the Internet?']
				ifFalse: ['There is no map on disk, fetch it from the Internet?']))
					ifTrue: [self loadUpdates]]! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:43'!
browseCacheDirectory
	"Open a FileList2 on the directory for the package or release."
	| item dir win |
	item := self selectedPackageOrRelease ifNil: [^ nil].
	dir := item isPackage
		ifTrue: [map cache directoryForPackage: item]
		ifFalse: [map cache directoryForPackageRelease: item].
	win := FileList2 morphicViewOnDirectory: dir. "withLabel: item name, ' cache directory'."
	win openInWorld! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'!
buildButtonBarWith: aBuilder
	"Build the horizontal button bar from the command specs tagged #all; a
	button tagged #item is enabled only while something is selected."
	^ aBuilder pluggablePanelSpec new
		model: self;
		layout: #horizontal;
		children: (self commandSpecs
			select: [ :spec | spec fourth includes: #all]
			thenCollect: [ :spec |
				aBuilder pluggableActionButtonSpec new
					model: self;
					label: spec first;
					action: spec second;
					help: spec third;
					enabled: ((spec fourth includes: #item) ifTrue: [#hasSelectedItem]);
					yourself]);
		name: #buttonBar;
		yourself! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'!
buildButtonNamed: labelText helpText: balloon action: action
	"Create a transparent pluggable button with the given label, balloon help and action."
	| btn |
	btn := PluggableButtonMorph on: self getState: nil action: action.
	btn color: Color transparent;
		hResizing: #shrinkWrap;
		vResizing: #spaceFill;
		label: labelText;
		setBalloonText: balloon;
		onColor: Color transparent offColor: Color transparent.
	^ btn! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'!
buildCategoriesListWith: aBuilder
	"Create the hierarchical list holding the category tree."
	^ aBuilder pluggableTreeSpec new
		model: self;
		roots: #categoryList;
		getSelectedPath: #selectedCategoryPath;
		getChildren: #categoryChildren:;
		hasChildren: #categoryHasChildren:;
		setSelected: #selectedCategory:;
		menu: #categoriesMenu:;
		label: #categoryLabel:;
		autoDeselect: true;
		wantsDrop: true;
		name: #categoriesList;
		yourself! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'!
buildPackagePaneWith: aBuilder
	"Create the text area to the right in the loader."
	^ aBuilder pluggableTextSpec new
		model: self;
		getText: #itemDescription;
		name: #packagePane;
		yourself! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'!
buildPackagesListWith: aBuilder
	"Create the hierarchical list holding the packages and releases."
	^ aBuilder pluggableTreeSpec new
		model: self;
		roots: #packageList;
		getSelectedPath: #selectedItemPath;
		setSelected: #selectedItem:;
		menu: #packagesMenu:;
		label: #itemLabel:;
		getChildren: #itemChildren:;
		hasChildren: #itemHasChildren:;
		autoDeselect: true;
		wantsDrop: true;
		name: #packagesList;
		yourself! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'!
buildSearchPaneWith: aBuilder
	"Create the search input field."
	^ aBuilder pluggableInputFieldSpec new
		model: self;
		selection: #searchSelection;
		getText: #searchText;
		setText: #findPackage:notifying:;
		name: #search;
		yourself! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:54'!
buildWith: aBuilder
	"Create the package loader window."
	| buttonBarHeight vertDivide horizDivide |
	buttonBarHeight := 0.07.
	vertDivide := 0.6.
	horizDivide := 0.3.
	builder := aBuilder.
	window := builder build: (builder pluggableWindowSpec new
		model: self;
		label: #label;
		children: (OrderedCollection new
			add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight));
			add: ((self buildSearchPaneWith: builder) frame: (0 @ buttonBarHeight corner: horizDivide @ (buttonBarHeight * 2)));
			add: ((self buildPackagesListWith: builder) frame: (0 @ (buttonBarHeight * 2) corner: horizDivide @ vertDivide));
			add: ((self buildCategoriesListWith: builder) frame: (0 @ vertDivide corner: horizDivide @ 1));
			add: ((self buildPackagePaneWith: builder) frame: (horizDivide @ buttonBarHeight corner: 1 @ 1));
			yourself);
		yourself).
	window on: #mouseEnter send: #paneTransition: to: window.
	window on: #mouseLeave send: #paneTransition: to: window.
	window extent: self initialExtent.
	^ window! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'!
cachePackageReleaseAndOfferToCopy
	"Cache package release, then offer to copy it somewhere. Answer the chosen
	file's location after copy, or the cache location if no directory was chosen."
	| release installer newDir newName newFile oldFile oldName |
	release := self selectedPackageOrRelease.
	release isPackageRelease ifFalse: [ self error: 'Should be a package release!!'].
	installer := SMInstaller forPackageRelease: release.
	[UIManager default informUser: 'Caching ' , release asString during: [installer cache]]
		on: Error do: [:ex |
			| msg |
			msg := ex messageText ifNil: [ex asString].
			self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs.
			^nil ].
	installer isCached ifFalse: [self inform: 'Download failed, see transcript for details'. ^nil].
	oldName := installer fullFileName.
	newDir := FileList2 modalFolderSelector: installer directory.
	newDir ifNil: [ ^oldName ].
	newDir = installer directory ifTrue: [ ^oldName ].
	newName := newDir fullNameFor: installer fileName.
	newFile := FileStream newFileNamed: newName.
	newFile ifNil: [ ^oldName ].
	newFile binary.
	oldFile := FileStream readOnlyFileNamed: oldName.
	oldFile ifNil: [ ^nil ].
	oldFile binary.
	[[ newDir copyFile: oldFile toFile: newFile ] ensure: [ oldFile close. newFile close ]]
		on: Error do: [ :ex | ^oldName ].
	^newName! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'!
categoriesMenu: aMenu
	"Answer the categories-list menu."
	self selectedCategory ifNotNil: [aMenu addList: self categorySpecificOptions; addLine].
	aMenu addList: self generalOptions.
	self addFiltersToMenu: aMenu.
	^aMenu! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'!
categoryChildren: aCategory
	"Tree children of a category are its subcategories."
	^ aCategory subCategories! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'!
categoryHasChildren: aCategory
	^ aCategory hasSubCategories! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:46'!
categoryLabel: aCategory
	^ aCategory name! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01'!
categoryList
	"Create the category list for the hierarchical list. We sort the categories
	by name but ensure that 'Squeak versions' is first if it exists."
	| list first |
	list := (map categories select: [:each | each parent isNil]) asArray
		sort: [:c1 :c2 | c1 name <= c2 name].
	first := list detect: [:any | any name = 'Squeak versions'] ifNone: [].
	first ifNotNil: [list := list copyWithout: first.
		list := {first} , list].
	^ list! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'!
categorySpecificOptions
	"Menu entries that depend on the selected category and the current filters."
	| choices |
	choices := OrderedCollection new.
	(categoriesToFilterIds includes: self selectedCategory id)
		ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')]
		ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')].
	categoriesToFilterIds isEmpty
		ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')].
	^ choices! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/22/2006 15:02'!
changeFilters: anObject
	"Update my selection."
	| oldItem index |
	oldItem := self selectedPackageOrRelease.
	filters := anObject.
	self packagesListIndex: ((index := self packageList indexOf: oldItem)
		ifNil: [0] ifNotNil: [index]).
	self noteChanged! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:01'!
commandSpecFor: selector
	"Answer the command spec whose action selector matches."
	^ self commandSpecs detect: [:spec | spec second = selector]! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:00'!
commandSpecs
	"Answer the command specs: label, action selector, balloon help, and the
	contexts (#item / #all) in which the command applies.
	(Fix: corrected the typo 'conf8irming' in the Upgrade All balloon help.)"
	^ #(('Install' installPackageRelease 'Install the latest version from the server.' (item all))
		('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.' (item all))
		('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.' (item all))
		('Copy from cache' cachePackageReleaseAndOfferToCopy 'Download selected release into cache first if needed, and then offer to copy it somewhere else.' (item))
		('Force download into cache' downloadPackageRelease 'Force a download of the selected release into the cache.' (item))
		('Update' loadUpdates 'Update the package index from the servers.' (all))
		('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (confirming each).' (all))
		('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm '' (item))
		('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm '' (item))
		('Copy list' listInPasteBuffer 'Puts the list as text into the clipboard.' (all))
		('Save filters' saveFiltersAsDefault 'Saves the current filters as default.' (all))
		('Help' help 'What is this?' (all)))! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'!
defaultButtonPaneHeight
	"Answer the user's preferred default height for new button panes."
	^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'!
defaultLabel
	^ 'SqueakMap Package Loader'! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'!
downloadPackageRelease
	"Force a download of the selected package release into the cache."
	| release |
	release := self selectedPackageOrRelease.
	release isPackageRelease ifFalse: [ self error: 'Should be a package release!!'].
	[UIManager default informUser: 'Downloading ' , release asString during: [
		(SMInstaller forPackageRelease: release) download] ]
			on: Error do: [:ex |
				| msg |
				msg := ex messageText ifNil: [ex asString].
				self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs]! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
emailPackageMaintainers
	"Send mail to package owner and co-maintainers."
	| item package toAddresses |
	item := self selectedPackageOrRelease ifNil: [^ nil].
	package := item isPackageRelease ifTrue: [item package] ifFalse: [item].
	"(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)"
	toAddresses := '<', package owner email, '>'.
	package maintainers ifNotNil: [
		package maintainers do: [:maintainer |
			toAddresses := toAddresses, ', <', maintainer email, '>']].
	SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
filterAdd: anObject
	"Enable a filter and refresh."
	self changeFilters: (self filters copyWith: anObject)
! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'!
filterAutoInstall
	^[:package | package isInstallable]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'!
filterAvailable
	^[:package | package isAvailable]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'!
filterInstalled
	^[:package | package isInstalled]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'!
filterNotInstalledYet
	^[:package | package isInstalled not]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'!
filterNotUptoDate
	^[:package | package isAvailable]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'!
filterPublished
	^[:package | package isPublished]! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
filterRemove: anObject
	"Disable a filter and refresh."
	self changeFilters: (self filters copyWithout: anObject)
! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'!
filterSafelyAvailable
	^[:package | package isSafelyAvailable]! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/30/2006 21:07'!
filterSpecs
	"Return a specification for the filter menu. Is called each time."
	| specs |
	specs := #(
		#('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically')
		#('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.')
		#('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.')
		#('Installed packages' #filterInstalled 'Display only packages that are installed.')
		#('Published packages' #filterPublished 'Display only packages that have at least one published release.')
	) asOrderedCollection.
	categoriesToFilterIds do: [:catId |
		specs add: {'Packages in ' , (map object: catId) name. catId. 'Display only packages that are in the category.'}].
	^ specs! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'!
filterVersion
	"Ignore spaces in the version string, they're sometimes spurious. Not used anymore."
	^[:package | package categories anySatisfy: [:cat |
		(cat name, '*') match: (Smalltalk version copyWithout: $ ) ]]! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
filters
	^filters! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/24/2006 13:49'!
findPackage: aString notifying: aView
	"Search and select a package with the given (sub) string in the name or description. "
	| index list match descriptions |
	match := aString asString asLowercase.
	index := self packagesListIndex.
	list := self packageNameList.
	list isEmpty ifTrue: [^ self].
	descriptions := self packageList collect: [:e | e description].
	index + 1 to: list size do: [:i |
		(((list at: i) includesSubstring: match caseSensitive: false)
			or: [(descriptions at: i) includesSubstring: match caseSensitive: false])
				ifTrue: [^ self packagesListIndex: i]].
	"wrap around"
	1 to: index do: [:i |
		(((list at: i) includesSubstring: match caseSensitive: false)
			or: [(descriptions at: i) includesSubstring: match caseSensitive: false])
				ifTrue: [^ self packagesListIndex: i]].
	self inform: 'No package matching ' , aString asString! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'!
generalOptions
	"Menu entries available in every context; #- is a menu divider."
	^#( #('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm)
		#('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm)
		#('Put list in paste buffer' listInPasteBuffer)
		#('Save filters as default' saveFiltersAsDefault)
		#- )
! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 18:36'!
hasSelectedItem
	^ self selectedPackageOrRelease notNil! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'!
help
	"Present help text. If there is a web server available, offer to open it.
	Use the WebBrowser registry if possible, or Scamper if available."
	| message browserClass |
	message := 'Welcome to the SqueakMap package loader. The names of packages are followed by versions: (installed -> latest). If there is no arrow, your installed version of the package is the latest. Bold packages and releases have been installed. The checkbox menu items modify which packages you''ll see. Take a look at them - only some packages are shown initially. The options available for a package depend on how it was packaged. Comment on a package by emailing the author or the squeak list.'.
	browserClass := Smalltalk at: #WebBrowser ifPresent: [ :registry | registry default ].
	browserClass := browserClass ifNil: [ Smalltalk at: #Scamper ifAbsent: [ ^self inform: message ]].
	(self confirm: message, ' Would you like to view more detailed help on the SqueakMap swiki page?')
		ifTrue: [ browserClass openOnUrl: 'http://wiki.squeak.org/2726' asUrl]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'!
informException: ex msg: msg
	"Tell the user that an error has occurred. Offer to open debug notifier."
	(self confirm: msg, 'Would you like to open a debugger?')
		ifTrue: [ex pass]! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 05:28'!
initialExtent
	"Default window extent.
	(Fix: the mail archive had mangled the Point literal '500@400' into
	'500 at 400', which does not parse.)"
	^500@400! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
installPackageRelease
	"Install selected package or release. The cache is used."
	| item release |
	item := self selectedPackageOrRelease ifNil: [^ nil].
	item isPackageRelease
		ifTrue: [
			(item isPublished or: [self confirm: 'Selected release is not published yet, install anyway?'])
				ifTrue: [^self installPackageRelease: item]]
		ifFalse: [
			"A package: fall back from current-version release to any published
			release, then to the latest unpublished one - confirming each step."
			release := item lastPublishedReleaseForCurrentSystemVersion.
			release ifNil: [
				(self confirm: 'The package has no published release for your Squeak version, try releases for any Squeak version?')
					ifTrue: [
						release := item lastPublishedRelease.
						release ifNil: [
							(self confirm: 'The package has no published release at all, take the latest of the unpublished releases?')
								ifTrue: [release := item lastRelease]]]].
			release ifNotNil: [^self installPackageRelease: release]]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:53'!
installPackageRelease: aRelease
	"Install a package release. The cache is used."
	| myRelease installer |
	aRelease isCompatibleWithCurrentSystemVersion ifFalse: [
		(self confirm: 'The package you are about to install is not listed as being compatible with your image version (', SystemVersion current majorMinorVersion, '), so the package may not work properly. Do you still want to proceed with the install?')
			ifFalse: [^ self]].
	myRelease := self installedReleaseOfMe.
	installer := SMInstaller forPackageRelease: aRelease.
	[UIManager default informUser: 'Downloading ' , aRelease asString during: [installer download].
	UIManager default informUser: 'Installing ' , aRelease asString during: [
		installer install.
		"If installing upgraded the loader itself, reopen; otherwise just refresh."
		myRelease = self installedReleaseOfMe ifFalse: [self reOpen] ifTrue: [self noteChanged]] ]
			on: Error do: [:ex |
				| msg |
				msg := ex messageText ifNil:[ex asString].
				self informException: ex msg: ('Error occurred during install:\', msg, '\') withCRs].! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'!
installedReleaseOfMe
	"Return the release of the installed package loader."
	^SMSqueakMap default installedReleaseOf: (SMSqueakMap default packageWithId: '941c0108-4039-4071-9863-a8d7d2b3d4a3').! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'!
itemChildren: anItem
	"A package's tree children are its releases; releases have none."
	^ anItem isPackage ifTrue: [anItem releases] ifFalse: [#()]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 19:56'!
itemDescription
	^ self selectedPackageOrRelease
		ifNil: ['']
		ifNotNilDo: [:item | item fullDescription]! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'!
itemHasChildren: anItem
	^ anItem isPackage and: [anItem releases notEmpty]! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'!
itemLabel: anItem
	"Label for a package or release; installed items are shown in bold."
	| label |
	label := anItem isPackage
		ifTrue: [anItem name , (anItem versionLabel
			ifEmpty: ['']
			ifNotEmptyDo: [:lbl | ' (' , anItem versionLabel , ')'])]
		ifFalse: [anItem smartVersion].
	^ anItem isInstalled ifTrue: [label asText allBold] ifFalse: [label]! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/24/2006 17:17'!
label
	^ self labelForShown: (packagesList ifNil: [self packageList])! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
labelForFilter: aFilterSymbol
	^(self filterSpecs detect: [:fs | fs second = aFilterSymbol]) first! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'!
labelForShown: packagesShown
	"Update the label of the window."
	^ self defaultLabel , ' (',
		(packagesShown size < map packages size
			ifTrue: [packagesShown size printString, ' shown out of ']
			ifFalse: ['']) , map packages size printString, ' packages)'! !
!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
listInPasteBuffer
	"Useful when talking with people etc. Uses the map to produce a nice String."
	Clipboard clipboardText: (String streamContents: [:s |
		packagesList do: [:p |
			s nextPutAll: p nameWithVersionLabel; cr ]]) asText! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:31'!
loadUpdates
	"Fetch the latest package index from the servers, reporting any error to the user."
	[UIManager default informUser: 'Loading Updates' during: [
		map loadUpdates.
		self noteChanged ] ]
			on: Error do: [:ex |
				self informException: ex msg: ('Error occurred when updating map:\', ex messageText, '\') withCRs]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/24/2006 14:05'!
noteChanged
	"Invalidate cached lists and tell the widgets to re-read everything."
	filters ifNil: [^ self reOpen].
	map ifNotNil: [packagesList := nil.
		selectedCategory := nil.
		self changed: #categoryList.
		self changed: #packageList.
		self changed: #packagesListIndex.	"update my selection"
		self contentsChanged]! !

!SMLoaderPlus methodsFor: 'initialization' stamp: 'btr 11/22/2006 16:11'!
on: aSqueakMap
	"Initialize instance."
	map := aSqueakMap.
	map synchWithDisk.
	filters := DefaultFilters copy.
	categoriesToFilterIds := DefaultCategoriesToFilterIds copy.
	self askToLoadUpdates! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
package: aPackage filteredByCategory: aCategory
	"Answer true if the package should be shown if we filter on the given
	category. It should be shown if itself or any of its releases has the category."
	| releases |
	releases := aPackage releases.
	^(aPackage hasCategoryOrSubCategoryOf: aCategory)
		or: [ releases anySatisfy: [:rel | rel hasCategoryOrSubCategoryOf: aCategory]]! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'!
packageList
	"Return a list of the SMPackages that should be visible by applying all the
	filters. Also filter based on the currently selected category - if any."
	| list |
	list := packagesList ifNil: [packagesList := self packageListCalculated].
	selectedCategory ifNotNil: [
		list := list select: [:each | self package: each filteredByCategory: selectedCategory]].
	self updateLabel: list.
	^ list! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'!
packageListCalculated
	"Return a list of the SMPackages that should be visible by applying all the
	filters. Also filter based on the currently selected category - if any."
	^ self packages select: [:p |
		filters allSatisfy: [:currFilter |
			currFilter isSymbol
				ifTrue: [(self perform: currFilter) value: p]
				ifFalse: [self package: p filteredByCategory: (map object: currFilter)]]]! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'!
packageNameList
	^ self packageList collect: [:e | e name]! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:30'!
packageSpecificOptions
	"Menu entries that apply to the currently selected package or release."
	| choices packageOrRelease |
	packageOrRelease := self selectedPackageOrRelease.
	choices := OrderedCollection new.
	packageOrRelease isInstallable
		ifTrue: [ choices add: (self commandSpecFor: #installPackageRelease)].
	(packageOrRelease isDownloadable and: [packageOrRelease isCached])
		ifTrue: [ choices add: (self commandSpecFor: #browseCacheDirectory)].
	(packageOrRelease isPackageRelease and: [packageOrRelease isDownloadable])
		ifTrue: [
			choices add: (self commandSpecFor: #cachePackageReleaseAndOfferToCopy).
			choices add: (self commandSpecFor: #downloadPackageRelease)].
	choices add: (self commandSpecFor: #emailPackageMaintainers).
	^ choices! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 16:11'!
packages
	"We request the packages as sorted by name by default."
	^map packagesByName asArray
! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'!
packagesListIndex
	^ self packageList indexOf: self selectedItem! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'!
packagesListIndex: anObject
	"Index 0 means no selection."
	self selectedItem: (anObject = 0 ifFalse: [self packageList at: anObject])! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'!
packagesMenu: aMenu
	"Answer the packages-list menu."
	self selectedPackageOrRelease ifNotNil: [aMenu addList: self packageSpecificOptions; addLine].
	aMenu addList: self generalOptions.
	self addFiltersToMenu: aMenu.
	^aMenu! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:45'!
perform: selector orSendTo: otherTarget
	"Selector was just chosen from a menu by a user. If can respond, then
	perform it on myself. If not, send it to otherTarget, presumably the
	editPane from which the menu was invoked."
	^ (self respondsTo: selector)
		ifTrue: [self perform: selector]
		ifFalse: [super perform: selector orSendTo: otherTarget]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/26/2006 23:22'!
reOpen
	"Close this package loader, probably because it has been updated, and open a new one."
	self inform: 'This package loader has been upgraded and will be closed and reopened to avoid strange side effects.'.
	window delete.
	(Smalltalk at: self class name) open! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
removeCategoryFilters
	"Remove all category filters."
	categoriesToFilterIds := OrderedCollection new! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
removeSelectedCategoryAsFilter
	"Remove the filter that filters on the currently selected category."
	categoriesToFilterIds remove: self selectedCategory id! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
saveFiltersAsDefault
	"Save the current filters as default so that they are selected the next
	time the loader is opened."
	DefaultFilters := filters copy.
	DefaultCategoriesToFilterIds := categoriesToFilterIds copy! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'!
searchSelection
	"Selects all of the default search text so that a type-in overwrites it."
	^ {1. self searchText size}! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'!
searchText
	"A dummy default search text so that the field describes its purpose."
	^ 'Search packages'! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'!
selectedCategory
	"Return selected category."
	^ selectedCategory! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:37'!
selectedCategory: anSMCategory
	"Change the selected category; drop the item selection if it is not in the new category."
	selectedCategory := anSMCategory.
	selectedCategory ifNotNil: [
		(selectedCategory objects includes: self selectedItem)
			ifFalse: [self selectedItem: nil]].
	self changed: #selectedCategory.
	self changed: #packageList! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:52'!
selectedCategoryPath
	"Return selected category's path."
	| path |
	path := #().
	selectedCategory ifNotNil: [
		selectedCategory parent ifNotNilDo: [:p | path := path copyWith: p].
		path := path copyWith: selectedCategory].
	^ path collect: [:cat | self categoryLabel: cat]! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'!
selectedItem
	^ selectedItem! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:27'!
selectedItem: anItem
	"This == workaround protects us from recursion since ToolBuilder's tree
	widgets will always tell us that the selection has been updated when we
	tell it that the selection path has been updated. Cleaner solutions invited."
	anItem == selectedItem ifFalse: [
		selectedItem := anItem.
		self changed: #selectedItemPath.
		self changed: #itemDescription.
		self changed: #hasSelectedItem]! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:16'!
selectedItemPath
	"Tree path to the selection: a release is prefixed by its package."
	| path |
	path := #().
	(selectedItem isKindOf: SMPackageRelease)
		ifTrue: [path := path copyWith: selectedItem package].
	selectedItem ifNotNil: [path := path copyWith: selectedItem].
	^ path! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:03'!
selectedPackageOrRelease
	"Return selected package or package release."
	^ selectedItem! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
showFilterString: aFilterSymbol ^(self stateForFilter: aFilterSymbol), (self labelForFilter: aFilterSymbol)! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! stateForFilter: aFilterSymbol ^(self filters includes: aFilterSymbol) ifTrue: [''] ifFalse: [''] ! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! toggleFilterState: aFilterSymbol ^(self filters includes: (aFilterSymbol)) ifTrue: [self filterRemove: aFilterSymbol] ifFalse: [self filterAdd: aFilterSymbol]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! uncheckFilters "Uncheck all filters." filters := OrderedCollection new. self noteChanged! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! updateLabel: packagesShown "Update the label of the window." window ifNotNilDo: [:w | w setLabel: (self labelForShown: packagesShown)]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:29'! upgradeInstalledPackages "Tries to upgrade all installed packages to the latest published release for this version of Squeak. So this is a conservative approach." | installed old myRelease toUpgrade info | installed := map installedPackages. old := map oldPackages. old isEmpty ifTrue: [ ^self inform: 'All ', installed size printString, ' installed packages are up to date.']. toUpgrade := map upgradeableAndOldPackages. toUpgrade isEmpty ifTrue: [ ^self inform: 'None of the ', old size printString, ' old packages of the ', installed size printString, ' installed can be automatically upgraded. You need to upgrade them manually.']. info := old size < toUpgrade size ifTrue: [ 'Of the ', old size printString, ' old packages only ', toUpgrade size printString, ' can be upgraded. The following packages will not be upgraded: ', (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])] ifFalse: ['All old packages upgradeable.']. 
(self confirm: info, ' About to upgrade the following packages: ', (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]), 'Proceed?') ifTrue: [ myRelease := self installedReleaseOfMe. [UIManager default informUser: 'Upgrading Installed Packages' during: [ map upgradeOldPackages. self inform: toUpgrade size printString, ' packages successfully upgraded.'. myRelease = self installedReleaseOfMe ifFalse: [self reOpen] ifTrue: [self noteChanged]] ] on: Error do: [:ex | self informException: ex msg: ('Error occurred when upgrading old packages:\', ex messageText, '\') withCRs]]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! upgradeInstalledPackagesConfirm "Tries to upgrade all installed packages to the latest published release for this version of Squeak. Confirms on each upgrade." ^ self upgradeInstalledPackagesConfirm: true! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:29'! upgradeInstalledPackagesConfirm: confirmEach "Tries to upgrade all installed packages to the latest published release for this version of Squeak. If confirmEach is true we ask for every upgrade. " | installed old myRelease toUpgrade info | installed := map installedPackages. old := map oldPackages. old isEmpty ifTrue: [^ self inform: 'All ' , installed size printString , ' installed packages are up to date.']. toUpgrade := map upgradeableAndOldPackages. toUpgrade isEmpty ifTrue: [^ self inform: 'None of the ' , old size printString , ' old packages of the ' , installed size printString , ' installed can be automatically upgraded. You need to upgrade them manually.']. info := old size < toUpgrade size ifTrue: ['Of the ' , old size printString , ' old packages only ' , toUpgrade size printString , ' can be upgraded. 
The following packages will not be upgraded: ' , (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])] ifFalse: ['All old packages upgradeable.']. (self confirm: info , ' About to upgrade the following packages: ' , (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]) , 'Proceed?') ifTrue: [myRelease := self installedReleaseOfMe. [UIManager default informUser: 'Upgrading Installed Packages' during: [confirmEach ifTrue: [map upgradeOldPackagesConfirmBlock: [:p | self confirm: 'Upgrade ' , p installedRelease packageNameWithVersion , ' to ' , (p lastPublishedReleaseForCurrentSystemVersionNewerThan: p installedRelease) listName , '?']] ifFalse: [map upgradeOldPackages]. self inform: toUpgrade size printString , ' packages successfully processed.'. myRelease = self installedReleaseOfMe ifTrue: [self noteChanged] ifFalse: [self reOpen]]] on: Error do: [:ex | self informException: ex msg: ('Error occurred when upgrading old packages:\' , ex messageText , '\') withCRs]]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! upgradeInstalledPackagesNoConfirm "Tries to upgrade all installed packages to the latest published release for this version of Squeak. No confirmation on each upgrade." ^ self upgradeInstalledPackagesConfirm: false! ! !SMPackageWrapper methodsFor: 'comparing' stamp: 'dvf 9/21/2003 16:25' prior: 27998626! = anObject ^self withoutListWrapper = anObject withoutListWrapper! ! !SMPackageWrapper methodsFor: 'converting' stamp: 'btr 11/22/2006 00:54' prior: 27998778! asString | string | string := item name, ' (', item versionLabel, ')'. item isInstalled ifTrue: [string := string asText allBold]. "(string includesSubString: '->') ifTrue: [string := string asText color: Color green]." ^ string! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'dvf 10/14/2003 18:58' prior: 27998902! 
contents ^item releases reversed collect: [:e | SMPackageReleaseWrapper with: e]! ! !SMPackageWrapper methodsFor: 'testing' stamp: 'dvf 9/21/2003 16:25' prior: 27999070! hash ^self withoutListWrapper hash! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:55'! help ^ 'This shows all packages with their releases that should be displayed according the current filter.'! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'! label ^ self asString! ! !SMPackageWrapper methodsFor: 'printing' stamp: 'dvf 9/21/2003 16:22' prior: 27999192! printOn: aStream aStream nextPutAll: 'wrapper for: ', item printString! ! !SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849043! = anObject ^self withoutListWrapper = anObject withoutListWrapper! ! !SMCategoryWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 18:53' prior: 27849195! asString ^ item name , ' (' , self numberOfObjects printString , ')'! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'ar 2/9/2004 02:35' prior: 27849301! category ^item! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 21:02' prior: 27849402! contents ^ item subCategories collect: [:n | self class with: n model: n]! ! !SMCategoryWrapper methodsFor: 'model access' stamp: 'btr 11/30/2006 21:02'! getList ^ Array with: (self class with: self contents model: model)! ! !SMCategoryWrapper methodsFor: 'testing' stamp: 'btr 11/30/2006 18:53'! hasContents ^ item hasSubCategories! ! !SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849700! hash ^self withoutListWrapper hash! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:56'! help ^ 'The categories are structured in a tree. Packages and package releases belong to several categories. You can add one or more categories as filters and enable them in the menu.'! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'BJP 11/22/2002 14:17'! model ^model! ! 
!SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 18:53'! numberOfObjects " | total | total _ 0. model allCategoriesDo: [:c | total _ total + c objects size]. ^total" ^item objects size! ! !SMPackageReleaseWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 21:30' prior: 27997393! asString "Show installed releases with a trailing asterisk." | string | string := item smartVersion. "Older SMBase versions don't have isInstalled.'" (item respondsTo: #isInstalled) ifTrue: [item isInstalled ifTrue: [string := (string , ' *') asText allBold]]. ^ string! ! !SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 17:14'! contents ^ #()! ! !SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'! label ^ self asString ! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47' prior: 27944626! initialize "Hook us up in the world menu." "self initialize" Smalltalk at: #ToolBuilder ifAbsent: [self registerInFlapsRegistry. (Preferences windowColorFor: #SMLoader) = Color white ifTrue: ["not set" Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor)]. (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [| oldCmds | oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader']. oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first]. TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]. DefaultFilters := OrderedCollection new. DefaultCategoriesToFilterIds := OrderedCollection new! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52'! openMenuString ^ 'SqueakMap Catalog'! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52' prior: 27945298! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. 
self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! ! !SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:08' prior: 54331069! addFiltersToMenu: aMenu | filterSymbol help | self filterSpecs do: [:filterArray | filterSymbol := filterArray second. help := filterArray third. aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol). aMenu balloonTextForLastItem: help]. aMenu addLine; addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages')) ! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:15' prior: 27927912! browseCacheDirectory "Open a FileList2 on the directory for the package or release." | item dir win | item := self selectedPackageOrRelease ifNil: [^ nil]. item ifNil: [^nil]. dir := item isPackage ifTrue: [model cache directoryForPackage: item] ifFalse: [model cache directoryForPackageRelease: item]. win := FileList2 morphicViewOnDirectory: dir. " withLabel: item name, ' cache directory'." win openInWorld ! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:52'! buildButtonBar | aRow btn | aRow := AlignmentMorph newRow beSticky. aRow color: Color transparent; clipSubmorphs: true. self buttonSpecs do: [:spec | btn := self buildButtonNamed: spec first helpText: spec third action: spec second. aRow addMorphBack: btn] separatedBy: [aRow addTransparentSpacerOfSize: 3 at 0]. ^ aRow! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:27'! buildButtonNamed: labelText helpText: balloon action: action | btn | btn := PluggableButtonMorph on: self getState: nil action: action. btn color: Color transparent; hResizing: #shrinkWrap; vResizing: #spaceFill; label: labelText; setBalloonText: balloon; onColor: Color transparent offColor: Color transparent. ^ btn! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 19:04' prior: 27928394! 
buildMorphicCategoriesList "Create the hierarchical list holding the category tree." | list | list := (SimpleHierarchicalListMorph on: self list: #categoryWrapperList selected: #selectedCategoryWrapper changeSelected: #selectedCategoryWrapper: menu: #categoriesMenu: keystroke: nil) autoDeselect: true; enableDrag: false; enableDrop: true; yourself. list setBalloonText: 'The categories are structured in a tree. Packages and package releases belong to several categories. You can add one or more categories as filters and enable them in the menu.'. "list scroller submorphs do:[:each| list expandAll: each]." list adjustSubmorphPositions. ^ list! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 00:22' prior: 27929139! buildMorphicPackagesList "Create the hierarchical list holding the packages and releases." ^(SimpleHierarchicalListMorph on: self list: #packageWrapperList selected: #selectedItemWrapper changeSelected: #selectedItemWrapper: menu: #packagesMenu: keystroke: nil) autoDeselect: false; enableDrag: false; enableDrop: true; setBalloonText: 'This shows all packages with their releases that should be displayed according the current filter.'; yourself! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:13'! buildPackageButtonBar | aRow | "Somewhat patterned after IRCe's buttonRow method." aRow := AlignmentMorph newRow beSticky. aRow color: Color transparent; clipSubmorphs: true. ^ aRow! ! !SMLoader methodsFor: 'interface' stamp: 'gk 5/5/2006 02:05' prior: 27929686! buildPackagePane "Create the text area to the right in the loader." | ptm | ptm := PluggableTextMorph on: self text: #contents accept: nil readSelection: nil "#packageSelection " menu: nil. ptm setBalloonText: 'This is where the selected package or package release is displayed.'. ptm lock. ^ptm! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:08' prior: 27930070! 
buildSearchPane "Cribbed from MessageNames>>inMorphicWindowWithInitialSearchString:" | typeInView searchButton typeInPane | typeInView := PluggableTextMorph on: self text: nil accept: #findPackage:notifying: readSelection: nil menu: nil. typeInView acceptOnCR: true; vResizing: #spaceFill; hResizing: #spaceFill; setTextMorphToSelectAllOnMouseEnter; askBeforeDiscardingEdits: false; setProperty: #alwaysAccept toValue: true. (typeInView respondsTo: #hideScrollBarsIndefinitely) ifTrue: [typeInView hideScrollBarsIndefinitely] ifFalse: [typeInView hideScrollBarIndefinitely]. searchButton := SimpleButtonMorph new target: typeInView; color: Color white; label: 'Search'; actionSelector: #accept; arguments: #(); yourself. typeInPane := AlignmentMorph newRow. typeInPane vResizing: #shrinkWrap; hResizing: #shrinkWrap; listDirection: #leftToRight; addMorphFront: searchButton; addTransparentSpacerOfSize: 6 @ 0; addMorphBack: typeInView; setBalloonText: 'Type into the pane, then press Search (or hit RETURN) to visit the next package matching what you typed.'. ^ typeInPane! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:24'! buttonSpecs ^ #(('Install' installPackageRelease 'Install the latest version from the server.') ('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.') ('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.') ('Update' loadUpdates 'Update the package index from the servers.') ('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (confirming each).') ('Help' help 'What is this?'))! ! !SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:11' prior: 27936393! categorySpecificOptions | choices | choices := OrderedCollection new. 
(categoriesToFilterIds includes: self selectedCategory id) ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')] ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')]. categoriesToFilterIds isEmpty ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')]. ^ choices! ! !SMLoader methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01' prior: 27933585! categoryWrapperList "Create the wrapper list for the hierarchical list. We sort the categories by name but ensure that 'Squeak versions' is first if it exists." | list first | list := (model categories select: [:each | each parent isNil]) asArray sort: [:c1 :c2 | c1 name <= c2 name]. first := list detect: [:any | any name = 'Squeak versions'] ifNone: []. first ifNotNil: [list := list copyWithout: first. list := {first} , list]. ^ list collect: [:cat | SMCategoryWrapper with: cat model: self]! ! !SMLoader methodsFor: 'filter utilities' stamp: 'gk 7/10/2004 15:45' prior: 27913226! changeFilters: anObject "Update my selection." | oldItem index | oldItem := self selectedPackageOrRelease. filters := anObject. self packagesListIndex: ((index := self packageList indexOf: oldItem) ifNil: [0] ifNotNil: [index]). self noteChanged! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 17:30' prior: 27930584! createWindow | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.3. horizDivide := 0.6. self addMorph: (self buildButtonBar borderWidth: 0) frame: (0.0 @ 0.0 corner: 1.0 @ buttonBarHeight). self addMorph: (self buildSearchPane borderWidth: 0) frame: (0.0 @ buttonBarHeight corner: vertDivide @ searchHeight). self addMorph: (self buildMorphicPackagesList borderWidth: 0) frame: (0.0 @ (buttonBarHeight + searchHeight) corner: vertDivide @ horizDivide). 
self addMorph: (self buildMorphicCategoriesList borderWidth: 0) frame: (0.0 @ horizDivide corner: vertDivide @ 1.0). self addMorph: (self buildPackagePane borderWidth: 0) frame: (vertDivide @ buttonBarHeight corner: 1.0 @ 1.0). self on: #mouseEnter send: #paneTransition: to: self. self on: #mouseLeave send: #paneTransition: to: self! ! !SMLoader methodsFor: 'interface' stamp: 'gk 7/12/2004 11:14' prior: 27931214! defaultButtonPaneHeight "Answer the user's preferred default height for new button panes." ^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! ! !SMLoader methodsFor: 'interface' stamp: 'btr 12/1/2006 02:01'! defaultLabel ^'SqueakMap Package Loader'! ! !SMLoader methodsFor: 'actions' stamp: 'btr 11/22/2006 01:14' prior: 27917579! emailPackageMaintainers "Send mail to package owner and co-maintainers." | item package toAddresses | item := self selectedPackageOrRelease ifNil: [^ nil]. package := item isPackageRelease ifTrue: [item package] ifFalse: [item]. "(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)" toAddresses := '<', package owner email, '>'. package maintainers ifNotNil: [ package maintainers do: [:maintainer | toAddresses := toAddresses, ', <', maintainer email, '>']]. SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! ! !SMLoader methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 00:14' prior: 27923782! filterSpecs "Return a specification for the filter menu. Is called each time." 
| specs | specs := #( #('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically') #('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.') #('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.') #('Installed packages' #filterInstalled 'Display only packages that are installed.') From noreply at buildbot.pypy.org Thu Apr 3 11:32:01 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:01 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: deleted unexplainable except block Message-ID: <20140403093201.72AE11C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r755:b7366f346f2b Date: 2014-01-15 14:23 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/b7366f346f2b/ Log: deleted unexplainable except block diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -191,9 +191,6 @@ print "Interpreter starting" try: self.loop(w_frame) - except ProcessSwitch, e: - # W00t: Can I haz explainaiatain? 
- self.interpret_with_w_frame(e.s_new_context.w_self()) except ReturnFromTopLevel, e: return e.object diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -226,8 +226,10 @@ if hasattr(rgc, "stm_is_enabled"): driver.config.translation.stm = True driver.config.translation.thread = True + driver.config.translation.rweakref = True + driver.config.translation.shared = False # driver.config.translation.jit = True - + print driver.config.translation return entry_point, None From noreply at buildbot.pypy.org Thu Apr 3 11:32:10 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:10 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Fixed sefault by moving STMProcess>>wait to STMProcess>>primWait and calling "self wait." Message-ID: <20140403093210.553961C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r756:da0b32b19670 Date: 2014-01-16 21:54 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/da0b32b19670/ Log: Fixed sefault by moving STMProcess>>wait to STMProcess>>primWait and calling "self wait." Reused priority as lock-variable. Reconstructed STMProcessWrapper inside the new thread instead of passing it via the bootstrapper. diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -1,2 +1,2 @@ - ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. 
(1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! \ No newline at end of file + ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 
2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! 
\ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index 4af3b0fd809db80daefbccdd3d8413855fd13f69..dc45966a305272c20be414ebb0d82b0468d38509 GIT binary patch [cut] diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -81,7 +81,7 @@ # wait for previous thread to start, then set global state def acquire(interp, w_frame, w_stm_process): #bootstrapper.lock.acquire(True) - bootstrapper.lock.wait(1) + bootstrapper.lock.wait(1, 'bootstrap') bootstrapper.interp = interp bootstrapper.w_frame = w_frame bootstrapper.w_stm_process = w_stm_process @@ -93,7 +93,7 @@ bootstrapper.interp = None bootstrapper.w_frame = None bootstrapper.w_stm_process = None - bootstrapper.lock.signal() + bootstrapper.lock.signal('bootstrap') #bootstrapper.lock.release() release = staticmethod(release) @@ -106,16 +106,17 @@ w_stm_process = bootstrapper.w_stm_process assert isinstance(interp, Interpreter) assert isinstance(w_frame, model.W_PointersObject) - assert isinstance(w_stm_process, wrapper.StmProcessWrapper) + assert isinstance(w_stm_process, model.W_PointersObject) bootstrapper.num_threads += 1 bootstrapper.release() # ...aaaaand go! - interp.interpret_with_w_frame(w_frame) + wrapper.StmProcessWrapper(interp.space, w_stm_process).store_lock(1) + + interp.interpret_with_w_frame(w_frame, may_context_switch=False) # Signal waiting processes - print "Signal" - w_stm_process.signal() + wrapper.StmProcessWrapper(interp.space, w_stm_process).signal('thread') # cleanup bootstrapper.num_threads -= 1 @@ -187,13 +188,13 @@ rthread.start_new_thread(bootstrapper.bootstrap, ()) print "Parent interpreter resuming" - def interpret_with_w_frame(self, w_frame): + def interpret_with_w_frame(self, w_frame, may_context_switch=True): print "Interpreter starting" try: - self.loop(w_frame) + self.loop(w_frame, may_context_switch) except ProcessSwitch, e: # W00t: Can I haz explainaiatain? 
- self.interpret_with_w_frame(e.s_new_context.w_self()) + self.interpret_with_w_frame(e.s_new_context.w_self(),may_context_switch) except ReturnFromTopLevel, e: return e.object @@ -204,7 +205,7 @@ return conftest.option.bc_trace return conftest.option.prim_trace - def loop(self, w_active_context): + def loop(self, w_active_context, may_context_switch=True): # just a trampoline for the actual loop implemented in c_loop self._loop = True s_new_context = w_active_context.as_context_get_shadow(self.space) @@ -214,7 +215,7 @@ s_sender = s_new_context.s_sender() try: - self.stmloop(s_new_context) + self.stmloop(s_new_context, may_context_switch) except StackOverflow, e: s_new_context = e.s_context except Return, nlr: @@ -233,8 +234,8 @@ def stmloop(self, s_context, may_context_switch=True): while True: - if rstm.should_break_transaction(False): - print "will break transaction" + # if rstm.should_break_transaction(): + # print "will break transaction" # STM-ONLY JITDRIVER! self.jit_driver.jit_merge_point( @@ -249,7 +250,7 @@ if not jit.we_are_jitted() and may_context_switch: self.quick_check_for_interrupt(s_context) - method = s_context.s_method() + while True: pc = s_context.pc() if pc < old_pc: diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1403,7 +1403,7 @@ from rpython.rlib import rstm print "STM_SIGNAL primitive called" - wrapper.StmProcessWrapper(interp.space, w_rcvr).signal() + wrapper.StmProcessWrapper(interp.space, w_rcvr).signal('primitive') #rstm.should_break_transaction() @@ -1414,7 +1414,7 @@ print "STM_WAIT primitive called" # wait(0) behaves like a barrier, it waits for but does not acquire the lock - wrapper.StmProcessWrapper(interp.space, w_rcvr).wait(0) + wrapper.StmProcessWrapper(interp.space, w_rcvr).wait(0, 'primitive') print "STM Rendezvous" print "Should break: %s" % rstm.should_break_transaction() diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ 
b/spyvm/wrapper.py @@ -120,29 +120,40 @@ class PartialBarrier(object): _mixin_ = True - def signal(self): + def signal(self, what='unknown'): + print "[lock] signal %s" % what self.store_lock(0) #rstm.should_break_transaction() - def _test_and_set(self, i): + def _test_and_set(self, i, what=''): rstm.increment_atomic() old_value = self.lock() self.store_lock(i) rstm.decrement_atomic() + print "[lock] read %s, set %s, %s" % (old_value, i, what) return old_value # i = 0 just waits but does not acquire (Barrier) # i = 1 waits and acquires (Mutex) - def wait(self, i): + def wait(self, i, what=''): import time - while self._test_and_set(i): + print '[lock] %s waits' % what + + # first, we have to wait for the lock + while self._test_and_set(1, what): time.sleep(0.005) #rstm.should_break_transaction() + # then we can modify the lock (i.e. setting it back to 0) + self.store_lock(i) + + print '[lock] %s continues' % what + class StmProcessWrapper(ProcessWrapper, PartialBarrier): - lock, store_lock = make_int_getter_setter(8) + # Mis-using priority as lock, we don't need prios :P + lock, store_lock = make_int_getter_setter(2) def put_to_sleep(self): # Must not queue @@ -172,7 +183,7 @@ print "Breaking interpreter loop for forking" # we need to pass control to the interpreter loop here self.store_lock(1) - raise StmProcessFork(w_frame, self) + raise StmProcessFork(w_frame, self._w_self) class LinkedListWrapper(Wrapper): From noreply at buildbot.pypy.org Thu Apr 3 11:32:11 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:11 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: merged stuff Message-ID: <20140403093211.7CC211C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r757:e496904283be Date: 2014-01-16 22:06 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/e496904283be/ Log: merged stuff diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -191,10 
+191,7 @@ def interpret_with_w_frame(self, w_frame, may_context_switch=True): print "Interpreter starting" try: - self.loop(w_frame, may_context_switch) - except ProcessSwitch, e: - # W00t: Can I haz explainaiatain? - self.interpret_with_w_frame(e.s_new_context.w_self(),may_context_switch) + self.loop(w_frame) except ReturnFromTopLevel, e: return e.object diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -228,8 +228,11 @@ driver.config.translation.thread = True driver.config.translation.gc = "stmgc" driver.config.translation.gcrootfinder = 'stm' + driver.config.translation.rweakref = True + driver.config.translation.shared = False + # driver.config.translation.jit = True - + print driver.config.translation return entry_point, None From noreply at buildbot.pypy.org Thu Apr 3 11:32:20 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Implemented BlockClosure >> atomic. Message-ID: <20140403093220.66ED11C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r758:d053e5d75087 Date: 2014-01-16 23:11 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d053e5d75087/ Log: Implemented BlockClosure >> atomic. Usage: [sum := sum + 1] atomic value. diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -1,2 +1,2 @@ - ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. 
[(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! \ No newline at end of file + ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! 
wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! !BasicClassOrganizer methodsFor: 'accessing' stamp: 'toma 1/16/2014 22:18' prior: 17298983! classComment classComment ifNil: [^ '']. ^ [classComment text ifNil: ['']] on: Error do: [^ ''].! ! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'SPy-Benchmarks'! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: 'Shared' poolDictionaries: '' category: 'SPy-Benchmarks'! !SPySTM class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:22'! shared ^self Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23' prior: 33557264! shared ^Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23'! shared: aValue Shared := aValue! ! ----SNAPSHOT----{16 January 2014 . 10:24:08 pm} Squeak4.5-12568.image priorSource: 2221! Object subclass: #STMAtomic instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'nil' stamp: 'toma 1/16/2014 22:28'! primEnter ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557810! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557933! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! value self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29' prior: 33558376! value | result | self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558498! value | result | self primEnter. result := self. self primLeave ! ! 
!STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558634! value | result | self primEnter. result := self. self primLeave. ! ! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33558803! value | result | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33559111! value | result error | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559293! value | result error | self primEnter. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559481! value | result error | self primEnter. error := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559707! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559950! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560207! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560465! value | result error | self primEnter. 
error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33560754! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561047! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561339! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass]. ^result ! ! !STMAtomic class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself.! ! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36' prior: 33561909! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! !BlockClosure methodsFor: 'nil' stamp: 'toma 1/16/2014 22:37'! atomic ^STMAtomic from: self! ! SystemOrganization addCategory: #'Kernel-STM'! SystemOrganization classify: #STMAtomic under: #'Kernel-STM'! SystemOrganization classify: #STMProcess under: #'Kernel-STM'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40'! benchStmAtomic ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562476! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562577! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562700! benchStmAtomic | sum | sum := 0. ! ! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41'! benchStmParallel | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41' prior: 33562933! benchStmParallel | sum | sum := 0. (1 to: self) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563060! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563258! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(i to: (i + 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563453! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563655! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] parallelFork ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563872! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564102! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564334! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564566! benchStmParallel | sum t | sum := 0. (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564800! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565051! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait.]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565319! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:47:04 pm} Squeak4.5-12568.image priorSource: 3090! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:56' prior: 33562824! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566018! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33565614! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566678! benchStmParallel | sum threads | sum := 0. 
threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:58:17 pm} Squeak4.5-12568.image priorSource: 11414! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01' prior: 33561633! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! [ 1 + 1 ] atomic value! [ 1 + 1 ] atomic value! ----SNAPSHOT----{16 January 2014 . 11:03:21 pm} Squeak4.5-12568.image priorSource: 12802! ----SNAPSHOT----{16 January 2014 . 11:03:41 pm} Squeak4.5-12568.image priorSource: 13325! ----SNAPSHOT----{16 January 2014 . 11:03:45 pm} Squeak4.5-12568.image priorSource: 13416! \ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index dc45966a305272c20be414ebb0d82b0468d38509..52d50d90e1a3266b6a85440b56a284a4acc08984 GIT binary patch [cut] diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1235,6 +1235,8 @@ STM_FORK = 1299 # 787 (+ 512) # resume in native thread STM_SIGNAL = 1300 # 788 STM_WAIT = 1301 # 789 +STM_ATOMIC_ENTER = 1302 # 790 +STM_ATOMIC_LEAVE = 1303 # 791 SUSPEND = 88 FLUSH_CACHE = 89 @@ -1418,6 +1420,20 @@ print "STM Rendezvous" print "Should break: %s" % rstm.should_break_transaction() + at expose_primitive(STM_ATOMIC_ENTER, unwrap_spec=[object], no_result=True) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rstm + + print "STM_ATOMIC_ENTER primitive called" + rstm.increment_atomic() + + at expose_primitive(STM_ATOMIC_LEAVE, unwrap_spec=[object], no_result=True) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rstm + + print "STM_ATOMIC_LEAVE primitive called" 
+ rstm.decrement_atomic() + @expose_primitive(SUSPEND, unwrap_spec=[object], result_is_new_frame=True, clean_stack=False) def func(interp, s_frame, w_rcvr): From noreply at buildbot.pypy.org Thu Apr 3 11:32:29 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:29 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Implemented Futures. Message-ID: <20140403093229.385071C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r759:d435cc445876 Date: 2014-01-16 23:49 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d435cc445876/ Log: Implemented Futures. usage: f := [41 + 1] async. ^f value diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -1,2 +1,2 @@ - ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! 
!STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! !BasicClassOrganizer methodsFor: 'accessing' stamp: 'toma 1/16/2014 22:18' prior: 17298983! classComment classComment ifNil: [^ '']. ^ [classComment text ifNil: ['']] on: Error do: [^ ''].! ! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'SPy-Benchmarks'! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: 'Shared' poolDictionaries: '' category: 'SPy-Benchmarks'! !SPySTM class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:22'! shared ^self Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23' prior: 33557264! shared ^Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23'! shared: aValue Shared := aValue! ! ----SNAPSHOT----{16 January 2014 . 10:24:08 pm} Squeak4.5-12568.image priorSource: 2221! 
Object subclass: #STMAtomic instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'nil' stamp: 'toma 1/16/2014 22:28'! primEnter ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557810! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557933! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! value self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29' prior: 33558376! value | result | self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558498! value | result | self primEnter. result := self. self primLeave ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558634! value | result | self primEnter. result := self. self primLeave. ! ! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33558803! value | result | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33559111! value | result error | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559293! value | result error | self primEnter. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559481! value | result error | self primEnter. 
error := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559707! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559950! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560207! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560465! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33560754! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561047! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561339! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass]. ^result ! ! 
!STMAtomic class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself.! ! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36' prior: 33561909! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! !BlockClosure methodsFor: 'nil' stamp: 'toma 1/16/2014 22:37'! atomic ^STMAtomic from: self! ! SystemOrganization addCategory: #'Kernel-STM'! SystemOrganization classify: #STMAtomic under: #'Kernel-STM'! SystemOrganization classify: #STMProcess under: #'Kernel-STM'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40'! benchStmAtomic ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562476! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562577! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562700! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41'! benchStmParallel | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41' prior: 33562933! benchStmParallel | sum | sum := 0. (1 to: self) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563060! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563258! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(i to: (i + 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563453! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563655! benchStmParallel | sum | sum := 0. 
(1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] parallelFork ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563872! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564102! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564334! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564566! benchStmParallel | sum t | sum := 0. (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564800! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565051! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait.]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565319! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:47:04 pm} Squeak4.5-12568.image priorSource: 3090! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:56' prior: 33562824! 
benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566018! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33565614! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566678! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:58:17 pm} Squeak4.5-12568.image priorSource: 11414! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01' prior: 33561633! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! [ 1 + 1 ] atomic value! [ 1 + 1 ] atomic value! ----SNAPSHOT----{16 January 2014 . 11:03:21 pm} Squeak4.5-12568.image priorSource: 12802! ----SNAPSHOT----{16 January 2014 . 11:03:41 pm} Squeak4.5-12568.image priorSource: 13325! ----SNAPSHOT----{16 January 2014 . 11:03:45 pm} Squeak4.5-12568.image priorSource: 13416! \ No newline at end of file + ----STARTUP----{15 January 2014 . 
2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! 
!STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! !BasicClassOrganizer methodsFor: 'accessing' stamp: 'toma 1/16/2014 22:18' prior: 17298983! classComment classComment ifNil: [^ '']. ^ [classComment text ifNil: ['']] on: Error do: [^ ''].! ! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'SPy-Benchmarks'! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: 'Shared' poolDictionaries: '' category: 'SPy-Benchmarks'! !SPySTM class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:22'! shared ^self Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23' prior: 33557264! shared ^Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23'! shared: aValue Shared := aValue! ! ----SNAPSHOT----{16 January 2014 . 10:24:08 pm} Squeak4.5-12568.image priorSource: 2221! Object subclass: #STMAtomic instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'nil' stamp: 'toma 1/16/2014 22:28'! primEnter ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557810! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557933! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! value self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29' prior: 33558376! value | result | self primEnter. ! ! 
!STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558498! value | result | self primEnter. result := self. self primLeave ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558634! value | result | self primEnter. result := self. self primLeave. ! ! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33558803! value | result | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33559111! value | result error | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559293! value | result error | self primEnter. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559481! value | result error | self primEnter. error := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559707! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559950! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560207! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. 
error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560465! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33560754! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561047! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561339! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass]. ^result ! ! !STMAtomic class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself.! ! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36' prior: 33561909! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! !BlockClosure methodsFor: 'nil' stamp: 'toma 1/16/2014 22:37'! atomic ^STMAtomic from: self! ! SystemOrganization addCategory: #'Kernel-STM'! SystemOrganization classify: #STMAtomic under: #'Kernel-STM'! SystemOrganization classify: #STMProcess under: #'Kernel-STM'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40'! benchStmAtomic ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562476! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562577! 
benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562700! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41'! benchStmParallel | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41' prior: 33562933! benchStmParallel | sum | sum := 0. (1 to: self) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563060! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563258! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(i to: (i + 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563453! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563655! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] parallelFork ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563872! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564102! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564334! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564566! 
benchStmParallel | sum t | sum := 0. (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564800! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565051! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait.]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565319! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:47:04 pm} Squeak4.5-12568.image priorSource: 3090! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:56' prior: 33562824! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566018! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33565614! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566678! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:58:17 pm} Squeak4.5-12568.image priorSource: 11414! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01' prior: 33561633! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! [ 1 + 1 ] atomic value! [ 1 + 1 ] atomic value! ----SNAPSHOT----{16 January 2014 . 11:03:21 pm} Squeak4.5-12568.image priorSource: 12802! ----SNAPSHOT----{16 January 2014 . 11:03:41 pm} Squeak4.5-12568.image priorSource: 13325! ----SNAPSHOT----{16 January 2014 . 11:03:45 pm} Squeak4.5-12568.image priorSource: 13416! BlockClosure organization addCategory: #STM! BlockClosure organization classify: #atomic under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 22:37' prior: 33562201! atomic ^STMAtomic from: self! ! BlockClosure organization classify: #newSTMProcess under: #STM! !BlockClosure methodsFor: 'STM' stamp: '' prior: 42643259! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: '' prior: 33568373! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! BlockClosure organization classify: #parallelFork under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 21:13' prior: 33556059! parallelFork ^ (self newSTMProcess) fork; yourself! ! Object subclass: #STMFuture instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! 
Object subclass: #STMFuture instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block ^ block! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block: anObject block := anObject! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/16/2014 23:34'! invoke ! ! Object subclass: #STMFuture instanceVariableNames: 'block process' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process ^ process! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process: anObject process := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35' prior: 33569341! invoke self process: (self block parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35'! value ! ! Object subclass: #STMFuture instanceVariableNames: 'block process result' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result ^ result! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result: anObject result := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569785! invoke self process: ([self result: self block value] parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569914! value self process wait.! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33570525! value self process wait. ^self result! ! !STMFuture class methodsFor: 'nil' stamp: 'toma 1/16/2014 23:37'! invoke: aBlock ^(STMFuture new) block: aBlock; invoke; yourself! ! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 23:38'! async ^STMFuture invoke: self! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:38'! benchStmFuture ! 
! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:39' prior: 33570998! benchStmFuture | futures | ! ! (1 to: 100) sum! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:40' prior: 33571101! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum ] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571236! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571416! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async] ! ! (1 to: 100) inject: 0 into: [ :i :k | i + k]! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:43' prior: 33571596! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :s :f | s + (f value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33571825! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33572069! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 11:45:18 pm} Squeak4.5-12568.image priorSource: 13507! ----SNAPSHOT----{16 January 2014 . 11:45:23 pm} Squeak4.5-12568.image priorSource: 18085! ----SNAPSHOT----{16 January 2014 . 11:46:35 pm} Squeak4.5-12568.image priorSource: 18176! 
\ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index 52d50d90e1a3266b6a85440b56a284a4acc08984..61496f07de6c50ce9c9e38613e4ad2e0846e4c4d GIT binary patch [cut] From noreply at buildbot.pypy.org Thu Apr 3 11:32:38 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:38 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Some experiments with actor-like parallelism Message-ID: <20140403093238.4A0241C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r760:a81003656c0e Date: 2014-01-17 01:25 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a81003656c0e/ Log: Some experiments with actor-like parallelism diff too long, truncating to 2000 out of 2072 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -1,2 +1,2 @@ - ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! 
----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! !BasicClassOrganizer methodsFor: 'accessing' stamp: 'toma 1/16/2014 22:18' prior: 17298983! classComment classComment ifNil: [^ '']. ^ [classComment text ifNil: ['']] on: Error do: [^ ''].! ! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'SPy-Benchmarks'! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: 'Shared' poolDictionaries: '' category: 'SPy-Benchmarks'! !SPySTM class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:22'! shared ^self Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23' prior: 33557264! shared ^Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23'! shared: aValue Shared := aValue! ! ----SNAPSHOT----{16 January 2014 . 
10:24:08 pm} Squeak4.5-12568.image priorSource: 2221! Object subclass: #STMAtomic instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'nil' stamp: 'toma 1/16/2014 22:28'! primEnter ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557810! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557933! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! value self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29' prior: 33558376! value | result | self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558498! value | result | self primEnter. result := self. self primLeave ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558634! value | result | self primEnter. result := self. self primLeave. ! ! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33558803! value | result | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33559111! value | result error | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559293! value | result error | self primEnter. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! 
!STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559481! value | result error | self primEnter. error := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559707! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559950! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560207! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560465! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33560754! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561047! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561339! value | result error | self primEnter. error := nil. 
result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass]. ^result ! ! !STMAtomic class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself.! ! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36' prior: 33561909! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! !BlockClosure methodsFor: 'nil' stamp: 'toma 1/16/2014 22:37'! atomic ^STMAtomic from: self! ! SystemOrganization addCategory: #'Kernel-STM'! SystemOrganization classify: #STMAtomic under: #'Kernel-STM'! SystemOrganization classify: #STMProcess under: #'Kernel-STM'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40'! benchStmAtomic ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562476! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562577! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562700! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41'! benchStmParallel | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41' prior: 33562933! benchStmParallel | sum | sum := 0. (1 to: self) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563060! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563258! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(i to: (i + 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563453! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] ]! 
! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563655! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] parallelFork ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563872! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564102! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564334! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564566! benchStmParallel | sum t | sum := 0. (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564800! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565051! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait.]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565319! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 
10:47:04 pm} Squeak4.5-12568.image priorSource: 3090! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:56' prior: 33562824! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566018! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33565614! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566678! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:58:17 pm} Squeak4.5-12568.image priorSource: 11414! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01' prior: 33561633! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! [ 1 + 1 ] atomic value! [ 1 + 1 ] atomic value! ----SNAPSHOT----{16 January 2014 . 11:03:21 pm} Squeak4.5-12568.image priorSource: 12802! ----SNAPSHOT----{16 January 2014 . 11:03:41 pm} Squeak4.5-12568.image priorSource: 13325! ----SNAPSHOT----{16 January 2014 . 
11:03:45 pm} Squeak4.5-12568.image priorSource: 13416! BlockClosure organization addCategory: #STM! BlockClosure organization classify: #atomic under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 22:37' prior: 33562201! atomic ^STMAtomic from: self! ! BlockClosure organization classify: #newSTMProcess under: #STM! !BlockClosure methodsFor: 'STM' stamp: '' prior: 42643259! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: '' prior: 33568373! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! BlockClosure organization classify: #parallelFork under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 21:13' prior: 33556059! parallelFork ^ (self newSTMProcess) fork; yourself! ! Object subclass: #STMFuture instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMFuture instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block ^ block! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block: anObject block := anObject! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/16/2014 23:34'! invoke ! ! Object subclass: #STMFuture instanceVariableNames: 'block process' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process ^ process! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process: anObject process := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35' prior: 33569341! invoke self process: (self block parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35'! value ! ! 
Object subclass: #STMFuture instanceVariableNames: 'block process result' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result ^ result! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result: anObject result := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569785! invoke self process: ([self result: self block value] parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569914! value self process wait.! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33570525! value self process wait. ^self result! ! !STMFuture class methodsFor: 'nil' stamp: 'toma 1/16/2014 23:37'! invoke: aBlock ^(STMFuture new) block: aBlock; invoke; yourself! ! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 23:38'! async ^STMFuture invoke: self! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:38'! benchStmFuture ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:39' prior: 33570998! benchStmFuture | futures | ! ! (1 to: 100) sum! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:40' prior: 33571101! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum ] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571236! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571416! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async] ! ! (1 to: 100) inject: 0 into: [ :i :k | i + k]! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:43' prior: 33571596! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :s :f | s + (f value)] ! ! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33571825! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33572069! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 11:45:18 pm} Squeak4.5-12568.image priorSource: 13507! ----SNAPSHOT----{16 January 2014 . 11:45:23 pm} Squeak4.5-12568.image priorSource: 18085! ----SNAPSHOT----{16 January 2014 . 11:46:35 pm} Squeak4.5-12568.image priorSource: 18176! \ No newline at end of file + ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! 
!STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! !BasicClassOrganizer methodsFor: 'accessing' stamp: 'toma 1/16/2014 22:18' prior: 17298983! classComment classComment ifNil: [^ '']. ^ [classComment text ifNil: ['']] on: Error do: [^ ''].! ! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'SPy-Benchmarks'! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: 'Shared' poolDictionaries: '' category: 'SPy-Benchmarks'! !SPySTM class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:22'! shared ^self Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23' prior: 33557264! shared ^Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23'! shared: aValue Shared := aValue! ! ----SNAPSHOT----{16 January 2014 . 10:24:08 pm} Squeak4.5-12568.image priorSource: 2221! 
Object subclass: #STMAtomic instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'nil' stamp: 'toma 1/16/2014 22:28'! primEnter ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557810! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557933! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! value self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29' prior: 33558376! value | result | self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558498! value | result | self primEnter. result := self. self primLeave ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558634! value | result | self primEnter. result := self. self primLeave. ! ! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33558803! value | result | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33559111! value | result error | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559293! value | result error | self primEnter. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559481! value | result error | self primEnter. 
error := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559707! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559950! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560207! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560465! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33560754! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561047! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561339! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass]. ^result ! ! 
!STMAtomic class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself.! ! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36' prior: 33561909! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! !BlockClosure methodsFor: 'nil' stamp: 'toma 1/16/2014 22:37'! atomic ^STMAtomic from: self! ! SystemOrganization addCategory: #'Kernel-STM'! SystemOrganization classify: #STMAtomic under: #'Kernel-STM'! SystemOrganization classify: #STMProcess under: #'Kernel-STM'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40'! benchStmAtomic ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562476! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562577! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562700! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41'! benchStmParallel | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41' prior: 33562933! benchStmParallel | sum | sum := 0. (1 to: self) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563060! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563258! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(i to: (i + 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563453! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563655! benchStmParallel | sum | sum := 0. 
(1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] parallelFork ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563872! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564102! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564334! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564566! benchStmParallel | sum t | sum := 0. (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564800! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565051! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait.]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565319! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:47:04 pm} Squeak4.5-12568.image priorSource: 3090! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:56' prior: 33562824! 
benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566018! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33565614! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566678! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:58:17 pm} Squeak4.5-12568.image priorSource: 11414! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01' prior: 33561633! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! [ 1 + 1 ] atomic value! [ 1 + 1 ] atomic value! ----SNAPSHOT----{16 January 2014 . 11:03:21 pm} Squeak4.5-12568.image priorSource: 12802! ----SNAPSHOT----{16 January 2014 . 11:03:41 pm} Squeak4.5-12568.image priorSource: 13325! ----SNAPSHOT----{16 January 2014 . 11:03:45 pm} Squeak4.5-12568.image priorSource: 13416! BlockClosure organization addCategory: #STM! BlockClosure organization classify: #atomic under: #STM! 
!BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 22:37' prior: 33562201! atomic ^STMAtomic from: self! ! BlockClosure organization classify: #newSTMProcess under: #STM! !BlockClosure methodsFor: 'STM' stamp: '' prior: 42643259! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: '' prior: 33568373! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! BlockClosure organization classify: #parallelFork under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 21:13' prior: 33556059! parallelFork ^ (self newSTMProcess) fork; yourself! ! Object subclass: #STMFuture instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMFuture instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block ^ block! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block: anObject block := anObject! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/16/2014 23:34'! invoke ! ! Object subclass: #STMFuture instanceVariableNames: 'block process' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process ^ process! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process: anObject process := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35' prior: 33569341! invoke self process: (self block parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35'! value ! ! Object subclass: #STMFuture instanceVariableNames: 'block process result' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result ^ result! ! 
!STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result: anObject result := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569785! invoke self process: ([self result: self block value] parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569914! value self process wait.! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33570525! value self process wait. ^self result! ! !STMFuture class methodsFor: 'nil' stamp: 'toma 1/16/2014 23:37'! invoke: aBlock ^(STMFuture new) block: aBlock; invoke; yourself! ! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 23:38'! async ^STMFuture invoke: self! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:38'! benchStmFuture ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:39' prior: 33570998! benchStmFuture | futures | ! ! (1 to: 100) sum! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:40' prior: 33571101! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum ] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571236! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571416! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async] ! ! (1 to: 100) inject: 0 into: [ :i :k | i + k]! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:43' prior: 33571596! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :s :f | s + (f value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33571825! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. 
sum := futures inject: 0 into: [ :next :each | next + (each value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33572069! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 11:45:18 pm} Squeak4.5-12568.image priorSource: 13507! ----SNAPSHOT----{16 January 2014 . 11:45:23 pm} Squeak4.5-12568.image priorSource: 18085! ----SNAPSHOT----{16 January 2014 . 11:46:35 pm} Squeak4.5-12568.image priorSource: 18176! Object subclass: #STMWorker instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMWorker instanceVariableNames: 'queue' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33570359! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ ]! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33573142! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ ]! ! self! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33573350! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ self error: 'Future already invoked' ]! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/17/2014 00:24'! initialize super initialize.! ! STMFuture removeSelector: #initialize! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26' prior: 33570648! value self process ifNotNil: [ self process wait. ^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26' prior: 33573946! value self process ifNotNil: [ self wait. 
^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26'! wait self process wait.! ! !STMWorker methodsFor: 'nil' stamp: 'toma 1/17/2014 00:28'! submit: aBlock callback: aUnaryBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30'! send: aSymbol with: anArgument ! ! STMWorker removeSelector: #submit:callback:! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30'! on: aSymbol do: aBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30' prior: 33574724! on: aSymbol do: aBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:31'! onMessage: aSymbol do: aBlock ! ! STMWorker removeSelector: #on:do:! Object subclass: #STMWorker instanceVariableNames: 'queue handlers' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMWorker methodsFor: 'nil' stamp: 'toma 1/17/2014 00:31'! initialize ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:31' prior: 33575225! initialize handlers := Dictionary new.! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue ^ queue! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue: anObject queue := anObject! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers ^ handlers! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers: anObject handlers := anObject! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33575335! initialize self handlers: Dictionary new.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33574951! onMessage: aSymbol do: aBlock self handlers at: aSymbol put: aBlock! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33574566! send: aSymbol with: anArgument ! ! 
!STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33576170! send: aSymbol with: anArgument ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:34' prior: 33576299! send: aSymbol with: anArgument ! ! Object subclass: #STMMessage instanceVariableNames: 'queue handlers' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMMessage instanceVariableNames: 'name arg' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMMessage instanceVariableNames: 'name args' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! {1. 2.}! {1. 2. World.}! [:i :j | i + j]! [:i :j | i + j] valueWithArguments: {1. 2.}! !STMMessage class methodsFor: 'nil' stamp: 'toma 1/17/2014 00:39'! named: aSymbol withArgs: anArray ^(self new) name: aSymbol; arguments: anArray; yourself! ! Object subclass: #STMMessage instanceVariableNames: 'name arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! name: anObject name := anObject! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments ^ arguments! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments: anObject arguments := anObject! ! Object subclass: #STMMessage instanceVariableNames: 'messageName arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName ^ messageName! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName: anObject messageName := anObject! ! STMMessage removeSelector: #name:! !STMMessage class methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:40' prior: 33577040! named: aSymbol withArgs: anArray ^(self new) messageName: aSymbol; arguments: anArray; yourself! ! a := {1. 2. 3.}! a := OrderedCollection new! a add: 5! a! a add: 5! 
a! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:44' prior: 33576429! send: aSymbol with: anArgument self queue! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:44' prior: 33575864! initialize self handlers: Dictionary new. self queue: Stack new.! ! a := Stack new! a := Stack new! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:47' prior: 33578512! initialize self handlers: Dictionary new. self queue: LinkedList new.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33578372! send: aSymbol with: anArgument self queue addLast: (STMMessage named: aSymbol with: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33578879! send: aSymbol with: anArgument self queue addLast: (STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33579075! send: aSymbol with: anArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument with: anotherArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument and: anotherArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! STMWorker removeSelector: #send:with:with:! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:50'! loop ! ! 
Object subclass: #STMWorker instanceVariableNames: 'queue handlers active' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active ^ active! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active: anObject active := anObject! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:50' prior: 33580221! loop ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:51' prior: 33580665! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:51' prior: 33580769! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33580922! loop self active: true. [self active] whileTrue: [ [self queue isEmpty] ifFalse: [ ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33581078! loop self active: true. [self active] whileTrue: [ [self queue isEmpty] ifFalse: [ | message | [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33581273! loop self active: true. [self active] whileTrue: [ | message | [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33581519! loop self active: true. [self active] whileTrue: [ | message | message := nil. [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33581768! loop self active: true. [self active] whileTrue: [ | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value. ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33582035! loop self active: true. 
[self active] whileTrue: [ | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value. ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54'! receive ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582318! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582587! receive | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582853! receive | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:55' prior: 33575531! queue: aMessage ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:55' prior: 33583328! queue: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56' prior: 33583443! queue: anObject queue := anObject! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56'! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579275! send: aSymbol with: anArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579689! send: aSymbol with: anArgument and: anotherArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579960! 
send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! STMWorker organization classify: #schedule: under: #'as yet unclassified'! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:56' prior: 33583697! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:00' prior: 33582694! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01' prior: 33584800! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01' prior: 33584997! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! stop self active: False! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! start [self loop] parallelFork! ! w := STMWorker new! w onMessage: #test do: [:i | Transcript show: i]! w start! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:03' prior: 33583086! receive | message | message := nil. [ (self queue isEmpty) ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! w stop! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:04' prior: 33585522! stop self active: false! ! Smalltalk renameClassNamed: #STMWorker as: #STMActor! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:06'! benchStmActor | a1 a2 | ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:06' prior: 33586238! 
benchStmActor | a1 a2 | a1 := STMActor new. a2 := STMActor new. ! ! 1 printString! 1 printString! 1 printString! '1'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:14' prior: 33586352! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum2 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:14' prior: 33586563! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:15' prior: 33586879! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ] a start. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:18' prior: 33587197! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1.! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33587525! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. 
b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. From noreply at buildbot.pypy.org Thu Apr 3 11:32:47 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:47 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Minor changes for presentation Message-ID: <20140403093247.6C2611C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r761:89a47e145bab Date: 2014-01-19 21:30 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/89a47e145bab/ Log: Minor changes for presentation diff too long, truncating to 2000 out of 3817 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -1,2 +1,2 @@ - ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! 
wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! !BasicClassOrganizer methodsFor: 'accessing' stamp: 'toma 1/16/2014 22:18' prior: 17298983! classComment classComment ifNil: [^ '']. ^ [classComment text ifNil: ['']] on: Error do: [^ ''].! ! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'SPy-Benchmarks'! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: 'Shared' poolDictionaries: '' category: 'SPy-Benchmarks'! !SPySTM class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:22'! shared ^self Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23' prior: 33557264! shared ^Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23'! shared: aValue Shared := aValue! ! ----SNAPSHOT----{16 January 2014 . 10:24:08 pm} Squeak4.5-12568.image priorSource: 2221! Object subclass: #STMAtomic instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! 
!STMAtomic methodsFor: 'nil' stamp: 'toma 1/16/2014 22:28'! primEnter ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557810! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557933! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! value self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29' prior: 33558376! value | result | self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558498! value | result | self primEnter. result := self. self primLeave ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558634! value | result | self primEnter. result := self. self primLeave. ! ! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33558803! value | result | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33559111! value | result error | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559293! value | result error | self primEnter. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559481! value | result error | self primEnter. error := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! 
!STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559707! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559950! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560207! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560465! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33560754! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561047! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561339! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass]. ^result ! ! !STMAtomic class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:36'! 
from: aBlock ^ (STMAtomic new) block: aBlock; yourself.! ! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36' prior: 33561909! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! !BlockClosure methodsFor: 'nil' stamp: 'toma 1/16/2014 22:37'! atomic ^STMAtomic from: self! ! SystemOrganization addCategory: #'Kernel-STM'! SystemOrganization classify: #STMAtomic under: #'Kernel-STM'! SystemOrganization classify: #STMProcess under: #'Kernel-STM'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40'! benchStmAtomic ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562476! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562577! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562700! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41'! benchStmParallel | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41' prior: 33562933! benchStmParallel | sum | sum := 0. (1 to: self) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563060! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563258! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(i to: (i + 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563453! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563655! benchStmParallel | sum | sum := 0. 
(1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] parallelFork ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563872! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564102! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564334! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564566! benchStmParallel | sum t | sum := 0. (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564800! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565051! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait.]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565319! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:47:04 pm} Squeak4.5-12568.image priorSource: 3090! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:56' prior: 33562824! 
benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566018! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33565614! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566678! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:58:17 pm} Squeak4.5-12568.image priorSource: 11414! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01' prior: 33561633! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! [ 1 + 1 ] atomic value! [ 1 + 1 ] atomic value! ----SNAPSHOT----{16 January 2014 . 11:03:21 pm} Squeak4.5-12568.image priorSource: 12802! ----SNAPSHOT----{16 January 2014 . 11:03:41 pm} Squeak4.5-12568.image priorSource: 13325! ----SNAPSHOT----{16 January 2014 . 11:03:45 pm} Squeak4.5-12568.image priorSource: 13416! BlockClosure organization addCategory: #STM! BlockClosure organization classify: #atomic under: #STM! 
!BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 22:37' prior: 33562201! atomic ^STMAtomic from: self! ! BlockClosure organization classify: #newSTMProcess under: #STM! !BlockClosure methodsFor: 'STM' stamp: '' prior: 42643259! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: '' prior: 33568373! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! BlockClosure organization classify: #parallelFork under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 21:13' prior: 33556059! parallelFork ^ (self newSTMProcess) fork; yourself! ! Object subclass: #STMFuture instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMFuture instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block ^ block! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block: anObject block := anObject! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/16/2014 23:34'! invoke ! ! Object subclass: #STMFuture instanceVariableNames: 'block process' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process ^ process! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process: anObject process := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35' prior: 33569341! invoke self process: (self block parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35'! value ! ! Object subclass: #STMFuture instanceVariableNames: 'block process result' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result ^ result! ! 
!STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result: anObject result := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569785! invoke self process: ([self result: self block value] parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569914! value self process wait.! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33570525! value self process wait. ^self result! ! !STMFuture class methodsFor: 'nil' stamp: 'toma 1/16/2014 23:37'! invoke: aBlock ^(STMFuture new) block: aBlock; invoke; yourself! ! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 23:38'! async ^STMFuture invoke: self! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:38'! benchStmFuture ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:39' prior: 33570998! benchStmFuture | futures | ! ! (1 to: 100) sum! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:40' prior: 33571101! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum ] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571236! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571416! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async] ! ! (1 to: 100) inject: 0 into: [ :i :k | i + k]! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:43' prior: 33571596! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :s :f | s + (f value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33571825! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. 
sum := futures inject: 0 into: [ :next :each | next + (each value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33572069! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 11:45:18 pm} Squeak4.5-12568.image priorSource: 13507! ----SNAPSHOT----{16 January 2014 . 11:45:23 pm} Squeak4.5-12568.image priorSource: 18085! ----SNAPSHOT----{16 January 2014 . 11:46:35 pm} Squeak4.5-12568.image priorSource: 18176! Object subclass: #STMWorker instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMWorker instanceVariableNames: 'queue' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33570359! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ ]! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33573142! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ ]! ! self! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33573350! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ self error: 'Future already invoked' ]! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/17/2014 00:24'! initialize super initialize.! ! STMFuture removeSelector: #initialize! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26' prior: 33570648! value self process ifNotNil: [ self process wait. ^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26' prior: 33573946! value self process ifNotNil: [ self wait. 
^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26'! wait self process wait.! ! !STMWorker methodsFor: 'nil' stamp: 'toma 1/17/2014 00:28'! submit: aBlock callback: aUnaryBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30'! send: aSymbol with: anArgument ! ! STMWorker removeSelector: #submit:callback:! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30'! on: aSymbol do: aBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30' prior: 33574724! on: aSymbol do: aBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:31'! onMessage: aSymbol do: aBlock ! ! STMWorker removeSelector: #on:do:! Object subclass: #STMWorker instanceVariableNames: 'queue handlers' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMWorker methodsFor: 'nil' stamp: 'toma 1/17/2014 00:31'! initialize ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:31' prior: 33575225! initialize handlers := Dictionary new.! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue ^ queue! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue: anObject queue := anObject! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers ^ handlers! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers: anObject handlers := anObject! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33575335! initialize self handlers: Dictionary new.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33574951! onMessage: aSymbol do: aBlock self handlers at: aSymbol put: aBlock! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33574566! send: aSymbol with: anArgument ! ! 
!STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33576170! send: aSymbol with: anArgument ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:34' prior: 33576299! send: aSymbol with: anArgument ! ! Object subclass: #STMMessage instanceVariableNames: 'queue handlers' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMMessage instanceVariableNames: 'name arg' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMMessage instanceVariableNames: 'name args' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! {1. 2.}! {1. 2. World.}! [:i :j | i + j]! [:i :j | i + j] valueWithArguments: {1. 2.}! !STMMessage class methodsFor: 'nil' stamp: 'toma 1/17/2014 00:39'! named: aSymbol withArgs: anArray ^(self new) name: aSymbol; arguments: anArray; yourself! ! Object subclass: #STMMessage instanceVariableNames: 'name arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! name: anObject name := anObject! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments ^ arguments! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments: anObject arguments := anObject! ! Object subclass: #STMMessage instanceVariableNames: 'messageName arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName ^ messageName! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName: anObject messageName := anObject! ! STMMessage removeSelector: #name:! !STMMessage class methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:40' prior: 33577040! named: aSymbol withArgs: anArray ^(self new) messageName: aSymbol; arguments: anArray; yourself! ! a := {1. 2. 3.}! a := OrderedCollection new! a add: 5! a! a add: 5! 
a! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:44' prior: 33576429! send: aSymbol with: anArgument self queue! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:44' prior: 33575864! initialize self handlers: Dictionary new. self queue: Stack new.! ! a := Stack new! a := Stack new! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:47' prior: 33578512! initialize self handlers: Dictionary new. self queue: LinkedList new.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33578372! send: aSymbol with: anArgument self queue addLast: (STMMessage named: aSymbol with: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33578879! send: aSymbol with: anArgument self queue addLast: (STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33579075! send: aSymbol with: anArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument with: anotherArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument and: anotherArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! STMWorker removeSelector: #send:with:with:! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:50'! loop ! ! 
Object subclass: #STMWorker instanceVariableNames: 'queue handlers active' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active ^ active! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active: anObject active := anObject! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:50' prior: 33580221! loop ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:51' prior: 33580665! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:51' prior: 33580769! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33580922! loop self active: true. [self active] whileTrue: [ [self queue isEmpty] ifFalse: [ ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33581078! loop self active: true. [self active] whileTrue: [ [self queue isEmpty] ifFalse: [ | message | [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33581273! loop self active: true. [self active] whileTrue: [ | message | [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33581519! loop self active: true. [self active] whileTrue: [ | message | message := nil. [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33581768! loop self active: true. [self active] whileTrue: [ | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value. ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33582035! loop self active: true. 
[self active] whileTrue: [ | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value. ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54'! receive ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582318! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582587! receive | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582853! receive | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:55' prior: 33575531! queue: aMessage ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:55' prior: 33583328! queue: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56' prior: 33583443! queue: anObject queue := anObject! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56'! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579275! send: aSymbol with: anArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579689! send: aSymbol with: anArgument and: anotherArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579960! 
send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! STMWorker organization classify: #schedule: under: #'as yet unclassified'! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:56' prior: 33583697! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:00' prior: 33582694! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01' prior: 33584800! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01' prior: 33584997! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! stop self active: False! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! start [self loop] parallelFork! ! w := STMWorker new! w onMessage: #test do: [:i | Transcript show: i]! w start! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:03' prior: 33583086! receive | message | message := nil. [ (self queue isEmpty) ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! w stop! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:04' prior: 33585522! stop self active: false! ! Smalltalk renameClassNamed: #STMWorker as: #STMActor! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:06'! benchStmActor | a1 a2 | ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:06' prior: 33586238! 
benchStmActor | a1 a2 | a1 := STMActor new. a2 := STMActor new. ! ! 1 printString! 1 printString! 1 printString! '1'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:14' prior: 33586352! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum2 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:14' prior: 33586563! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:15' prior: 33586879! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ] a start. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:18' prior: 33587197! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1.! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33587525! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. 
b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1. (1 to: 1000) do: [:i | SPyVM print: '.']! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33588158! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1. (1 to: 1000) do: [:i | SPyVM print: '.'] a stop. b stop.! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33588833! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1. (1 to: 1000) do: [:i | SPyVM print: '.']. a stop. b stop.! ! ----SNAPSHOT----{17 January 2014 . 1:19:41 am} Squeak4.5-12568.image priorSource: 18267! ----SNAPSHOT----{17 January 2014 . 1:23 am} Squeak4.5-12568.image priorSource: 35706! 10 benchStmActor! ----QUIT/NOSAVE----{17 January 2014 . 1:24:53 am} Squeak4.5-12568.image priorSource: 35796! \ No newline at end of file + ----STARTUP----{15 January 2014 . 
2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! 
!STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! !BasicClassOrganizer methodsFor: 'accessing' stamp: 'toma 1/16/2014 22:18' prior: 17298983! classComment classComment ifNil: [^ '']. ^ [classComment text ifNil: ['']] on: Error do: [^ ''].! ! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'SPy-Benchmarks'! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: 'Shared' poolDictionaries: '' category: 'SPy-Benchmarks'! !SPySTM class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:22'! shared ^self Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23' prior: 33557264! shared ^Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23'! shared: aValue Shared := aValue! ! ----SNAPSHOT----{16 January 2014 . 10:24:08 pm} Squeak4.5-12568.image priorSource: 2221! Object subclass: #STMAtomic instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'nil' stamp: 'toma 1/16/2014 22:28'! primEnter ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557810! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557933! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! value self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29' prior: 33558376! value | result | self primEnter. ! ! 
!STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558498! value | result | self primEnter. result := self. self primLeave ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558634! value | result | self primEnter. result := self. self primLeave. ! ! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33558803! value | result | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33559111! value | result error | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559293! value | result error | self primEnter. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559481! value | result error | self primEnter. error := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559707! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559950! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560207! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. 
error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560465! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33560754! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561047! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561339! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass]. ^result ! ! !STMAtomic class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself.! ! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36' prior: 33561909! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! !BlockClosure methodsFor: 'nil' stamp: 'toma 1/16/2014 22:37'! atomic ^STMAtomic from: self! ! SystemOrganization addCategory: #'Kernel-STM'! SystemOrganization classify: #STMAtomic under: #'Kernel-STM'! SystemOrganization classify: #STMProcess under: #'Kernel-STM'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40'! benchStmAtomic ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562476! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562577! 
benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562700! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41'! benchStmParallel | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41' prior: 33562933! benchStmParallel | sum | sum := 0. (1 to: self) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563060! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563258! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(i to: (i + 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563453! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563655! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] parallelFork ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563872! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564102! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564334! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564566! 
benchStmParallel | sum t | sum := 0. (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564800! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565051! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait.]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565319! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:47:04 pm} Squeak4.5-12568.image priorSource: 3090! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:56' prior: 33562824! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566018! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33565614! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566678! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:58:17 pm} Squeak4.5-12568.image priorSource: 11414! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01' prior: 33561633! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! [ 1 + 1 ] atomic value! [ 1 + 1 ] atomic value! ----SNAPSHOT----{16 January 2014 . 11:03:21 pm} Squeak4.5-12568.image priorSource: 12802! ----SNAPSHOT----{16 January 2014 . 11:03:41 pm} Squeak4.5-12568.image priorSource: 13325! ----SNAPSHOT----{16 January 2014 . 11:03:45 pm} Squeak4.5-12568.image priorSource: 13416! BlockClosure organization addCategory: #STM! BlockClosure organization classify: #atomic under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 22:37' prior: 33562201! atomic ^STMAtomic from: self! ! BlockClosure organization classify: #newSTMProcess under: #STM! !BlockClosure methodsFor: 'STM' stamp: '' prior: 42643259! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: '' prior: 33568373! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! BlockClosure organization classify: #parallelFork under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 21:13' prior: 33556059! parallelFork ^ (self newSTMProcess) fork; yourself! ! Object subclass: #STMFuture instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! 
Object subclass: #STMFuture instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block ^ block! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block: anObject block := anObject! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/16/2014 23:34'! invoke ! ! Object subclass: #STMFuture instanceVariableNames: 'block process' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process ^ process! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process: anObject process := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35' prior: 33569341! invoke self process: (self block parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35'! value ! ! Object subclass: #STMFuture instanceVariableNames: 'block process result' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result ^ result! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result: anObject result := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569785! invoke self process: ([self result: self block value] parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569914! value self process wait.! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33570525! value self process wait. ^self result! ! !STMFuture class methodsFor: 'nil' stamp: 'toma 1/16/2014 23:37'! invoke: aBlock From noreply at buildbot.pypy.org Thu Apr 3 11:32:48 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:48 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: File out sources! (puuuh... 
conflicted changes not nice) Message-ID: <20140403093248.830C01C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r762:faed200eb69c Date: 2014-01-20 11:24 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/faed200eb69c/ Log: File out sources! (puuuh... conflicted changes not nice) diff --git a/images/Integer-benchStmAtomic.st b/images/Integer-benchStmAtomic.st new file mode 100644 --- /dev/null +++ b/images/Integer-benchStmAtomic.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 10:59:50 am'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:07'! benchStmAtomic | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | [sum := sum + k.] atomic value. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! \ No newline at end of file diff --git a/images/Integer-benchStmFuture.st b/images/Integer-benchStmFuture.st new file mode 100644 --- /dev/null +++ b/images/Integer-benchStmFuture.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 10:59:37 am'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 16:36'! benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. 
SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! \ No newline at end of file diff --git a/images/Integer-benchStmParallel.st b/images/Integer-benchStmParallel.st new file mode 100644 --- /dev/null +++ b/images/Integer-benchStmParallel.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 10:59:46 am'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:07'! benchStmParallel | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! \ No newline at end of file diff --git a/images/STMActor.st b/images/STMActor.st new file mode 100644 --- /dev/null +++ b/images/STMActor.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:01:35 am'! Object subclass: #STMActor instanceVariableNames: 'queue handlers active' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:47'! initialize self handlers: Dictionary new. self queue: LinkedList new.! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32'! onMessage: aSymbol do: aBlock self handlers at: aSymbol put: aBlock! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:03'! 
receive | message | message := nil. [ (self queue isEmpty) ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:56'! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58'! send: aSymbol with: anArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58'! send: aSymbol with: anArgument and: anotherArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58'! send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! start [self loop] parallelFork! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:04'! stop self active: false! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active ^ active! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active: anObject active := anObject! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers ^ handlers! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers: anObject handlers := anObject! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue ^ queue! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56'! queue: anObject queue := anObject! ! \ No newline at end of file diff --git a/images/STMAtomic.st b/images/STMAtomic.st new file mode 100644 --- /dev/null +++ b/images/STMAtomic.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:01:40 am'! 
Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28'! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01'! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! STMAtomic class instanceVariableNames: ''! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! \ No newline at end of file diff --git a/images/STMFuture.st b/images/STMFuture.st new file mode 100644 --- /dev/null +++ b/images/STMFuture.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:01:42 am'! Object subclass: #STMFuture instanceVariableNames: 'block process result' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block ^ block! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block: anObject block := anObject! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process ^ process! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process: anObject process := anObject! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result ^ result! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result: anObject result := anObject! ! 
!STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23'! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ self error: 'Future already invoked' ]! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26'! value self process ifNotNil: [ self wait. ^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26'! wait self process wait.! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! STMFuture class instanceVariableNames: ''! !STMFuture class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:37'! invoke: aBlock ^(STMFuture new) block: aBlock; invoke; yourself! ! \ No newline at end of file diff --git a/images/STMMessage.st b/images/STMMessage.st new file mode 100644 --- /dev/null +++ b/images/STMMessage.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:01:44 am'! Object subclass: #STMMessage instanceVariableNames: 'messageName arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments ^ arguments! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments: anObject arguments := anObject! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName ^ messageName! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName: anObject messageName := anObject! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! STMMessage class instanceVariableNames: ''! !STMMessage class methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:40'! named: aSymbol withArgs: anArray ^(self new) messageName: aSymbol; arguments: anArray; yourself! ! 
\ No newline at end of file diff --git a/images/STMProcess.st b/images/STMProcess.st new file mode 100644 --- /dev/null +++ b/images/STMProcess.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:05:53 am'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! fork Transcript show: '* STM Process did not fork *' , Character cr. self resume! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! initialize lock := 1. super initialize! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! signal Transcript show: ' Failed to signal process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! wait self primWait ! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! STMProcess class instanceVariableNames: ''! !STMProcess class methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! forContext: t1 priority: t2 | t3 | t3 := self new. t3 suspendedContext: t1. t3 priority: t2. ^ t3 ! ! 
\ No newline at end of file From noreply at buildbot.pypy.org Thu Apr 3 11:32:49 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:49 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: SPyVM-GameOfLife created online with Bitbucket Message-ID: <20140403093249.A48531C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r763:499363dfd430 Date: 2014-01-21 09:42 +0000 http://bitbucket.org/pypy/lang-smalltalk/changeset/499363dfd430/ Log: SPyVM-GameOfLife created online with Bitbucket diff --git a/images/SPyVM-GameOfLife b/images/SPyVM-GameOfLife new file mode 100644 --- /dev/null +++ b/images/SPyVM-GameOfLife @@ -0,0 +1,530 @@ +Object subclass: #GameOfLifeField + instanceVariableNames: 'data height width' + classVariableNames: '' + poolDictionaries: '' + category: 'VM-GameOfLife'! + +!GameOfLifeField methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:23'! +cellAliveAt: x at: y + + ^ (self at: x at: y) = 1! ! + +!GameOfLifeField methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:14'! +rows: numberOfRows columns: numberOfColumns + + self height: numberOfRows. + self width: numberOfColumns. + self data: (Matrix rows: numberOfRows columns: numberOfColumns element: 0). + ! ! + + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:25'! +at: x at:y + + ((x < 1) or: [x > self width]) ifTrue: [ ^ 0 ]. + ((y < 1) or: [y > self height]) ifTrue: [ ^ 0 ]. + + ^ self data at: y at: x! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:25'! +at: x at:y put: aValue + + self data at: y at: x put: aValue.! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 22:02'! +atRow: rowNumber put: aRow + + self data atRow: rowNumber put: aRow! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +data + + ^ data! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +data: anObject + + data := anObject! ! 
+ +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +height + + ^ height! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +height: anObject + + height := anObject! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:52'! +print + + | resultString | + resultString := ''. + (1 to: self height) do: [:y | + (1 to: self width) do: [ :x | + resultString := resultString , (self data at: y at: x).]. + resultString := resultString , Character cr ]. + ^ resultString ! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +rowSlice: sliceSize collect: aBlock + + ! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:28'! +rowwiseFrom: startRow to: endRow collect: aBlock + + | newField | + newField := GameOfLifeFieldSlice from: startRow to: endRow width: self width. + (startRow to: endRow) do: [ :y | + (1 to: self width) do: [ :x | newField at: x at: y put: (aBlock value: self value: x value: y) ] ]. + ^ newField! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +width + + ^ width! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +width: anObject + + width := anObject! ! + +"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! + +GameOfLifeField class + instanceVariableNames: ''! + +!GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:44'! +gliderFieldRows: numberOfRows columns: numberOfColumns + + | newField | + newField := self new rows: numberOfRows columns: numberOfColumns. + + newField + at: 8 at: 5 put: 1; + at: 9 at: 5 put: 1; + at: 10 at: 5 put: 1; + at: 10 at: 4 put: 1; + at: 9 at: 3 put: 1. + + ^ newField! ! + +!GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 20:43'! +rows: numberOfRows columns: numberOfColumns + + ^ self new rows: numberOfRows columns: numberOfColumns! ! 
+ + +GameOfLifeField subclass: #GameOfLifeFieldSlice + instanceVariableNames: 'startRow endRow' + classVariableNames: '' + poolDictionaries: '' + category: 'VM-GameOfLife'! + +!GameOfLifeFieldSlice methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 10:22'! +from: startRow to: endRow width: width + + self startRow: startRow; + endRow: endRow; + width: width; + height: (endRow - startRow + 1); + data: (Matrix rows: (endRow - startRow + 1) columns: width). + + ^ self! ! + +!GameOfLifeFieldSlice methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:31'! +rowwiseDo: aBlock + + self startRow to: self endRow do: [ :rowNumber | + aBlock value: rowNumber value: (self data atRow: (rowNumber - self startRow) + 1). + ].! ! + + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:30'! +at: x at:y put: aValue + + self data at: y + 1 - self startRow at: x put: aValue.! ! + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11'! +endRow + + ^ endRow! ! + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11'! +endRow: anObject + + endRow := anObject! ! + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11'! +startRow + + ^ startRow! ! + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11'! +startRow: anObject + + startRow := anObject! ! + +"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! + +GameOfLifeFieldSlice class + instanceVariableNames: ''! + +!GameOfLifeFieldSlice class methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 20:53'! +from: startRow to: endRow width: width + + ^ self new from: startRow to: endRow width: width! ! + + +Object subclass: #STMSimulation + instanceVariableNames: 'processes field numberOfProcesses fieldSlices fieldNew' + classVariableNames: '' + poolDictionaries: '' + category: 'VM-GameOfLife'! + +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! 
+initialField: aGameOfLifeField + + self field: aGameOfLifeField. + self fieldNew: (GameOfLifeField rows: (aGameOfLifeField height) columns: (aGameOfLifeField width)).! ! + +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:50'! +simulateRound: roundNumber + + self processes: ((1 to: self numberOfProcesses) collect: [ :processNumber | + [| rows | + rows := self startProcess: processNumber. + rows rowwiseDo: [ :rowNumber :aRow | self fieldNew atRow: rowNumber put: aRow ]] parallelFork. ]). +! ! + +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +simulateRounds: numberOfRounds + + | swapField | + + 1 to: numberOfRounds do: [ :roundNumber | + self simulateRound: roundNumber. + self processes do: [ :semaphore | semaphore wait. ]. + + "Transcript show: self fieldNew print. + Transcript show: Character cr." + + swapField := self field. + self field: self fieldNew. + self fieldNew: swapField. + ]. + + ^ self field! ! + +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +startProcess: processNumber + + | endOfSlice slice startOfSlice | + slice := (self field height / self numberOfProcesses). + startOfSlice := ((processNumber - 1) * slice) + 1. + endOfSlice := processNumber * slice. + + ^ self field rowwiseFrom: startOfSlice + to: endOfSlice + collect: [ :tempField :x :y | self thumbUpOrDownAt: x at: y on: tempField ] + + + ! ! + +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +thumbUpOrDownAt: x at: y on: tempField + | liveCellCount | + + liveCellCount := (tempField at: x - 1 at: y - 1) + + (tempField at: x + 0 at: y - 1) + + (tempField at: x + 1 at: y - 1) + + (tempField at: x - 1 at: y + 0) + + (tempField at: x + 1 at: y + 0) + + (tempField at: x - 1 at: y + 1) + + (tempField at: x + 0 at: y + 1) + + (tempField at: x + 1 at: y + 1). 
+ + (tempField cellAliveAt: x at: y) + ifTrue: [((2 = liveCellCount) + or: [liveCellCount = 3]) + ifTrue: [^ 1] + ifFalse: [^ 0]] + ifFalse: [(liveCellCount = 3) + ifTrue: [^ 1] + ifFalse: [^ 0]]! ! + + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +field + + ^ field! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +field: anObject + + field := anObject! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +fieldNew + + ^ fieldNew! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +fieldNew: anObject + + fieldNew := anObject! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +fieldSlices + + ^ fieldSlices! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +fieldSlices: anObject + + fieldSlices := anObject! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +numberOfProcesses + + ^ numberOfProcesses! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +numberOfProcesses: aNumber + + numberOfProcesses := aNumber + ! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +processes + + ^ processes! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +processes: anObject + + processes := anObject! ! + + +!STMSimulation methodsFor: 'initialize-release' stamp: 'pre 1/21/2014 09:44'! +initialize + + self processes: OrderedCollection new. + ! ! + +"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! + +STMSimulation class + instanceVariableNames: ''! + +!STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +benchmark + + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! + +!STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +benchmark2 + + ^ (1 to: 5) collect: [ :i | + [ self standardSimulation2: (2 raisedTo: i) ] timeToRun ]! ! 
+ +!STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +standardSimulation2: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! + +!STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +standardSimulation: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! + + +Object subclass: #Simulation + instanceVariableNames: 'processes field numberOfProcesses fieldSlices fieldNew' + classVariableNames: '' + poolDictionaries: '' + category: 'VM-GameOfLife'! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:37'! +initialField: aGameOfLifeField + + self field: aGameOfLifeField. + self fieldNew: (GameOfLifeField rows: (aGameOfLifeField height) columns: (aGameOfLifeField width)).! ! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:44'! +simulateRound: roundNumber + + self processes: ((1 to: self numberOfProcesses) collect: [ :processNumber | + | semaphore | + semaphore := Semaphore new. + [| rows | + rows := self startProcess: processNumber. + rows rowwiseDo: [ :rowNumber :aRow | self fieldNew atRow: rowNumber put: aRow ]. + semaphore signal] fork. + semaphore ]). +! ! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 14:00'! +simulateRounds: numberOfRounds + + | swapField | + + 1 to: numberOfRounds do: [ :roundNumber | + self simulateRound: roundNumber. + self processes do: [ :semaphore | semaphore wait. ]. + + "Transcript show: self fieldNew print. + Transcript show: Character cr." + + swapField := self field. + self field: self fieldNew. + self fieldNew: swapField. + ]. + + ^ self field! ! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:34'! 
+startProcess: processNumber + + | endOfSlice slice startOfSlice | + slice := (self field height / self numberOfProcesses). + startOfSlice := ((processNumber - 1) * slice) + 1. + endOfSlice := processNumber * slice. + + ^ self field rowwiseFrom: startOfSlice + to: endOfSlice + collect: [ :tempField :x :y | self thumbUpOrDownAt: x at: y on: tempField ] + + + ! ! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 10:48'! +thumbUpOrDownAt: x at: y on: tempField + | liveCellCount | + + liveCellCount := (tempField at: x - 1 at: y - 1) + + (tempField at: x + 0 at: y - 1) + + (tempField at: x + 1 at: y - 1) + + (tempField at: x - 1 at: y + 0) + + (tempField at: x + 1 at: y + 0) + + (tempField at: x - 1 at: y + 1) + + (tempField at: x + 0 at: y + 1) + + (tempField at: x + 1 at: y + 1). + + (tempField cellAliveAt: x at: y) + ifTrue: [((2 = liveCellCount) + or: [liveCellCount = 3]) + ifTrue: [^ 1] + ifFalse: [^ 0]] + ifFalse: [(liveCellCount = 3) + ifTrue: [^ 1] + ifFalse: [^ 0]]! ! + + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04'! +field + + ^ field! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04'! +field: anObject + + field := anObject! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 22:06'! +fieldNew + + ^ fieldNew! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 22:06'! +fieldNew: anObject + + fieldNew := anObject! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:30'! +fieldSlices + + ^ fieldSlices! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:30'! +fieldSlices: anObject + + fieldSlices := anObject! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:09'! +numberOfProcesses + + ^ numberOfProcesses! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:09'! +numberOfProcesses: aNumber + + numberOfProcesses := aNumber + ! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04'! +processes + + ^ processes! 
! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04'! +processes: anObject + + processes := anObject! ! + + +!Simulation methodsFor: 'initialize-release' stamp: 'pre 1/19/2014 20:04'! +initialize + + self processes: OrderedCollection new. + ! ! + +"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! + +Simulation class + instanceVariableNames: ''! + +!Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 14:08'! +benchmark + + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! + +!Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 14:09'! +benchmark2 + + ^ (1 to: 5) collect: [ :i | + [ self standardSimulation2: (2 raisedTo: i) ] timeToRun ]! ! + +!Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:59'! +standardSimulation2: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! + +!Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:59'! +standardSimulation: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! 
From noreply at buildbot.pypy.org Thu Apr 3 11:32:58 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:32:58 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Filed in Patricks' GoL Benchmarks Message-ID: <20140403093258.97E111C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: Changeset: r764:8bea93d9c62c Date: 2014-01-21 11:11 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/8bea93d9c62c/ Log: Filed in Patricks' GoL Benchmarks diff too long, truncating to 2000 out of 5214 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -1,2 +1,308 @@ - ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! 
----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! !BasicClassOrganizer methodsFor: 'accessing' stamp: 'toma 1/16/2014 22:18' prior: 17298983! classComment classComment ifNil: [^ '']. ^ [classComment text ifNil: ['']] on: Error do: [^ ''].! ! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'SPy-Benchmarks'! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: 'Shared' poolDictionaries: '' category: 'SPy-Benchmarks'! !SPySTM class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:22'! shared ^self Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23' prior: 33557264! shared ^Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23'! shared: aValue Shared := aValue! ! ----SNAPSHOT----{16 January 2014 . 10:24:08 pm} Squeak4.5-12568.image priorSource: 2221! Object subclass: #STMAtomic instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'nil' stamp: 'toma 1/16/2014 22:28'! primEnter ! ! 
!STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557810! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557933! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! value self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29' prior: 33558376! value | result | self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558498! value | result | self primEnter. result := self. self primLeave ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558634! value | result | self primEnter. result := self. self primLeave. ! ! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33558803! value | result | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33559111! value | result error | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559293! value | result error | self primEnter. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559481! value | result error | self primEnter. error := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559707! 
value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559950! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560207! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560465! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33560754! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561047! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561339! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass]. ^result ! ! !STMAtomic class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself.! ! 
!STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36' prior: 33561909! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! !BlockClosure methodsFor: 'nil' stamp: 'toma 1/16/2014 22:37'! atomic ^STMAtomic from: self! ! SystemOrganization addCategory: #'Kernel-STM'! SystemOrganization classify: #STMAtomic under: #'Kernel-STM'! SystemOrganization classify: #STMProcess under: #'Kernel-STM'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40'! benchStmAtomic ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562476! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562577! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562700! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41'! benchStmParallel | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41' prior: 33562933! benchStmParallel | sum | sum := 0. (1 to: self) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563060! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563258! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(i to: (i + 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563453! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563655! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] parallelFork ]! ! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563872! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564102! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564334! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564566! benchStmParallel | sum t | sum := 0. (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564800! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565051! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait.]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565319! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:47:04 pm} Squeak4.5-12568.image priorSource: 3090! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:56' prior: 33562824! benchStmAtomic | sum threads | sum := 0. 
threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566018! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33565614! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566678! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:58:17 pm} Squeak4.5-12568.image priorSource: 11414! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01' prior: 33561633! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! [ 1 + 1 ] atomic value! [ 1 + 1 ] atomic value! ----SNAPSHOT----{16 January 2014 . 11:03:21 pm} Squeak4.5-12568.image priorSource: 12802! ----SNAPSHOT----{16 January 2014 . 11:03:41 pm} Squeak4.5-12568.image priorSource: 13325! ----SNAPSHOT----{16 January 2014 . 11:03:45 pm} Squeak4.5-12568.image priorSource: 13416! BlockClosure organization addCategory: #STM! BlockClosure organization classify: #atomic under: #STM! 
!BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 22:37' prior: 33562201! atomic ^STMAtomic from: self! ! BlockClosure organization classify: #newSTMProcess under: #STM! !BlockClosure methodsFor: 'STM' stamp: '' prior: 42643259! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: '' prior: 33568373! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! BlockClosure organization classify: #parallelFork under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 21:13' prior: 33556059! parallelFork ^ (self newSTMProcess) fork; yourself! ! Object subclass: #STMFuture instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMFuture instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block ^ block! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block: anObject block := anObject! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/16/2014 23:34'! invoke ! ! Object subclass: #STMFuture instanceVariableNames: 'block process' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process ^ process! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process: anObject process := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35' prior: 33569341! invoke self process: (self block parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35'! value ! ! Object subclass: #STMFuture instanceVariableNames: 'block process result' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result ^ result! ! 
!STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result: anObject result := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569785! invoke self process: ([self result: self block value] parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569914! value self process wait.! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33570525! value self process wait. ^self result! ! !STMFuture class methodsFor: 'nil' stamp: 'toma 1/16/2014 23:37'! invoke: aBlock ^(STMFuture new) block: aBlock; invoke; yourself! ! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 23:38'! async ^STMFuture invoke: self! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:38'! benchStmFuture ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:39' prior: 33570998! benchStmFuture | futures | ! ! (1 to: 100) sum! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:40' prior: 33571101! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum ] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571236! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571416! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async] ! ! (1 to: 100) inject: 0 into: [ :i :k | i + k]! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:43' prior: 33571596! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :s :f | s + (f value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33571825! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. 
sum := futures inject: 0 into: [ :next :each | next + (each value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33572069! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 11:45:18 pm} Squeak4.5-12568.image priorSource: 13507! ----SNAPSHOT----{16 January 2014 . 11:45:23 pm} Squeak4.5-12568.image priorSource: 18085! ----SNAPSHOT----{16 January 2014 . 11:46:35 pm} Squeak4.5-12568.image priorSource: 18176! Object subclass: #STMWorker instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMWorker instanceVariableNames: 'queue' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33570359! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ ]! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33573142! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ ]! ! self! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33573350! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ self error: 'Future already invoked' ]! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/17/2014 00:24'! initialize super initialize.! ! STMFuture removeSelector: #initialize! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26' prior: 33570648! value self process ifNotNil: [ self process wait. ^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26' prior: 33573946! value self process ifNotNil: [ self wait. 
^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26'! wait self process wait.! ! !STMWorker methodsFor: 'nil' stamp: 'toma 1/17/2014 00:28'! submit: aBlock callback: aUnaryBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30'! send: aSymbol with: anArgument ! ! STMWorker removeSelector: #submit:callback:! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30'! on: aSymbol do: aBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30' prior: 33574724! on: aSymbol do: aBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:31'! onMessage: aSymbol do: aBlock ! ! STMWorker removeSelector: #on:do:! Object subclass: #STMWorker instanceVariableNames: 'queue handlers' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMWorker methodsFor: 'nil' stamp: 'toma 1/17/2014 00:31'! initialize ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:31' prior: 33575225! initialize handlers := Dictionary new.! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue ^ queue! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue: anObject queue := anObject! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers ^ handlers! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers: anObject handlers := anObject! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33575335! initialize self handlers: Dictionary new.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33574951! onMessage: aSymbol do: aBlock self handlers at: aSymbol put: aBlock! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33574566! send: aSymbol with: anArgument ! ! 
!STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33576170! send: aSymbol with: anArgument ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:34' prior: 33576299! send: aSymbol with: anArgument ! ! Object subclass: #STMMessage instanceVariableNames: 'queue handlers' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMMessage instanceVariableNames: 'name arg' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMMessage instanceVariableNames: 'name args' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! {1. 2.}! {1. 2. World.}! [:i :j | i + j]! [:i :j | i + j] valueWithArguments: {1. 2.}! !STMMessage class methodsFor: 'nil' stamp: 'toma 1/17/2014 00:39'! named: aSymbol withArgs: anArray ^(self new) name: aSymbol; arguments: anArray; yourself! ! Object subclass: #STMMessage instanceVariableNames: 'name arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! name: anObject name := anObject! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments ^ arguments! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments: anObject arguments := anObject! ! Object subclass: #STMMessage instanceVariableNames: 'messageName arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName ^ messageName! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName: anObject messageName := anObject! ! STMMessage removeSelector: #name:! !STMMessage class methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:40' prior: 33577040! named: aSymbol withArgs: anArray ^(self new) messageName: aSymbol; arguments: anArray; yourself! ! a := {1. 2. 3.}! a := OrderedCollection new! a add: 5! a! a add: 5! 
a! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:44' prior: 33576429! send: aSymbol with: anArgument self queue! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:44' prior: 33575864! initialize self handlers: Dictionary new. self queue: Stack new.! ! a := Stack new! a := Stack new! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:47' prior: 33578512! initialize self handlers: Dictionary new. self queue: LinkedList new.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33578372! send: aSymbol with: anArgument self queue addLast: (STMMessage named: aSymbol with: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33578879! send: aSymbol with: anArgument self queue addLast: (STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33579075! send: aSymbol with: anArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument with: anotherArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument and: anotherArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! STMWorker removeSelector: #send:with:with:! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:50'! loop ! ! 
Object subclass: #STMWorker instanceVariableNames: 'queue handlers active' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active ^ active! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active: anObject active := anObject! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:50' prior: 33580221! loop ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:51' prior: 33580665! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:51' prior: 33580769! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33580922! loop self active: true. [self active] whileTrue: [ [self queue isEmpty] ifFalse: [ ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33581078! loop self active: true. [self active] whileTrue: [ [self queue isEmpty] ifFalse: [ | message | [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33581273! loop self active: true. [self active] whileTrue: [ | message | [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33581519! loop self active: true. [self active] whileTrue: [ | message | message := nil. [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33581768! loop self active: true. [self active] whileTrue: [ | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value. ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33582035! loop self active: true. 
[self active] whileTrue: [ | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value. ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54'! receive ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582318! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582587! receive | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582853! receive | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:55' prior: 33575531! queue: aMessage ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:55' prior: 33583328! queue: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56' prior: 33583443! queue: anObject queue := anObject! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56'! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579275! send: aSymbol with: anArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579689! send: aSymbol with: anArgument and: anotherArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579960! 
send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! STMWorker organization classify: #schedule: under: #'as yet unclassified'! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:56' prior: 33583697! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:00' prior: 33582694! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01' prior: 33584800! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01' prior: 33584997! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! stop self active: False! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! start [self loop] parallelFork! ! w := STMWorker new! w onMessage: #test do: [:i | Transcript show: i]! w start! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:03' prior: 33583086! receive | message | message := nil. [ (self queue isEmpty) ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! w stop! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:04' prior: 33585522! stop self active: false! ! Smalltalk renameClassNamed: #STMWorker as: #STMActor! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:06'! benchStmActor | a1 a2 | ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:06' prior: 33586238! 
benchStmActor | a1 a2 | a1 := STMActor new. a2 := STMActor new. ! ! 1 printString! 1 printString! 1 printString! '1'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:14' prior: 33586352! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum2 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:14' prior: 33586563! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:15' prior: 33586879! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ] a start. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:18' prior: 33587197! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1.! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33587525! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. 
b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1. (1 to: 1000) do: [:i | SPyVM print: '.']! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33588158! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1. (1 to: 1000) do: [:i | SPyVM print: '.'] a stop. b stop.! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33588833! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1. (1 to: 1000) do: [:i | SPyVM print: '.']. a stop. b stop.! ! ----SNAPSHOT----{17 January 2014 . 1:19:41 am} Squeak4.5-12568.image priorSource: 18267! ----SNAPSHOT----{17 January 2014 . 1:23 am} Squeak4.5-12568.image priorSource: 35706! 10 benchStmActor! ----QUIT/NOSAVE----{17 January 2014 . 1:24:53 am} Squeak4.5-12568.image priorSource: 35796! ----STARTUP----{17 January 2014 . 5:10:50 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:00' prior: 33566346! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{17 January 2014 . 7:00:29 pm} Squeak4.5-12568.image priorSource: 35796! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:01' prior: 33566996! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:01' prior: 33591037! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{17 January 2014 . 7:01:14 pm} Squeak4.5-12568.image priorSource: 36433! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:18' prior: 33591357! benchStmParallel | sum num threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:20' prior: 33591767! benchStmParallel | sum num threads | num := self \\ 100. sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:20' prior: 33592091! benchStmParallel | sum num threads max | num := self \\ 100. sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. 
^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:20' prior: 33592436! benchStmParallel | sum num threads max | num := self \\ 100. max := self - num. sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33592785! benchStmParallel | sum num threads max | num := self \\ 100. max := self - num. sum := 0. threads := (0 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33590615! benchStmAtomic | sum threads max num | num := self \\ 100. max := self - num. sum := 0. sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33593523! benchStmAtomic | sum threads max num | num := self \\ 100. max := self - num. sum := 0. sum := 0. threads := (0 to: num) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33593917! benchStmAtomic | sum threads max num | num := self \\ 100. max := self - num. sum := 0. sum := 0. threads := (0 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33594313! benchStmAtomic | sum threads max num | num := self \\ 100. max := self - num. sum := 0. 
threads := (0 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{17 January 2014 . 7:21:58 pm} Squeak4.5-12568.image priorSource: 37163! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:25' prior: 33572325! benchStmFuture | sum num threads max futures | num := self \\ 100. max := self - num. sum := 0. futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:25' prior: 33595178! benchStmFuture | sum num max futures | num := self \\ 100. max := self - num. sum := 0. futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:25' prior: 33595523! benchStmFuture | sum num max futures | num := self \\ 100. max := self - num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:25' prior: 33595860! benchStmFuture | sum num max futures | num := self \\ 100. max := self - num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:26' prior: 33596199! benchStmFuture | sum num max futures | num := self \\ 100. max := (self - num) \\ num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:26' prior: 33596537! 
benchStmFuture | sum num max futures | num := self \\ 100. max := (self - num) // num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:26' prior: 33596884! benchStmFuture | sum num max futures | num := self \\ 100. max := self // num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! ----SNAPSHOT----{17 January 2014 . 7:26:27 pm} Squeak4.5-12568.image priorSource: 40574! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:20' prior: 33597231! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! Time now! Time now! Time now! Time now! Time now asNanoSeconds! Time now asNanoSeconds! Time now asNanoSeconds! Time now asNanoSeconds! Time now asNanoSeconds! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:25' prior: 33597660! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: 'µs'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:25' prior: 33598176! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: 'µs inside method:'. 
SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:25' prior: 33598657! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] µs inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:26' prior: 33599153! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! ----SNAPSHOT----{18 January 2014 . 3:26:07 pm} Squeak4.5-12568.image priorSource: 43056! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 16:30' prior: 33599946! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. 
SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 16:31' prior: 33600550! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 16:36' prior: 33601163! benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! ----SNAPSHOT----{18 January 2014 . 4:36:36 pm} Squeak4.5-12568.image priorSource: 45946! ----SNAPSHOT----{18 January 2014 . 4:36:48 pm} Squeak4.5-12568.image priorSource: 47883! ----STARTUP----{18 January 2014 . 10:05:52 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:06' prior: 33593154! benchStmParallel | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (0 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. 
] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:06' prior: 33602684! benchStmParallel | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:07' prior: 33603203! benchStmParallel | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:07' prior: 33594707! benchStmAtomic | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | [sum := sum + k.] atomic value. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! ----SNAPSHOT----{18 January 2014 . 10:07:37 pm} Squeak4.5-12568.image priorSource: 47973! ----SNAPSHOT----{18 January 2014 . 10:48:10 pm} Squeak4.5-12568.image priorSource: 50518! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:59' prior: 33601776! benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | |s| s := 0. [(1 to: max) do: [:i | s := s + 1] ] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:59'! benchStmFuture2 | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | |s| s := 0. [(1 to: max) do: [:i | s := s + 1] ] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:59' prior: 33605214! benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | |s| s := 0. [(1 to: max) do: [:i | s := s + 1] ] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:59' prior: 33606505! 
benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | |s| s := 0. [(1 to: max) do: [:i | s := s + 1] ] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. From noreply at buildbot.pypy.org Thu Apr 3 11:33:07 2014 From: noreply at buildbot.pypy.org (amintos) Date: Thu, 3 Apr 2014 11:33:07 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk demo: Demo VM (without debug prints) and Game-of-Life image via "-m gol" Message-ID: <20140403093307.A31141C022D@cobra.cs.uni-duesseldorf.de> Author: amintos Branch: demo Changeset: r765:8898f3e243d6 Date: 2014-01-21 14:13 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/8898f3e243d6/ Log: Demo VM (without debug prints) and Game-of-Life image via "-m gol" diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -305,4 +305,139 @@ initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); simulateRounds: 5. - ! ! ----End fileIn of a stream----! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 10:55'! gol STMSimulation benchmark! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 10:56' prior: 33632815! gol STMSimulation benchmark printString! ! ----SNAPSHOT----{21 January 2014 . 10:56:17 am} Squeak4.5-12568.image priorSource: 63448! ^STMSimulation benchmark printString! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 10:59' prior: 33632930! gol ^STMSimulation benchmark printString! ! ----QUIT/NOSAVE----{21 January 2014 . 11:00:27 am} Squeak4.5-12568.image priorSource: 78543! ----STARTUP----{21 January 2014 . 11:00:33 am} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 11:00' prior: 33632930! 
gol ^ STMSimulation benchmark printString! ! ----SNAPSHOT----{21 January 2014 . 11:00:44 am} Squeak4.5-12568.image priorSource: 78543! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 11:04'! gol2 ^ STMSimulation benchmark2 printString! ! Simulation benchmark! ----SNAPSHOT----{21 January 2014 . 11:06:45 am} Squeak4.5-12568.image priorSource: 79132! \ No newline at end of file + ! ! ----End fileIn of a stream----! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 10:55'! gol STMSimulation benchmark! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 10:56' prior: 33632815! gol STMSimulation benchmark printString! ! ----SNAPSHOT----{21 January 2014 . 10:56:17 am} Squeak4.5-12568.image priorSource: 63448! ^STMSimulation benchmark printString! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 10:59' prior: 33632930! gol ^STMSimulation benchmark printString! ! ----QUIT/NOSAVE----{21 January 2014 . 11:00:27 am} Squeak4.5-12568.image priorSource: 78543! ----STARTUP----{21 January 2014 . 11:00:33 am} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 11:00' prior: 33632930! gol ^ STMSimulation benchmark printString! ! ----SNAPSHOT----{21 January 2014 . 11:00:44 am} Squeak4.5-12568.image priorSource: 78543! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/21/2014 11:04'! gol2 ^ STMSimulation benchmark2 printString! ! Simulation benchmark! ----SNAPSHOT----{21 January 2014 . 11:06:45 am} Squeak4.5-12568.image priorSource: 79132! !STMSimulation methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:05'! print ! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'toma 1/21/2014 13:09' prior: 33619796! print + + | resultString | + resultString := ''. + (1 to: self height) do: [:y | + (1 to: self width) do: [ :x | + resultString := resultString , ((self data at: y at: x) = 1) ifTrue: ['[]'] ifFalse: ['. '].]. + resultString := resultString , Character cr ]. + ^ resultString ! ! 
!GameOfLifeField methodsFor: 'accessing' stamp: 'toma 1/21/2014 13:09' prior: 33634066! print + + | resultString | + resultString := ''. + (1 to: self height) do: [:y | + (1 to: self width) do: [ :x | + resultString := resultString , ((self data at: y at: x) > 0) ifTrue: ['[]'] ifFalse: ['. '].]. + resultString := resultString , Character cr ]. + ^ resultString ! ! !STMSimulation methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:11' prior: 33624205! simulateRounds: numberOfRounds + + | swapField | + + 1 to: numberOfRounds do: [ :roundNumber | + self simulateRound: roundNumber. + self processes do: [ :semaphore | semaphore wait. ]. + + SPyVM print: (self fieldNew print). + SPyVM print: Character cr. + + swapField := self field. + self field: self fieldNew. + self fieldNew: swapField. + ]. + + ^ self field! ! ----SNAPSHOT----{21 January 2014 . 1:11:46 pm} Squeak4.5-12568.image priorSource: 79361! !GameOfLifeField methodsFor: 'accessing' stamp: 'toma 1/21/2014 13:12' prior: 33634432! print + + | resultString | + resultString := ''. + (1 to: self height) do: [:y | + (1 to: self width) do: [ :x | + resultString := resultString , (((self data at: y at: x) > 0) ifTrue: ['[]'] ifFalse: ['. ']).]. + resultString := resultString , Character cr ]. + ^ resultString ! ! ----SNAPSHOT----{21 January 2014 . 1:13:04 pm} Squeak4.5-12568.image priorSource: 80734! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:32' prior: 33556598! wait self primWait! ! ----SNAPSHOT----{21 January 2014 . 1:44:03 pm} Squeak4.5-12568.image priorSource: 81192! !GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:48' prior: 33620913! gliderFieldRows: numberOfRows columns: numberOfColumns + + | newField | + newField := self new rows: numberOfRows columns: numberOfColumns. 
+ + newField + at: 8 at: 5 put: 1; + at: 9 at: 5 put: 1; + at: 10 at: 5 put: 1; + at: 10 at: 4 put: 1; + at: 9 at: 3 put: 1; at: 13 at: 13 put: 1; + at: 13 at: 14 put: 1; + at: 13 at: 15 put: 1; + at: 14 at: 13 put: 1; + at: 12 at: 14 put: 1. + + ^ newField! ! ----SNAPSHOT----{21 January 2014 . 1:48:18 pm} Squeak4.5-12568.image priorSource: 81400! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:52' prior: 33627087! benchmark + + ^ (1 to: 3) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! !Simulation class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:52' prior: 33631876! benchmark + + ^ (1 to: 3) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! !Simulation class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:52' prior: 33636825! benchmark + + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:52' prior: 33636623! benchmark + + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:58' prior: 33627744! standardSimulation: numberOfProcesses + SPyVM print:'================================================================'. + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! ----SNAPSHOT----{21 January 2014 . 1:58:07 pm} Squeak4.5-12568.image priorSource: 81998! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:58'! benchmark3 + + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 13:59' prior: 33637887! 
benchmark3 + | | + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 14:00' prior: 33638093! benchmark3 + | field | field := GameOfLifeField gliderFieldRows: 32 columns: 32. + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 14:00' prior: 33638307! benchmark3 + | field | field := GameOfLifeField gliderFieldRows: 32 columns: 32. + ^ (1 to: 4) collect: [ :i | + [ field := self standardSimulation: (2 raisedTo: i) withField: field] timeToRun ]! ! STMSimulation class removeSelector: #benchmark3! ----SNAPSHOT----{21 January 2014 . 2:00:54 pm} Squeak4.5-12568.image priorSource: 83278! !GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 14:04' prior: 33636027! gliderFieldRows: numberOfRows columns: numberOfColumns + + | newField | + newField := self new rows: numberOfRows columns: numberOfColumns. + + newField + at: 8 at: 5 put: 1; + at: 9 at: 5 put: 1; + at: 10 at: 5 put: 1; + at: 10 at: 4 put: 1; + at: 9 at: 3 put: 1; at: 13 at: 13 put: 1; + at: 13 at: 14 put: 1; + at: 13 at: 15 put: 1; + at: 14 at: 13 put: 1; + at: 12 at: 14 put: 1. numberOfRows > 16 ifTrue: [ newField at: 20 at: 3 put: 1; at: 20 at: 4 put: 1; at: 21 at: 2 put: 1; at: 21 at: 5 put: 1; at: 22 at: 3 put: 1; at: 22 at: 4 put: 1. ]. + + ^ newField! ! !GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 14:04' prior: 33639032! gliderFieldRows: numberOfRows columns: numberOfColumns + + | newField | + newField := self new rows: numberOfRows columns: numberOfColumns. + + newField + at: 8 at: 5 put: 1; + at: 9 at: 5 put: 1; + at: 10 at: 5 put: 1; + at: 10 at: 4 put: 1; + at: 9 at: 3 put: 1; at: 13 at: 13 put: 1; + at: 13 at: 14 put: 1; + at: 13 at: 15 put: 1; + at: 14 at: 13 put: 1; + at: 12 at: 14 put: 1. 
((numberOfRows > 16) and: (numberOfColumns > 16)) ifTrue: [ newField at: 20 at: 3 put: 1; at: 20 at: 4 put: 1; at: 21 at: 2 put: 1; at: 21 at: 5 put: 1; at: 22 at: 3 put: 1; at: 22 at: 4 put: 1. ]. + + ^ newField! ! !GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'toma 1/21/2014 14:05' prior: 33639731! gliderFieldRows: numberOfRows columns: numberOfColumns + + | newField | + newField := self new rows: numberOfRows columns: numberOfColumns. + + newField + at: 8 at: 5 put: 1; + at: 9 at: 5 put: 1; + at: 10 at: 5 put: 1; + at: 10 at: 4 put: 1; + at: 9 at: 3 put: 1; at: 13 at: 13 put: 1; + at: 13 at: 14 put: 1; + at: 13 at: 15 put: 1; + at: 14 at: 13 put: 1; + at: 12 at: 14 put: 1. ((numberOfRows > 16) and: (numberOfColumns > 16)) ifTrue: [ newField at: 20 at: 3 put: 1; at: 20 at: 4 put: 1; at: 21 at: 2 put: 1; at: 21 at: 5 put: 1; at: 22 at: 3 put: 1; at: 22 at: 4 put: 1; at: 20 at: 20 put: 1; at: 20 at: 21 put: 1; at: 20 at: 22 put: 1. ]. + + ^ newField! ! ----SNAPSHOT----{21 January 2014 . 2:05:40 pm} Squeak4.5-12568.image priorSource: 84405! \ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index 457742e624345214ad96d9f0cc623b4f97ce29aa..c82053adc6b6192d6d257485b467b335d26d49a3 GIT binary patch [cut] diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -14,6 +14,8 @@ pass from rpython.rlib import rthread +THREAD_DEBUG = False + class MissingBytecode(Exception): """Bytecode not implemented yet.""" def __init__(self, bytecodename): @@ -100,7 +102,7 @@ # HUGE RACE CONDITON!!! 
def bootstrap(): - print "New thread reporting" + #print "New thread reporting" interp = bootstrapper.interp w_frame = bootstrapper.w_frame w_stm_process = bootstrapper.w_stm_process @@ -179,17 +181,17 @@ new_interp.interrupt_check_counter = self.interrupt_check_counter new_interp.trace_proxy = self.trace_proxy - print 'Interpreter state copied' + #print 'Interpreter state copied' # bootstrapping from (lock-guarded) global state: bootstrapper.acquire(new_interp, w_frame, w_stm_process) - print "Thread initialized" + #print "Thread initialized" # TODO: Deadlocks if the thread before died without calling bootstrapper.release() rthread.start_new_thread(bootstrapper.bootstrap, ()) - print "Parent interpreter resuming" + #print "Parent interpreter resuming" def interpret_with_w_frame(self, w_frame, may_context_switch=True): - print "Interpreter starting" + if THREAD_DEBUG: print "[Thread] Interpreter starting" rstm.set_transaction_length(10000) # from pypy try: self.loop(w_frame) @@ -276,7 +278,7 @@ # gonna go parallel! 
(triggered by primitive) except StmProcessFork, f: - print "Interpreter loop about to fork" + #print "Interpreter loop about to fork" self.fork(f.w_frame, f.w_stm_process) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1416,9 +1416,10 @@ #print "STM_WAIT primitive called" # wait(0) behaves like a barrier, it waits for but does not acquire the lock + #print "[Thread] wait" wrapper.StmProcessWrapper(interp.space, w_rcvr).wait(0, 'primitive') - print "STM Rendezvous" - print "Should break: %s" % rstm.should_break_transaction() + #print "[Thread] join" + #print "Should break: %s" % rstm.should_break_transaction() @expose_primitive(STM_ATOMIC_ENTER, unwrap_spec=[object], no_result=True) def func(interp, s_frame, w_rcvr): diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -182,7 +182,7 @@ w_frame = self.suspended_context() assert isinstance(w_frame, model.W_PointersObject) - print "Breaking interpreter loop for forking" + #print "Breaking interpreter loop for forking" # we need to pass control to the interpreter loop here self.store_lock(1) raise StmProcessFork(w_frame, self._w_self) From noreply at buildbot.pypy.org Thu Apr 3 11:33:19 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 3 Apr 2014 11:33:19 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stm-c4: merge stm work from seminar Message-ID: <20140403093319.A72BA1C022D@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: stm-c4 Changeset: r766:ff8f7dcf99b5 Date: 2014-04-02 12:03 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ff8f7dcf99b5/ Log: merge stm work from seminar diff too long, truncating to 2000 out of 364596 lines diff --git a/images/Integer-benchStmAtomic.st b/images/Integer-benchStmAtomic.st new file mode 100644 --- /dev/null +++ b/images/Integer-benchStmAtomic.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 
10:59:50 am'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:07'! benchStmAtomic | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | [sum := sum + k.] atomic value. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! \ No newline at end of file diff --git a/images/Integer-benchStmFuture.st b/images/Integer-benchStmFuture.st new file mode 100644 --- /dev/null +++ b/images/Integer-benchStmFuture.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 10:59:37 am'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 16:36'! benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! \ No newline at end of file diff --git a/images/Integer-benchStmParallel.st b/images/Integer-benchStmParallel.st new file mode 100644 --- /dev/null +++ b/images/Integer-benchStmParallel.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 10:59:46 am'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:07'! benchStmParallel | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. 
SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! \ No newline at end of file diff --git a/images/SPyVM-GameOfLife b/images/SPyVM-GameOfLife new file mode 100644 --- /dev/null +++ b/images/SPyVM-GameOfLife @@ -0,0 +1,530 @@ +Object subclass: #GameOfLifeField + instanceVariableNames: 'data height width' + classVariableNames: '' + poolDictionaries: '' + category: 'VM-GameOfLife'! + +!GameOfLifeField methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:23'! +cellAliveAt: x at: y + + ^ (self at: x at: y) = 1! ! + +!GameOfLifeField methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:14'! +rows: numberOfRows columns: numberOfColumns + + self height: numberOfRows. + self width: numberOfColumns. + self data: (Matrix rows: numberOfRows columns: numberOfColumns element: 0). + ! ! + + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:25'! +at: x at:y + + ((x < 1) or: [x > self width]) ifTrue: [ ^ 0 ]. + ((y < 1) or: [y > self height]) ifTrue: [ ^ 0 ]. + + ^ self data at: y at: x! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:25'! +at: x at:y put: aValue + + self data at: y at: x put: aValue.! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 22:02'! +atRow: rowNumber put: aRow + + self data atRow: rowNumber put: aRow! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +data + + ^ data! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +data: anObject + + data := anObject! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +height + + ^ height! ! 
+ +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +height: anObject + + height := anObject! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:52'! +print + + | resultString | + resultString := ''. + (1 to: self height) do: [:y | + (1 to: self width) do: [ :x | + resultString := resultString , (self data at: y at: x).]. + resultString := resultString , Character cr ]. + ^ resultString ! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +rowSlice: sliceSize collect: aBlock + + ! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:28'! +rowwiseFrom: startRow to: endRow collect: aBlock + + | newField | + newField := GameOfLifeFieldSlice from: startRow to: endRow width: self width. + (startRow to: endRow) do: [ :y | + (1 to: self width) do: [ :x | newField at: x at: y put: (aBlock value: self value: x value: y) ] ]. + ^ newField! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +width + + ^ width! ! + +!GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45'! +width: anObject + + width := anObject! ! + +"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! + +GameOfLifeField class + instanceVariableNames: ''! + +!GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:44'! +gliderFieldRows: numberOfRows columns: numberOfColumns + + | newField | + newField := self new rows: numberOfRows columns: numberOfColumns. + + newField + at: 8 at: 5 put: 1; + at: 9 at: 5 put: 1; + at: 10 at: 5 put: 1; + at: 10 at: 4 put: 1; + at: 9 at: 3 put: 1. + + ^ newField! ! + +!GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 20:43'! +rows: numberOfRows columns: numberOfColumns + + ^ self new rows: numberOfRows columns: numberOfColumns! ! + + +GameOfLifeField subclass: #GameOfLifeFieldSlice + instanceVariableNames: 'startRow endRow' + classVariableNames: '' + poolDictionaries: '' + category: 'VM-GameOfLife'! 
+ +!GameOfLifeFieldSlice methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 10:22'! +from: startRow to: endRow width: width + + self startRow: startRow; + endRow: endRow; + width: width; + height: (endRow - startRow + 1); + data: (Matrix rows: (endRow - startRow + 1) columns: width). + + ^ self! ! + +!GameOfLifeFieldSlice methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:31'! +rowwiseDo: aBlock + + self startRow to: self endRow do: [ :rowNumber | + aBlock value: rowNumber value: (self data atRow: (rowNumber - self startRow) + 1). + ].! ! + + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:30'! +at: x at:y put: aValue + + self data at: y + 1 - self startRow at: x put: aValue.! ! + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11'! +endRow + + ^ endRow! ! + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11'! +endRow: anObject + + endRow := anObject! ! + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11'! +startRow + + ^ startRow! ! + +!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11'! +startRow: anObject + + startRow := anObject! ! + +"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! + +GameOfLifeFieldSlice class + instanceVariableNames: ''! + +!GameOfLifeFieldSlice class methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 20:53'! +from: startRow to: endRow width: width + + ^ self new from: startRow to: endRow width: width! ! + + +Object subclass: #STMSimulation + instanceVariableNames: 'processes field numberOfProcesses fieldSlices fieldNew' + classVariableNames: '' + poolDictionaries: '' + category: 'VM-GameOfLife'! + +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +initialField: aGameOfLifeField + + self field: aGameOfLifeField. + self fieldNew: (GameOfLifeField rows: (aGameOfLifeField height) columns: (aGameOfLifeField width)).! ! 
+ +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:50'! +simulateRound: roundNumber + + self processes: ((1 to: self numberOfProcesses) collect: [ :processNumber | + [| rows | + rows := self startProcess: processNumber. + rows rowwiseDo: [ :rowNumber :aRow | self fieldNew atRow: rowNumber put: aRow ]] parallelFork. ]). +! ! + +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +simulateRounds: numberOfRounds + + | swapField | + + 1 to: numberOfRounds do: [ :roundNumber | + self simulateRound: roundNumber. + self processes do: [ :semaphore | semaphore wait. ]. + + "Transcript show: self fieldNew print. + Transcript show: Character cr." + + swapField := self field. + self field: self fieldNew. + self fieldNew: swapField. + ]. + + ^ self field! ! + +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +startProcess: processNumber + + | endOfSlice slice startOfSlice | + slice := (self field height / self numberOfProcesses). + startOfSlice := ((processNumber - 1) * slice) + 1. + endOfSlice := processNumber * slice. + + ^ self field rowwiseFrom: startOfSlice + to: endOfSlice + collect: [ :tempField :x :y | self thumbUpOrDownAt: x at: y on: tempField ] + + + ! ! + +!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +thumbUpOrDownAt: x at: y on: tempField + | liveCellCount | + + liveCellCount := (tempField at: x - 1 at: y - 1) + + (tempField at: x + 0 at: y - 1) + + (tempField at: x + 1 at: y - 1) + + (tempField at: x - 1 at: y + 0) + + (tempField at: x + 1 at: y + 0) + + (tempField at: x - 1 at: y + 1) + + (tempField at: x + 0 at: y + 1) + + (tempField at: x + 1 at: y + 1). + + (tempField cellAliveAt: x at: y) + ifTrue: [((2 = liveCellCount) + or: [liveCellCount = 3]) + ifTrue: [^ 1] + ifFalse: [^ 0]] + ifFalse: [(liveCellCount = 3) + ifTrue: [^ 1] + ifFalse: [^ 0]]! ! + + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +field + + ^ field! ! 
+ +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +field: anObject + + field := anObject! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +fieldNew + + ^ fieldNew! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +fieldNew: anObject + + fieldNew := anObject! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +fieldSlices + + ^ fieldSlices! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +fieldSlices: anObject + + fieldSlices := anObject! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +numberOfProcesses + + ^ numberOfProcesses! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +numberOfProcesses: aNumber + + numberOfProcesses := aNumber + ! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +processes + + ^ processes! ! + +!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44'! +processes: anObject + + processes := anObject! ! + + +!STMSimulation methodsFor: 'initialize-release' stamp: 'pre 1/21/2014 09:44'! +initialize + + self processes: OrderedCollection new. + ! ! + +"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! + +STMSimulation class + instanceVariableNames: ''! + +!STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +benchmark + + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! + +!STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +benchmark2 + + ^ (1 to: 5) collect: [ :i | + [ self standardSimulation2: (2 raisedTo: i) ] timeToRun ]! ! + +!STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +standardSimulation2: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! 
+ +!STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44'! +standardSimulation: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! + + +Object subclass: #Simulation + instanceVariableNames: 'processes field numberOfProcesses fieldSlices fieldNew' + classVariableNames: '' + poolDictionaries: '' + category: 'VM-GameOfLife'! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:37'! +initialField: aGameOfLifeField + + self field: aGameOfLifeField. + self fieldNew: (GameOfLifeField rows: (aGameOfLifeField height) columns: (aGameOfLifeField width)).! ! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:44'! +simulateRound: roundNumber + + self processes: ((1 to: self numberOfProcesses) collect: [ :processNumber | + | semaphore | + semaphore := Semaphore new. + [| rows | + rows := self startProcess: processNumber. + rows rowwiseDo: [ :rowNumber :aRow | self fieldNew atRow: rowNumber put: aRow ]. + semaphore signal] fork. + semaphore ]). +! ! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 14:00'! +simulateRounds: numberOfRounds + + | swapField | + + 1 to: numberOfRounds do: [ :roundNumber | + self simulateRound: roundNumber. + self processes do: [ :semaphore | semaphore wait. ]. + + "Transcript show: self fieldNew print. + Transcript show: Character cr." + + swapField := self field. + self field: self fieldNew. + self fieldNew: swapField. + ]. + + ^ self field! ! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:34'! +startProcess: processNumber + + | endOfSlice slice startOfSlice | + slice := (self field height / self numberOfProcesses). + startOfSlice := ((processNumber - 1) * slice) + 1. + endOfSlice := processNumber * slice. 
+ + ^ self field rowwiseFrom: startOfSlice + to: endOfSlice + collect: [ :tempField :x :y | self thumbUpOrDownAt: x at: y on: tempField ] + + + ! ! + +!Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 10:48'! +thumbUpOrDownAt: x at: y on: tempField + | liveCellCount | + + liveCellCount := (tempField at: x - 1 at: y - 1) + + (tempField at: x + 0 at: y - 1) + + (tempField at: x + 1 at: y - 1) + + (tempField at: x - 1 at: y + 0) + + (tempField at: x + 1 at: y + 0) + + (tempField at: x - 1 at: y + 1) + + (tempField at: x + 0 at: y + 1) + + (tempField at: x + 1 at: y + 1). + + (tempField cellAliveAt: x at: y) + ifTrue: [((2 = liveCellCount) + or: [liveCellCount = 3]) + ifTrue: [^ 1] + ifFalse: [^ 0]] + ifFalse: [(liveCellCount = 3) + ifTrue: [^ 1] + ifFalse: [^ 0]]! ! + + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04'! +field + + ^ field! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04'! +field: anObject + + field := anObject! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 22:06'! +fieldNew + + ^ fieldNew! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 22:06'! +fieldNew: anObject + + fieldNew := anObject! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:30'! +fieldSlices + + ^ fieldSlices! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:30'! +fieldSlices: anObject + + fieldSlices := anObject! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:09'! +numberOfProcesses + + ^ numberOfProcesses! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:09'! +numberOfProcesses: aNumber + + numberOfProcesses := aNumber + ! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04'! +processes + + ^ processes! ! + +!Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04'! +processes: anObject + + processes := anObject! ! + + +!Simulation methodsFor: 'initialize-release' stamp: 'pre 1/19/2014 20:04'! 
+initialize + + self processes: OrderedCollection new. + ! ! + +"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! + +Simulation class + instanceVariableNames: ''! + +!Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 14:08'! +benchmark + + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! + +!Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 14:09'! +benchmark2 + + ^ (1 to: 5) collect: [ :i | + [ self standardSimulation2: (2 raisedTo: i) ] timeToRun ]! ! + +!Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:59'! +standardSimulation2: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! + +!Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:59'! +standardSimulation: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! diff --git a/images/STMActor.st b/images/STMActor.st new file mode 100644 --- /dev/null +++ b/images/STMActor.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:01:35 am'! Object subclass: #STMActor instanceVariableNames: 'queue handlers active' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:47'! initialize self handlers: Dictionary new. self queue: LinkedList new.! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32'! onMessage: aSymbol do: aBlock self handlers at: aSymbol put: aBlock! ! 
!STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:03'! receive | message | message := nil. [ (self queue isEmpty) ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:56'! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58'! send: aSymbol with: anArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58'! send: aSymbol with: anArgument and: anotherArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58'! send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! start [self loop] parallelFork! ! !STMActor methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:04'! stop self active: false! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active ^ active! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active: anObject active := anObject! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers ^ handlers! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers: anObject handlers := anObject! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue ^ queue! ! !STMActor methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56'! queue: anObject queue := anObject! ! 
\ No newline at end of file diff --git a/images/STMAtomic.st b/images/STMAtomic.st new file mode 100644 --- /dev/null +++ b/images/STMAtomic.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:01:40 am'! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28'! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01'! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! STMAtomic class instanceVariableNames: ''! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! \ No newline at end of file diff --git a/images/STMFuture.st b/images/STMFuture.st new file mode 100644 --- /dev/null +++ b/images/STMFuture.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:01:42 am'! Object subclass: #STMFuture instanceVariableNames: 'block process result' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block ^ block! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block: anObject block := anObject! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process ^ process! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process: anObject process := anObject! ! 
!STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result ^ result! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result: anObject result := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23'! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ self error: 'Future already invoked' ]! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26'! value self process ifNotNil: [ self wait. ^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26'! wait self process wait.! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! STMFuture class instanceVariableNames: ''! !STMFuture class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:37'! invoke: aBlock ^(STMFuture new) block: aBlock; invoke; yourself! ! \ No newline at end of file diff --git a/images/STMMessage.st b/images/STMMessage.st new file mode 100644 --- /dev/null +++ b/images/STMMessage.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:01:44 am'! Object subclass: #STMMessage instanceVariableNames: 'messageName arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments ^ arguments! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments: anObject arguments := anObject! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName ^ messageName! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName: anObject messageName := anObject! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! STMMessage class instanceVariableNames: ''! !STMMessage class methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:40'! 
named: aSymbol withArgs: anArray ^(self new) messageName: aSymbol; arguments: anArray; yourself! ! \ No newline at end of file diff --git a/images/STMProcess.st b/images/STMProcess.st new file mode 100644 --- /dev/null +++ b/images/STMProcess.st @@ -0,0 +1,1 @@ +'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 20 January 2014 at 11:05:53 am'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! fork Transcript show: '* STM Process did not fork *' , Character cr. self resume! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! initialize lock := 1. super initialize! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! signal Transcript show: ' Failed to signal process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! wait self primWait ! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! STMProcess class instanceVariableNames: ''! !STMProcess class methodsFor: 'as yet unclassified' stamp: 'toma 1/20/2014 11:05'! forContext: t1 priority: t2 | t3 | t3 := self new. t3 suspendedContext: t1. t3 priority: t2. ^ t3 ! ! \ No newline at end of file diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -1,39 +1,443 @@ -'From Squeak4.1 of 17 April 2010 [latest update: #9957] on 17 April 2010 at 5:22:05 pm'! ----STARTUP----{17 April 2010 . 5:21:54 pm} as C:\Squeak\4.0\4.1-final\Squeak4.1.image! Smalltalk appendChangesTo: 'SqueakV41.sources'.! ----QUIT----{17 April 2010 . 5:22:11 pm} Squeak4.1.image priorSource: 89! ----STARTUP----{24 May 2010 . 8:07:26 pm} as C:\Squeak\4.2\Squeak4.1.image! 
----SNAPSHOT----{24 May 2010 . 8:08:14 pm} Squeak4.2.image priorSource: 229! !HashedCollection commentStamp: 'ul 4/12/2010 22:37' prior: 0! I am an abstract collection of objects that implement hash and equality in a consitent way. This means that whenever two objects are equal, their hashes have to be equal too. If two objects are equal then I can only store one of them. Hashes are expected to be integers (preferably SmallIntegers). I also expect that the objects contained by me do not change their hashes. If that happens, hash invariants have to be re-established, which can be done by #rehash. Since I'm abstract, no instances of me should exist. My subclasses should implement #scanFor:, #fixCollisionsFrom: and #noCheckNoGrowFillFrom:. Instance Variables array: (typically Array or WeakArray) tally: (non-negative) array - An array whose size is a prime number, it's non-nil elements are the elements of the collection, and whose nil elements are empty slots. There is always at least one nil. In fact I try to keep my "load" at 75% or less so that hashing will work well. tally - The number of elements in the collection. The array size is always greater than this. Implementation details: I implement a hash table which uses open addressing with linear probing as the method of collision resolution. Searching for an element or a free slot for an element is done by #scanFor: which should return the index of the slot in array corresponding to it's argument. When an element is removed #fixCollisionsFrom: should rehash all elements in array between the original index of the removed element, wrapping around after the last slot until reaching an empty slot. My maximum load factor (75%) is hardcoded in #atNewIndex:put:, so it can only be changed by overriding that method. When my load factor reaches this limit I replace my array with a larger one (see #grow) ensuring that my load factor will be less than or equal to 50%. 
The new array is filled by #noCheckNoGrowFillFrom: which should use #scanForEmptySlotFor: instead of #scanFor: for better performance. I do not shrink. ! !WeakKeyDictionary methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Collection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:33' prior: 18816249! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger timesRepeat: [self add: newObject]. ^ newObject! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! compactAll "HashedCollection compactAll" self allSubclassesDo: #compactAllInstances! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! compactAllInstances "Do not use #allInstancesDo: because compact may create new instances." self allInstances do: #compact! ! !HashedCollection class methodsFor: 'sizing' stamp: 'ul 4/7/2010 00:17' prior: 55063414! goodPrimes "Answer a sorted array of prime numbers less than one billion that make good hash table sizes. Should be expanded as needed. 
See comments below code" ^#( 5 11 17 23 31 43 59 79 107 149 199 269 359 479 641 857 1151 1549 2069 2237 2423 2617 2797 2999 3167 3359 3539 3727 3911 4441 4787 5119 5471 5801 6143 6521 6827 7177 7517 7853 8783 9601 10243 10867 11549 12239 12919 13679 14293 15013 15731 17569 19051 20443 21767 23159 24611 25847 27397 28571 30047 31397 35771 38201 40841 43973 46633 48989 51631 54371 57349 60139 62969 70589 76091 80347 85843 90697 95791 101051 106261 111143 115777 120691 126311 140863 150523 160969 170557 181243 190717 201653 211891 221251 232591 242873 251443 282089 300869 321949 341227 362353 383681 401411 422927 443231 464951 482033 504011 562621 605779 647659 681607 723623 763307 808261 844709 886163 926623 967229 1014617 1121987 1201469 1268789 1345651 1429531 1492177 1577839 1651547 1722601 1800377 1878623 1942141 2028401 2242727 2399581 2559173 2686813 2836357 3005579 3144971 3283993 3460133 3582923 3757093 3903769 4061261 4455361 4783837 5068529 5418079 5680243 6000023 6292981 6611497 6884641 7211599 7514189 7798313 8077189 9031853 9612721 10226107 10745291 11338417 11939203 12567671 13212697 13816333 14337529 14938571 15595673 16147291 17851577 18993941 20180239 21228533 22375079 23450491 24635579 25683871 26850101 27921689 29090911 30153841 31292507 32467307 35817611 37983761 40234253 42457253 44750177 46957969 49175831 51442639 53726417 55954637 58126987 60365939 62666977 64826669 71582779 76039231 80534381 84995153 89500331 93956777 98470819 102879613 107400389 111856841 116365721 120819287 125246581 129732203 143163379 152076289 161031319 169981667 179000669 187913573 196826447 205826729 214748357 223713691 232679021 241591901 250504801 259470131 285162679 301939921 318717121 335494331 352271573 369148753 385926017 402603193 419480419 436157621 453034849 469712051 486589307 503366497 520043707 570475349 603929813 637584271 671138659 704693081 738247541 771801929 805356457 838910803 872365267 905919671 939574117 973128521 1006682977 1040137411 1073741833) 
"The above primes past 2069 were chosen carefully so that they do not interact badly with 1664525 (used by hashMultiply), and so that gcd(p, (256^k) +/- a) = 1, for 0 cost ifTrue: [ cost := newCost ] ]. cost ]."! ! !HashedCollection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:38' prior: 53647096! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger < 1 ifTrue: [ ^newObject ]. ^self add: newObject "I can only store an object once." ! ! !HashedCollection methodsFor: 'private' stamp: 'ul 4/12/2010 22:53'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: tally * 4 // 3. self growTo: newCapacity! ! !WeakSet methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Symbol class methodsFor: 'class initialization' stamp: 'ul 4/13/2010 00:00' prior: 30357901! compactSymbolTable "Reduce the size of the symbol table so that it holds all existing symbols with 25% free space." | oldSize | Smalltalk garbageCollect. oldSize := SymbolTable capacity. SymbolTable compact. ^(oldSize - SymbolTable capacity) printString, ' slot(s) reclaimed'! ! KeyedIdentitySet class removeSelector: #goodPrimes! WeakIdentityKeyDictionary class removeSelector: #goodPrimes! IdentitySet class removeSelector: #goodPrimes! IdentityDictionary class removeSelector: #goodPrimes! "Collections"! !HashedCollectionTest methodsFor: 'test - class - sizing' stamp: 'ul 4/7/2010 00:18' prior: 58761579! testPrimes: primes | badPrimes | badPrimes := #(3 5 71 139 479 5861 277421). "These primes are less than the hashMultiply constant (1664525) and 1664525 \\ prime is close to 0 (mod prime). 
The following snippet reproduces these numbers: | hashMultiplyConstant | hashMultiplyConstant := 1 hashMultiply. (Integer primesUpTo: hashMultiplyConstant) select: [ :each | | remainder | remainder := hashMultiplyConstant \\ each. remainder <= 1 or: [ remainder + 1 = each ] ]." self assert: primes isSorted. primes do: [ :each | self assert: each isPrime. self deny: (each > 2069 and: [ badPrimes includes: each ]) ]. self assert: ( primes select: [ :p | | result | result := false. p > 2069 ifTrue: [ 1 to: 8 do: [ :k | 1 to: 32 do: [ :a | (p gcd: (256 raisedTo: k) + a) = 1 ifFalse: [ result := true ]. (p gcd: (256 raisedTo: k) - a) = 1 ifFalse: [ result := true ] ] ] ]. result ]) isEmpty.! ! HashedCollectionTest removeSelector: #testGoodPrimesForIdentityBasedHashedCollections! "CollectionsTests"! !MCMczReader methodsFor: 'as yet unclassified' stamp: 'bf 4/18/2010 18:38' prior: 22938947! extractInfoFrom: dict ^MCWorkingCopy infoFromDictionary: dict cache: self infoCache! ! !MCWorkingCopy class methodsFor: 'as yet unclassified' stamp: 'bf 4/19/2010 00:39' prior: 23215403! infoFromDictionary: aDictionary cache: cache | id | id := (aDictionary at: #id) asString. ^ cache at: id ifAbsentPut: [MCVersionInfo name: (aDictionary at: #name ifAbsent: ['']) id: (UUID fromString: id) message: (aDictionary at: #message ifAbsent: ['']) date: ([Date fromString: (aDictionary at: #date)] ifError: [nil]) time: ([Time fromString: (aDictionary at: #time)] ifError: [nil]) author: (aDictionary at: #author ifAbsent: ['']) ancestors: (self ancestorsFromArray: (aDictionary at: #ancestors ifAbsent: []) cache: cache) stepChildren: (self ancestorsFromArray: (aDictionary at: #stepChildren ifAbsent: []) cache: cache)]! ! !MCVersionInfo methodsFor: 'converting' stamp: 'bf 4/18/2010 23:25' prior: 23175569! 
asDictionary ^ Dictionary new at: #name put: name; at: #id put: id asString; at: #message put: message; at: #date put: date; at: #time put: time; at: #author put: author; at: #ancestors put: (self ancestors collect: [:a | a asDictionary]); yourself! ! "Monticello"! !BlockContextTest methodsFor: 'running' stamp: 'md 9/6/2005 19:56' prior: 50431957! setUp super setUp. aBlockContext := [100 at 100 corner: 200 at 200]. contextOfaBlockContext := thisContext.! ! !BehaviorTest methodsFor: 'tests' stamp: 'md 2/18/2006 16:42' prior: 17365994! testBinding self assert: Object binding value = Object. self assert: Object binding key = #Object. self assert: Object class binding value = Object class. "returns nil for Metaclasses... like Encoder>>#associationFor:" self assert: Object class binding key = nil.! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53956757! testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #EmbeddedSourceQCompress ). self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer sourceCode: code. self assert: (trailer kind == #EmbeddedSourceZip ). newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53957691! testEmbeddingTempNames | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer tempNames: code. newTrailer := trailer testEncoding. 
self assert: (trailer kind == #TempsNamesQCompress ). self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer tempNames: code. self assert: (trailer kind == #TempsNamesZip ). newTrailer := trailer testEncoding. self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:17' prior: 53958613! testEncodingNoTrailer | trailer | trailer := CompiledMethodTrailer new. "by default it should be a no-trailer" self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:14' prior: 53959109! testEncodingSourcePointer | trailer | trailer := CompiledMethodTrailer new. CompiledMethod allInstancesDo: [:method | | ptr | trailer method: method. self assert: ( (ptr := method sourcePointer) == trailer sourcePointer). "the last bytecode index must be at 0" ptr ~= 0 ifTrue: [ self assert: (method endPC = trailer endPC) ]. ].! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53959564! testEncodingVarLengthSourcePointer | trailer newTrailer | trailer := CompiledMethodTrailer new. trailer sourcePointer: 1. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 1). trailer sourcePointer: 16r100000000000000. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 16r100000000000000). 
"the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53960108! testSourceByIdentifierEncoding | trailer id | trailer := CompiledMethodTrailer new. id := UUID new asString. trailer sourceIdentifier: id. self assert: (trailer kind == #SourceByStringIdentifier ). trailer := trailer testEncoding. self assert: (trailer kind == #SourceByStringIdentifier ). self assert: (trailer sourceIdentifier = id). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:49' prior: 53960643! testSourceBySelectorEncoding | trailer | trailer := CompiledMethodTrailer new. trailer setSourceBySelector. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CategorizerTest methodsFor: 'running' stamp: 'mtf 9/10/2007 10:10' prior: 18074036! setUp categorizer := Categorizer defaultList: #(a b c d e). categorizer classifyAll: #(a b c) under: 'abc'. categorizer addCategory: 'unreal'.! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074267! testClassifyNewElementNewCategory categorizer classify: #f under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') (''nice'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:18' prior: 18074541! testClassifyNewElementOldCategory categorizer classify: #f under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074806! 
testClassifyOldElementNewCategory categorizer classify: #e under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'') (''nice'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:54' prior: 18075078! testClassifyOldElementOldCategory categorizer classify: #e under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:22' prior: 18075341! testDefaultCategoryIsTransient "Test that category 'as yet unclassified' disapears when all it's elements are removed'" categorizer classifyAll: #(d e) under: #abc. self assert: categorizer printString = '(''abc'' a b c d e) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 15:15' prior: 18075669! testNullCategory "Test that category 'as yet unclassified' disapears when all it's elements are removed'" | aCategorizer | aCategorizer := Categorizer defaultList: #(). self assert: aCategorizer printString = '(''as yet unclassified'') '. self assert: aCategorizer categories = #('no messages'). aCategorizer classify: #a under: #b. self assert: aCategorizer printString = '(''b'' a) '. self assert: aCategorizer categories = #(b).! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18076194! testRemoveEmptyCategory categorizer removeCategory: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:55' prior: 18076430! testRemoveExistingElement categorizer removeElement: #a. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076673! testRemoveNonEmptyCategory self should: [categorizer removeCategory: #abc] raise: Error. 
self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076950! testRemoveNonExistingCategory categorizer removeCategory: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18077203! testRemoveNonExistingElement categorizer removeElement: #f. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 14:49' prior: 18077451! testRemoveThenRename categorizer removeCategory: #unreal. categorizer renameCategory: #abc toBe: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''unreal'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:14' prior: 18077736! testUnchanged self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! "KernelTests"! !SmalltalkImage methodsFor: 'accessing' stamp: 'ul 4/18/2010 22:22'! at: key ifPresentAndInMemory: aBlock "Lookup the given key in the receiver. If it is present, answer the value of evaluating the given block with the value associated with the key. Otherwise, answer nil." ^globals at: key ifPresentAndInMemory: aBlock! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:45'! image "Answer the object to query about the current object memory and execution environment." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:47'! imageFormatVersion "Answer an integer identifying the type of image. The image version number may identify the format of the image (e.g. 32 or 64-bit word size) or specific requirements of the image (e.g. block closure support required). This invokes an optional primitive that may not be available on all virtual machines." 
"Smalltalk image imageFormatVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveImageFormatVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:38'! interpreterSourceVersion "Answer a string corresponding to the version of the interpreter source. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, as distinct from the external platform source code, typically written in C and managed separately for each platform. An optional primitive is invoked that may not be available on all virtual machines." "Smalltalk vm interpreterSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveInterpreterSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! platformSourceVersion "Answer a string corresponding to the version of the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines." "Smalltalk vm platformSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitivePlatformSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'image' stamp: 'md 5/16/2006 12:34' prior: 58536670! version "Answer the version of this release." ^SystemVersion current version! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! versionLabel "Answer a string corresponding to the version of virtual machine. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, in addition to the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines. 
See also vmVersion, which answers a string identifying the image from which virtual machine sources were generated." "Smalltalk vm versionLabel" self notify: 'This virtual machine does not support the optional primitive #primitiveVMVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:15'! vm "Answer the object to query about virtual machine." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 1/4/2010 21:40' prior: 58537225! wordSize "Answer the size in bytes of an object pointer or word in the object memory. The value does not change for a given image, but may be modified by a SystemTracer when converting the image to another format. The value is cached in WordSize to avoid the performance overhead of repeatedly consulting the VM." "Smalltalk wordSize" ^ WordSize ifNil: [WordSize := [SmalltalkImage current vmParameterAt: 40] on: Error do: [4]]! ! "System"! !SMLoaderPlus commentStamp: 'btr 12/1/2006 15:16' prior: 0! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). It uses ToolBuilder to construct its window. You can open one with: SMLoaderPlus open Instance Variables categoriesToFilterIds: The set of categories to filter the packages list. filters: The set of filters to apply to the packages list. map: The model SqueakMap. packagesList: The list of packages from the map. selectedCategory: The current category. selectedItem: The selected package or release. window: The window, held only so we can reOpen.! !SMLoaderCategoricalPlus commentStamp: 'btr 12/4/2006 15:47' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategoricalPlus open! !SMLoader commentStamp: 'btr 11/30/2006 18:00' prior: 27913009! 
A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). You can open one with: SMLoader open! !SMLoaderCategorical commentStamp: 'btr 12/1/2006 15:16' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategorical open! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 18:06'! initialize Smalltalk at: #ToolBuilder ifPresent: [:tb | (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! openMenuString ^ 'SqueakMap Categories'! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! removeFromSystem (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self removeFromSystem: true! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString].! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:50'! buildFancyWith: aBuilder "Creates a variant of the window where the package pane is split between installed and uninstalled packages." | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. 
window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight)); yourself); add: ((self buildNotInstalledPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ (horizDivide / 2)); yourself); add: ((self buildInstalledPackagesListWith: builder) frame: (vertDivide @ (horizDivide / 2) corner: 1 @ horizDivide); yourself); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1); yourself); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. self setUpdatablePanesFrom: #(#installedPackageList #notInstalledPackageList ). currentPackageList := #notInstalled. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:56'! buildInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #installedPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:52'! buildNotInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #notInstalledPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! 
!SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:55'! buildWith: aBuilder | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight))); add: ((self buildPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ horizDivide)); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1)); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList ^currentPackageList! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList: aSymbol currentPackageList := aSymbol. self changed: #installButtonLabel.! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:55'! defaultLabel ^ 'Categorical ' , super defaultLabel! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:58'! installButtonLabel ^ self currentPackageList = #notInstalled ifTrue: ['Install the above package'] ifFalse: ['Remove the above package']! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:52'! installedPackageList ^self packageList select: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! 
installedPackagesListIndex ^ self currentPackageList = #installed ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! installedPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #installed ifTrue: [self currentPackageList: #installed. self changed: #currentPackageList]. self noteChanged! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! isOn ^false! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:53'! notInstalledPackageList ^self packageList reject: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! notInstalledPackagesListIndex ^ self currentPackageList = #notInstalled ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:03'! notInstalledPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #notInstalled ifTrue: [self currentPackageList: #notInstalled. self changed: #currentPackageList]. self changed: #packagesListIndex. "update my selection" self noteChanged. self contentsChanged! ! !SMLoaderCategoricalPlus methodsFor: 'private' stamp: 'btr 12/1/2006 17:53'! noteChanged self changed: #installedPackageList. self changed: #notInstalledPackageList. super noteChanged." self changed: #packageNameList. self changed: #packagesListIndex. self changed: #categoriesForPackage. self contentsChanged."! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:34'! packageList ^ self packages select: [:e | (e categories anySatisfy: [:cat | cat = self selectedCategory]) and: [(filters ifNil: [#()]) allSatisfy: [:currFilter | (self perform: currFilter) value: e]]]! ! !SMLoaderPlus class methodsFor: 'parts bin' stamp: 'btr 11/22/2006 15:02'! descriptionForPartsBin ^self partName: 'Package Loader' categories: #(Tools) documentation: 'SqueakMap UI' ! ! 
!SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47'! initialize "Hook us up in the world menu." "self initialize" Smalltalk at: #ToolBuilder ifPresent: [:tb | self registerInFlapsRegistry. (Preferences windowColorFor: #SMLoader) = Color white "not set" ifTrue: [ Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor) ]. (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [| oldCmds | oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader']. oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first]. TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]. DefaultFilters := OrderedCollection new. DefaultCategoriesToFilterIds := OrderedCollection new! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! initializedInstance ^ (ToolBuilder open: self new) extent: 400 at 400! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! new "Create a SqueakMap loader on the default map." ^self newOn: SMSqueakMap default! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! newOn: aMap "Create a SqueakMap loader on given map." ^super new on: aMap; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! newStandAlone ^ ToolBuilder open: self new! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:13'! open "Create and open a SqueakMap Loader." "SMLoaderPlus open" ^ (Smalltalk at: #ToolBuilder) open: self new! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! openMenuString ^ 'SqueakMap Catalog'! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:21'! openOn: aSqueakMap "Create and open a SqueakMap Loader on a given map." 
"self openOn: SqueakMap default" ^ (Smalltalk at: #ToolBuilder) open: (self newOn: aSqueakMap)! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:18'! prototypicalToolWindow ^ ToolBuilder open: self new; applyModelExtent; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:02'! registerInFlapsRegistry "Register the receiver in the system's flaps registry." self environment at: #Flaps ifPresent: [:cl | (cl respondsTo: #registerQuad:forFlapNamed:) ifTrue: [cl registerQuad: #(#SMLoader #prototypicalToolWindow 'Package Loader' 'The SqueakMap Package Loader' ) forFlapNamed: 'Tools']]! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! ! !SMLoaderPlus class methodsFor: 'window color' stamp: 'btr 11/22/2006 15:02'! windowColorSpecification "Answer a WindowColorSpec object that declares my preference." ^WindowColorSpec classSymbol: self name wording: 'Package Loader' brightColor: Color yellow muchLighter duller pastelColor: Color yellow veryMuchLighter duller helpMessage: 'The SqueakMap Package Loader'! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! addFiltersToMenu: aMenu | filterSymbol help | self filterSpecs do: [:filterArray | filterSymbol := filterArray second. help := filterArray third. aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol). aMenu balloonTextForLastItem: help]. aMenu addLine; addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages')) ! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! addSelectedCategoryAsFilter "Add a new filter that filters on the currently selected category. 
Make it enabled as default." categoriesToFilterIds add: self selectedCategory id! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 16:11'! askToLoadUpdates "Check how old the map is and ask to update it if it is older than 10 days or if there is no map on disk." | available | available := map isCheckpointAvailable. (available not or: [ (Date today subtractDate: (Date fromSeconds: (map directory directoryEntryFor: map lastCheckpointFilename) modificationTime)) > 3]) ifTrue: [ (self confirm: (available ifTrue: ['The map on disk is more than 10 days old, update it from the Internet?'] ifFalse: ['There is no map on disk, fetch it from the Internet?'])) ifTrue: [self loadUpdates]]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:43'! browseCacheDirectory "Open a FileList2 on the directory for the package or release." | item dir win | item := self selectedPackageOrRelease ifNil: [^ nil]. dir := item isPackage ifTrue: [map cache directoryForPackage: item] ifFalse: [map cache directoryForPackageRelease: item]. win := FileList2 morphicViewOnDirectory: dir. "withLabel: item name, ' cache directory'." win openInWorld! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildButtonBarWith: aBuilder ^ aBuilder pluggablePanelSpec new model: self; layout: #horizontal; children: (self commandSpecs select: [ :spec | spec fourth includes: #all] thenCollect: [ :spec | aBuilder pluggableActionButtonSpec new model: self; label: spec first; action: spec second; help: spec third; enabled: ((spec fourth includes: #item) ifTrue: [#hasSelectedItem]); yourself]); name: #buttonBar; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'! buildButtonNamed: labelText helpText: balloon action: action | btn | btn := PluggableButtonMorph on: self getState: nil action: action. 
btn color: Color transparent; hResizing: #shrinkWrap; vResizing: #spaceFill; label: labelText; setBalloonText: balloon; onColor: Color transparent offColor: Color transparent. ^ btn! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildCategoriesListWith: aBuilder "Create the hierarchical list holding the category tree." ^ aBuilder pluggableTreeSpec new model: self; roots: #categoryList; getSelectedPath: #selectedCategoryPath; getChildren: #categoryChildren:; hasChildren: #categoryHasChildren:; setSelected: #selectedCategory:; menu: #categoriesMenu:; label: #categoryLabel:; autoDeselect: true; wantsDrop: true; name: #categoriesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagePaneWith: aBuilder "Create the text area to the right in the loader." ^ aBuilder pluggableTextSpec new model: self; getText: #itemDescription; name: #packagePane; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagesListWith: aBuilder "Create the hierarchical list holding the packages and releases." ^ aBuilder pluggableTreeSpec new model: self; roots: #packageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; name: #packagesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildSearchPaneWith: aBuilder ^ aBuilder pluggableInputFieldSpec new model: self; selection: #searchSelection; getText: #searchText; setText: #findPackage:notifying:; name: #search; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:54'! buildWith: aBuilder "Create the package loader window." | buttonBarHeight vertDivide horizDivide | buttonBarHeight := 0.07. vertDivide := 0.6. horizDivide := 0.3. builder := aBuilder. 
window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight)); add: ((self buildSearchPaneWith: builder) frame: (0 @ buttonBarHeight corner: horizDivide @ (buttonBarHeight * 2))); add: ((self buildPackagesListWith: builder) frame: (0 @ (buttonBarHeight * 2) corner: horizDivide @ vertDivide)); add: ((self buildCategoriesListWith: builder) frame: (0 @ vertDivide corner: horizDivide @ 1)); add: ((self buildPackagePaneWith: builder) frame: (horizDivide @ buttonBarHeight corner: 1 @ 1)); yourself); yourself). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'! cachePackageReleaseAndOfferToCopy "Cache package release, then offer to copy it somewhere. Answer the chosen file's location after copy, or the cache location if no directory was chosen." | release installer newDir newName newFile oldFile oldName | release := self selectedPackageOrRelease. release isPackageRelease ifFalse: [ self error: 'Should be a package release!!']. installer := SMInstaller forPackageRelease: release. [UIManager default informUser: 'Caching ' , release asString during: [installer cache]] on: Error do: [:ex | | msg | msg := ex messageText ifNil: [ex asString]. self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs. ^nil ]. installer isCached ifFalse: [self inform: 'Download failed, see transcript for details'. ^nil]. oldName := installer fullFileName. newDir := FileList2 modalFolderSelector: installer directory. newDir ifNil: [ ^oldName ]. newDir = installer directory ifTrue: [ ^oldName ]. newName := newDir fullNameFor: installer fileName. newFile := FileStream newFileNamed: newName. newFile ifNil: [ ^oldName ]. newFile binary. 
oldFile := FileStream readOnlyFileNamed: oldName. oldFile ifNil: [ ^nil ]. oldFile binary. [[ newDir copyFile: oldFile toFile: newFile ] ensure: [ oldFile close. newFile close ]] on: Error do: [ :ex | ^oldName ]. ^newName! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! categoriesMenu: aMenu "Answer the categories-list menu." From noreply at buildbot.pypy.org Thu Apr 3 11:33:20 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 3 Apr 2014 11:33:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stm-c4: update to translate with stmgc-c7 Message-ID: <20140403093320.EC7CC1C022D@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: stm-c4 Changeset: r767:60b449663c44 Date: 2014-04-02 12:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/60b449663c44/ Log: update to translate with stmgc-c7 diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -227,10 +227,11 @@ if hasattr(rgc, "stm_is_enabled"): driver.config.translation.stm = True driver.config.translation.thread = True - driver.config.translation.gc = "stmgc" + # driver.config.translation.gc = "stmgc" driver.config.translation.gcrootfinder = 'stm' driver.config.translation.rweakref = True driver.config.translation.shared = False + driver.config.translation.cc = "clang" # driver.config.translation.jit = True print driver.config.translation From noreply at buildbot.pypy.org Thu Apr 3 11:33:22 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 3 Apr 2014 11:33:22 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stm-c4: merge 64bit-c2 branch Message-ID: <20140403093322.6C2521C022D@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: stm-c4 Changeset: r768:01f187f7c32b Date: 2014-04-02 12:51 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/01f187f7c32b/ Log: merge 64bit-c2 branch diff --git a/spyvm/constants.py b/spyvm/constants.py --- 
a/spyvm/constants.py +++ b/spyvm/constants.py @@ -144,7 +144,7 @@ "timerSemaphore" : SO_TIMER_SEMAPHORE, } -LONG_BIT = 32 +from rpython.rlib.rarithmetic import LONG_BIT TAGGED_MAXINT = 2 ** (LONG_BIT - 2) - 1 TAGGED_MININT = -2 ** (LONG_BIT - 2) diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -1,4 +1,3 @@ -from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.runicode import unicode_encode_utf_8 from rpython.rlib import jit diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -15,17 +15,25 @@ that create W_PointersObjects of correct size with attached shadows. """ import sys, weakref -from spyvm import constants, error, version +from spyvm import constants, error, system, version from spyvm.version import elidable_for_version from rpython.rlib import rrandom, objectmodel, jit, signature -from rpython.rlib.rarithmetic import intmask, r_uint, r_int +from rpython.rlib.rarithmetic import intmask, r_uint32, r_uint, r_int from rpython.rlib.debug import make_sure_not_resized from rpython.tool.pairtype import extendabletype from rpython.rlib.objectmodel import instantiate, compute_hash, import_from_mixin from rpython.rtyper.lltypesystem import lltype, rffi from rsdl import RSDL, RSDL_helper + +if system.IS_64BIT: + from rpython.rlib.rarithmetic import widen +else: + def widen(x): + return x + + class W_Object(object): """Root of Squeak model, abstract.""" _attrs_ = [] # no RPython-level instance variables allowed in W_Object @@ -170,7 +178,7 @@ return isinstance(self.value, int) and self.value < 0x8000 def lshift(self, space, shift): - from rpython.rlib.rarithmetic import ovfcheck, intmask, r_uint + from rpython.rlib.rarithmetic import ovfcheck, intmask # shift > 0, therefore the highest bit of upperbound is not set, # i.e. 
upperbound is positive upperbound = intmask(r_uint(-1) >> shift) @@ -296,7 +304,6 @@ return space.wrap_int((self.value >> shift) & mask) def unwrap_uint(self, space): - from rpython.rlib.rarithmetic import r_uint return r_uint(self.value) def clone(self, space): @@ -398,11 +405,11 @@ from rpython.rlib.rstruct.ieee import float_pack r = float_pack(self.value, 8) # C double if n0 == 0: - return space.wrap_uint(r_uint(intmask(r >> 32))) + return space.wrap_uint(r_uint32(intmask(r >> 32))) else: # bounds-check for primitive access is done in the primitive assert n0 == 1 - return space.wrap_uint(r_uint(intmask(r))) + return space.wrap_uint(r_uint32(intmask(r))) def store(self, space, n0, w_obj): from rpython.rlib.rstruct.ieee import float_unpack, float_pack @@ -484,7 +491,7 @@ class W_AbstractPointersObject(W_AbstractObjectWithClassReference): """Common object.""" _attrs_ = ['shadow'] - + def changed(self): # This is invoked when an instance-variable is changed. # Kept here in case it might be usefull in the future. 
@@ -543,7 +550,7 @@ def _get_shadow(self): return self.shadow - + @objectmodel.specialize.arg(2) def attach_shadow_of_class(self, space, TheClass): shadow = TheClass(space, self) @@ -644,11 +651,11 @@ self.fieldtypes = fieldtypes_of_length(self.s_class, size) for i in range(size): # do it by hand for the JIT's sake vars[i] = w_nil - + def set_vars(self, new_vars): self._vars = new_vars make_sure_not_resized(self._vars) - + def fillin(self, space, g_self): W_AbstractPointersObject.fillin(self, space, g_self) from spyvm.fieldtypes import fieldtypes_of @@ -773,14 +780,19 @@ byte0 = ord(self.getchar(byte_index0)) byte1 = ord(self.getchar(byte_index0 + 1)) << 8 if byte1 & 0x8000 != 0: - byte1 = intmask(r_uint(0xffff0000) | r_uint(byte1)) + byte1 = intmask(widen(r_uint32(0xffff0000)) | widen(r_uint32(byte1))) return space.wrap_int(byte1 | byte0) def short_atput0(self, space, index0, w_value): from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) - if not int_between(-0x8000, i_value, 0x8000): - raise error.PrimitiveFailedError + if constants.LONG_BIT == 64: + if (not int_between(0, i_value, 0x8000) and + not int_between(0, i_value ^ (0xffffffff), 0x8000)): + raise error.PrimitiveFailedError + else: + if not int_between(-0x8000, i_value, 0x8000): + raise error.PrimitiveFailedError byte_index0 = index0 * 2 byte0 = i_value & 0xff byte1 = (i_value & 0xff00) >> 8 @@ -913,20 +925,25 @@ else: short = (word >> 16) & 0xffff if short & 0x8000 != 0: - short = r_uint(0xffff0000) | r_uint(short) + short = widen(r_uint32(0xffff0000)) | short return space.wrap_int(intmask(short)) def short_atput0(self, space, index0, w_value): from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) - if not int_between(-0x8000, i_value, 0x8000): - raise error.PrimitiveFailedError + if constants.LONG_BIT == 64: + if (not int_between(0, i_value, 0x8000) and + not int_between(0, i_value ^ (0xffffffff), 0x8000)): + raise error.PrimitiveFailedError 
+ else: + if not int_between(-0x8000, i_value, 0x8000): + raise error.PrimitiveFailedError word_index0 = index0 / 2 - word = intmask(self.getword(word_index0)) + word = intmask(r_uint32(self.getword(word_index0))) if index0 % 2 == 0: - word = intmask(r_uint(word) & r_uint(0xffff0000)) | (i_value & 0xffff) + word = intmask(widen(r_uint32(word)) & widen(r_uint32(0xffff0000))) | (i_value & 0xffff) else: - word = (i_value << 16) | (word & 0xffff) + word = intmask(r_uint32((i_value << 16) | (word & 0xffff))) value = r_uint(word) self.setword(word_index0, value) @@ -993,10 +1010,10 @@ class W_DisplayBitmap(W_AbstractObjectWithClassReference): _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] - _immutable_fields_ = ['_realsize', 'display', '_depth'] + _immutable_fields_ = ['_realsize', 'display', '_depth', '_real_depth_buffer'] pixelbuffer = None - + @staticmethod def create(space, w_class, size, depth, display): if depth < 8: @@ -1010,7 +1027,7 @@ def __init__(self, space, w_class, size, depth, display): W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') + self._real_depth_buffer = [r_uint(0)] * size self._realsize = size self.display = display self._depth = depth @@ -1021,7 +1038,7 @@ def atput0(self, space, index0, w_value): word = space.unwrap_uint(w_value) - self.setword(index0, word) + self.setword(index0, r_uint(word)) def flush_to_screen(self): self.display.flip() @@ -1046,7 +1063,7 @@ def setword(self, n, word): self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = word + self.display.get_pixelbuffer()[n] = r_uint32(word) def is_array_object(self): return True @@ -1080,13 +1097,13 @@ ((msb & mask) << 11) ) - self.display.get_pixelbuffer()[n] = r_uint(lsb | (msb << 16)) + self.display.get_pixelbuffer()[n] = r_uint32(lsb | (msb << 16)) class W_8BitDisplayBitmap(W_DisplayBitmap): def setword(self, n, word): 
self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = r_uint( + self.display.get_pixelbuffer()[n] = r_uint32( (word >> 24) | ((word >> 8) & 0x0000ff00) | ((word << 8) & 0x00ff0000) | @@ -1099,7 +1116,7 @@ @jit.unroll_safe def setword(self, n, word): self._real_depth_buffer[n] = word - word = r_uint(word) + nWord = r_uint(word) pos = self.compute_pos(n) assert self._depth <= 4 rshift = 32 - self._depth @@ -1108,10 +1125,10 @@ return mapword = r_uint(0) for i in xrange(4): - pixel = r_uint(word) >> rshift + pixel = r_uint(nWord) >> rshift mapword |= (r_uint(pixel) << (i * 8)) - word <<= self._depth - self.display.get_pixelbuffer()[pos] = mapword + nWord <<= self._depth + self.display.get_pixelbuffer()[pos] = r_uint32(mapword) pos += 1 def compute_pos(self, n): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,6 +1,6 @@ import os -from spyvm import constants, model, shadow, wrapper, version +from spyvm import constants, model, shadow, wrapper, system, version from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, specialize @@ -15,7 +15,7 @@ self.make_bootstrap_objects() def find_executable(self, executable): - if os.sep in executable or (os.name == "nt" and ":" in executable): + if os.sep in executable or (system.IS_WINDOWS and ":" in executable): return executable path = os.environ.get("PATH") if path: @@ -198,9 +198,8 @@ # methods for wrapping and unwrapping stuff def wrap_int(self, val): - from spyvm import constants - assert isinstance(val, int) - # we don't do tagging + if not isinstance(val, int): + raise WrappingError return model.W_SmallInteger(val) def wrap_uint(self, val): diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -17,7 +17,7 @@ raise PrimitiveFailedError("BitBlt primitive not called in BitBlt 
object!") # only allow combinationRules 0-41 - combinationRule = interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) + combinationRule = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) if combinationRule > 41: raise PrimitiveFailedError("Missing combinationRule %d" % combinationRule) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -5,7 +5,7 @@ from spyvm import model, shadow from spyvm import constants, display from spyvm.error import PrimitiveFailedError, \ - PrimitiveNotYetWrittenError + PrimitiveNotYetWrittenError, WrappingError from spyvm import wrapper from rpython.rlib import rarithmetic, rfloat, unroll, jit @@ -296,9 +296,13 @@ @expose_primitive(FLOAT_TRUNCATED, unwrap_spec=[float]) def func(interp, s_frame, f): try: - return interp.space.wrap_int(rarithmetic.ovfcheck_float_to_int(f)) + integer = rarithmetic.ovfcheck_float_to_int(f) except OverflowError: raise PrimitiveFailedError + try: + return interp.space.wrap_int(integer) # in 64bit VMs, this may fail + except WrappingError: + raise PrimitiveFailedError @expose_primitive(FLOAT_TIMES_TWO_POWER, unwrap_spec=[float, int]) def func(interp, s_frame, rcvr, arg): @@ -648,17 +652,22 @@ def func(interp, s_frame, argcount, s_method): from spyvm.interpreter import Return w_rcvr = s_frame.peek(0) - try: - s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) - except Return: - w_dest_form = w_rcvr.fetch(interp.space, 0) - w_display = interp.space.objtable['w_display'] - if w_dest_form.is_same_object(w_display): - w_bitmap = w_display.fetch(interp.space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) - w_bitmap.flush_to_screen() - return w_rcvr - except shadow.MethodNotFound: + w_display = interp.space.objtable['w_display'] + if interp.space.unwrap_int(w_display.fetch(interp.space, 3)) == 1: + try: + s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) + except Return: + w_dest_form = 
w_rcvr.fetch(interp.space, 0) + if w_dest_form.is_same_object(w_display): + w_bitmap = w_display.fetch(interp.space, 0) + assert isinstance(w_bitmap, model.W_DisplayBitmap) + w_bitmap.flush_to_screen() + return w_rcvr + except shadow.MethodNotFound: + from spyvm.plugins.bitblt import BitBltPlugin + BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) + return w_rcvr + else: from spyvm.plugins.bitblt import BitBltPlugin BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) return w_rcvr @@ -873,6 +882,15 @@ w_rcvr.s_class = w_arg.s_class + +if constants.LONG_BIT == 32: + def callIProxy(signature, interp, s_frame, argcount, s_method): + from spyvm.interpreter_proxy import IProxy + return IProxy.call(signature, interp, s_frame, argcount, s_method) +else: + def callIProxy(signature, interp, s_frame, argcount, s_method): + raise PrimitiveFailedError + @expose_primitive(EXTERNAL_CALL, clean_stack=False, no_result=True, compiled_method=True) def func(interp, s_frame, argcount, s_method): space = interp.space @@ -899,8 +917,7 @@ from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, s_method) else: - from spyvm.interpreter_proxy import IProxy - return IProxy.call(signature, interp, s_frame, argcount, s_method) + return callIProxy(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError @expose_primitive(COMPILED_METHOD_FLUSH_CACHE, unwrap_spec=[object]) @@ -1075,7 +1092,7 @@ sec_since_epoch = rarithmetic.r_uint(time.time()) # XXX: overflow check necessary? 
sec_since_1901 = sec_since_epoch + secs_between_1901_and_1970 - return interp.space.wrap_uint(sec_since_1901) + return interp.space.wrap_uint(rarithmetic.r_uint(sec_since_1901)) #____________________________________________________________________________ @@ -1119,7 +1136,7 @@ w_arg.setchar(i, chr(new_value)) elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model.W_DisplayBitmap): for i in xrange(w_arg.size()): - w_arg.setword(i, new_value) + w_arg.setword(i, rarithmetic.r_uint(new_value)) else: raise PrimitiveFailedError return w_arg diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -383,12 +383,11 @@ self.startup_time = time.time() def run_spy_hacks(self, space): - pass - # w_display = space.objtable["w_display"] - # if w_display is not None and w_display is not space.w_nil: - # if space.unwrap_int(w_display.fetch(space, 3)) < 8: - # # non-native indexed color depth not well supported - # w_display.store(space, 3, space.wrap_int(8)) + if constants.LONG_BIT == 64: + w_display = space.objtable["w_display"] + if w_display is not None and w_display is not space.w_nil: + if space.unwrap_int(w_display.fetch(space, 3)) < 32: + w_display.store(space, 3, space.wrap_int(32)) def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -6,7 +6,7 @@ from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ - error, shadow + error, shadow, system from spyvm.tool.analyseimage import create_image def _run_benchmark(interp, number, benchmark, arg, use_stm): @@ -223,7 +223,9 @@ def target(driver, *args): from rpython.rlib import rgc - driver.exe_name = 'rsqueak' + driver.exe_name = "rsqueakvm" + if system.IS_64BIT: + driver.exe_name += "-64" if hasattr(rgc, 
"stm_is_enabled"): driver.config.translation.stm = True driver.config.translation.thread = True From noreply at buildbot.pypy.org Thu Apr 3 11:33:23 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 3 Apr 2014 11:33:23 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stm-c4: change name, error out when stmgc isn't passed on cmdline Message-ID: <20140403093323.96EC01C022D@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: stm-c4 Changeset: r769:21cd797e1d36 Date: 2014-04-02 12:58 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/21cd797e1d36/ Log: change name, error out when stmgc isn't passed on cmdline diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -227,9 +227,10 @@ if system.IS_64BIT: driver.exe_name += "-64" if hasattr(rgc, "stm_is_enabled"): + driver.exe_name += "-stm" + driver.config.translation.gc = "stmgc" # Sadly, this also has to be passed on the commandline driver.config.translation.stm = True driver.config.translation.thread = True - # driver.config.translation.gc = "stmgc" driver.config.translation.gcrootfinder = 'stm' driver.config.translation.rweakref = True driver.config.translation.shared = False From noreply at buildbot.pypy.org Thu Apr 3 11:33:24 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 3 Apr 2014 11:33:24 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stm-c4: conditionally compile bitmap free Message-ID: <20140403093324.B07E61C022D@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: stm-c4 Changeset: r770:8ad3a8e8820d Date: 2014-04-02 13:22 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/8ad3a8e8820d/ Log: conditionally compile bitmap free diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1076,7 +1076,16 @@ return self._real_depth_buffer def __del__(self): - lltype.free(self._real_depth_buffer, flavor='raw') + 
raw_free(self._real_depth_buffer, flavor='raw') + + +from rpython.rlib import rgc +if hasattr(rgc, "stm_is_enabled") and rgc.stm_is_enabled(): + def raw_free(buf, flavor="raw"): + pass # XXX: doesn't work with STM-C7? +else: + def raw_free(buf, flavor="raw"): + lltype.free(buf, flavor=flavor) class W_16BitDisplayBitmap(W_DisplayBitmap): From noreply at buildbot.pypy.org Thu Apr 3 11:35:00 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 3 Apr 2014 11:35:00 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: remove this head Message-ID: <20140403093500.684E61C022D@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r771:8e3e8038b946 Date: 2014-04-03 11:24 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/8e3e8038b946/ Log: remove this head From noreply at buildbot.pypy.org Thu Apr 3 13:42:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Apr 2014 13:42:36 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix copystrcontent on stm Message-ID: <20140403114236.184141C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70417:a79b417389c0 Date: 2014-04-03 12:32 +0200 http://bitbucket.org/pypy/pypy/changeset/a79b417389c0/ Log: Fix copystrcontent on stm diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -125,16 +125,6 @@ return val not in self.write_barrier_applied - def handle_copystrcontent(self, op): - xxxxxxxx - # first, a write barrier on the target string - lst = op.getarglist() - lst[1] = self.gen_barrier(lst[1], 'W') - op = op.copy_and_change(op.getopnum(), args=lst) - # then an immutable read barrier the source string - # XXX: 'I' enough? 
- self.handle_category_operations(op, 'R') - @specialize.arg(1) def _do_stm_call(self, funcname, args, result): addr = self.gc_ll_descr.get_malloc_fn_addr(funcname) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1010,6 +1010,17 @@ self.mc.LEA(result, addr_add(self.SEGMENT_NO, frm, sizereg, baseofs, scale)) + def convert_addresses_to_linear(self, reg1, reg2): + if not self.cpu.gc_ll_descr.stm: # stm-only + return + if not IS_X86_64: + todo() # "needed for X86_64_SCRATCH_REG" + sb_adr = rstm.adr_segment_base + assert rx86.fits_in_32bits(sb_adr) # because it is in the 2nd page + self.mc.MOV_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_GC, sb_adr)) + self.mc.ADD(reg1, X86_64_SCRATCH_REG) + self.mc.ADD(reg2, X86_64_SCRATCH_REG) + def _unaryop(asmop): def genop_unary(self, op, arglocs, resloc): getattr(self.mc, asmop)(arglocs[0]) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1133,6 +1133,8 @@ dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, forbidden_vars) self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc, is_unicode=is_unicode) + # for stm: convert the addresses from %gs-based to linear + self.assembler.convert_addresses_to_linear(srcaddr_loc, dstaddr_loc) # compute the length in bytes length_box = args[4] length_loc = self.loc(length_box) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -20,6 +20,8 @@ CFlexSymbolic('((long)&STM_SEGMENT->transaction_read_version)')) adr_jmpbuf_ptr = ( CFlexSymbolic('((long)&STM_SEGMENT->jmpbuf_ptr)')) +adr_segment_base = ( + CFlexSymbolic('((long)&STM_SEGMENT->segment_base)')) adr_write_slowpath = CFlexSymbolic('((long)&_stm_write_slowpath)') adr__pypy_stm_become_inevitable = ( From noreply at 
buildbot.pypy.org Thu Apr 3 13:42:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Apr 2014 13:42:37 +0200 (CEST) Subject: [pypy-commit] pypy default: minor updates Message-ID: <20140403114237.633511C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70418:fb45fdcdba9e Date: 2014-04-03 13:42 +0200 http://bitbucket.org/pypy/pypy/changeset/fb45fdcdba9e/ Log: minor updates diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -15,7 +15,11 @@ user, describes work in progress, and finally gives references to more implementation details. -This work was done by Remi Meier and Armin Rigo. +This work was done by Remi Meier and Armin Rigo. Thanks to all donors +for crowd-funding the work so far! Please have a look at the 2nd call +for donation (*not ready yet*) + +.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction and current status @@ -27,6 +31,8 @@ Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). +XXX more introduction + **pypy-stm requires 64-bit Linux for now.** Development is done in the branch `stmgc-c7`_. If you are only @@ -46,7 +52,7 @@ rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py .. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ -.. __: http://buildbot.pypy.org/nightly/stmgc-c7/ +.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/misc/ .. __: http://clang.llvm.org/get_started.html .. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ From noreply at buildbot.pypy.org Thu Apr 3 15:12:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Apr 2014 15:12:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Enlarge the Introduction part. 
Message-ID: <20140403131210.E55191C320C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70419:de9d68af2ad1 Date: 2014-04-03 15:11 +0200 http://bitbucket.org/pypy/pypy/changeset/de9d68af2ad1/ Log: Enlarge the Introduction part. diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -22,24 +22,42 @@ .. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html -Introduction and current status -=============================== +Introduction +============ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% of the speed of -PyPy, comparing the JITting version in both cases. It is called STM for -Software Transactional Memory, which is the internal technique used (see -`Reference to implementation details`_). +listed below, it should be in theory within 25%-50% of the speed of a +regular PyPy, comparing the JITting version in both cases. It is called +STM for Software Transactional Memory, which is the internal technique +used (see `Reference to implementation details`_). -XXX more introduction +What you get in exchange for this slow-down is that ``pypy-stm`` runs +any multithreaded Python program on multiple CPUs at once. Programs +running two threads or more in parallel should ideally run faster than +in a regular PyPy, either now or soon as issues are fixed. In one way, +that's all there is to it: this is a GIL-less Python, feel free to +`download and try it`__. However, the deeper idea behind the +``pypy-stm`` project is to improve what is so far the state-of-the-art +for using multiple CPUs, which for cases where separate processes don't +work is done by writing explicitly multi-threaded programs. Instead, +``pypy-stm`` is flushing forward an approach to *hide* the threads, as +described below in `atomic sections`_. + + +.. 
__: + +Current status +============== **pypy-stm requires 64-bit Linux for now.** Development is done in the branch `stmgc-c7`_. If you are only interested in trying it out, you can download a Ubuntu 12.04 binary -here__. The current version supports four "segments", which means that -it will run up to four threads in parallel (in other words, you get a -GIL effect again, but only if trying to execute more than 4 threads). +here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, +but not stripped of debug symbols). The current version supports four +"segments", which means that it will run up to four threads in parallel +(in other words, you get a GIL effect again, but only if trying to +execute more than 4 threads). To build a version from sources, you first need to compile a custom version of clang; we recommend downloading `llvm and clang like @@ -59,10 +77,8 @@ Caveats: -* It should generally work. Please do `report bugs`_ that manifest as a - crash or wrong behavior (markedly different from the behavior of a - regular PyPy). Performance bugs are likely to be known issues; we're - working on them. +* So far, small examples work fine, but there are still a number of + bugs. We're busy fixing them. * The JIT warm-up time is abysmal (as opposed to the regular PyPy's, which is "only" bad). Moreover, you should run it with a command like @@ -191,7 +207,18 @@ result is that the behavior should be exactly equivalent: you don't get any extra multithreading issue. +This approach hides the notion of threads from the end programmer, +including all the hard multithreading-related issues. This is not the +first alternative approach to explicit threads; for example, OpenMP_ is +one. However, it is one of the first ones which does not require the +code to be organized in a particular fashion. Instead, it works on any +Python program which has got latent, imperfect parallelism. 
Ideally, it +only requires that the end programmer identifies where this parallelism +is likely to be found, and communicates it to the system, using for +example the ``transaction.add()`` scheme. + .. _`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +.. _OpenMP: http://en.wikipedia.org/wiki/OpenMP ================== From noreply at buildbot.pypy.org Thu Apr 3 15:47:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Apr 2014 15:47:13 +0200 (CEST) Subject: [pypy-commit] pypy default: detail Message-ID: <20140403134713.E201B1C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70420:a2b5c1b83017 Date: 2014-04-03 15:46 +0200 http://bitbucket.org/pypy/pypy/changeset/a2b5c1b83017/ Log: detail diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -254,7 +254,7 @@ The core of the implementation is in a separate C library called stmgc_, in the c7_ subdirectory. Please see the `README.txt`_ for more -information. +information. In particular, the notion of segment is discussed there. .. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ .. 
_c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ From noreply at buildbot.pypy.org Thu Apr 3 20:43:24 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 3 Apr 2014 20:43:24 +0200 (CEST) Subject: [pypy-commit] pypy default: not needed Message-ID: <20140403184325.008151C14DC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70421:b62e50619167 Date: 2014-04-03 20:57 +0300 http://bitbucket.org/pypy/pypy/changeset/b62e50619167/ Log: not needed diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,7 +48,6 @@ def tearDown(self): os.chdir(self.old_dir) - gc.collect() for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) From noreply at buildbot.pypy.org Thu Apr 3 20:43:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 3 Apr 2014 20:43:26 +0200 (CEST) Subject: [pypy-commit] pypy default: close open file before opening again Message-ID: <20140403184326.65B4A1C14DC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70422:fab45f2df6b6 Date: 2014-04-03 20:58 +0300 http://bitbucket.org/pypy/pypy/changeset/fab45f2df6b6/ Log: close open file before opening again diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -162,6 +162,7 @@ # Remark: Do not perform more than one test per open file, # since that does NOT catch the readline error on Windows. 
data = 'xxx' + self.f.close() for mode in ['w', 'wb', 'a', 'ab']: for attr in ['read', 'readline', 'readlines']: self.f = open(TESTFN, mode) From noreply at buildbot.pypy.org Thu Apr 3 20:43:27 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 3 Apr 2014 20:43:27 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test, now raises cpython compatible IOError Message-ID: <20140403184327.B91F91C14DC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70423:50dedcdbad50 Date: 2014-04-03 21:39 +0300 http://bitbucket.org/pypy/pypy/changeset/50dedcdbad50/ Log: fix test, now raises cpython compatible IOError diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -528,7 +528,7 @@ f = open(fn) exc = raises(EnvironmentError, f.truncate, 3) if sys.platform == 'win32': - assert exc.value.winerror == 5 # ERROR_ACCESS_DENIED + assert exc.value.errno == 5 # ERROR_ACCESS_DENIED else: # CPython explicitely checks the file mode # PyPy relies on the libc to raise the error From noreply at buildbot.pypy.org Thu Apr 3 22:46:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Apr 2014 22:46:19 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: in-progress Message-ID: <20140403204619.EBD241C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5169:1bc0051aed74 Date: 2014-04-03 22:46 +0200 http://bitbucket.org/pypy/extradoc/changeset/1bc0051aed74/ Log: in-progress diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -12,49 +12,326 @@ Memory (TM) in PyPy, a way to run CPU-hungry Python programs in multithreaded mode. It is a follow-up on our `first call`_. Two years ago we suggested a single-threaded slow-down of somewhere between 2x and -5x. Our aim now is closer to 1.25x, i.e. 
running only 25% slower than -the regular PyPy. +5x. The aim that seems now within reach is rather closer to 1.25x, i.e. +running only 25% slower than the regular PyPy. We achieved --or overachieved-- most goals laid out in the first call by a large margin, while at the same time raising only about half the -money. The present proposal is thus about development of the second -half: starting from the various missing low-level optimizations, it will -most importantly focus on development of the Python-facing interface. -This includes both internal things (e.g. do dictionaries need to be more -TM-friendly in general?) as well as directly visible things (e.g. some -debugger-like interface to explore common conflicts in a program). It -also includes exploring and tweaking some existing libraries -(e.g. Twisted) to improve their TM-friendliness. +money. The result is described `in our docs`_. The present proposal is +about development of the second half: starting from the various missing +low-level optimizations, it will most importantly focus on development +of the Python-facing interface. This includes both internal things +(e.g. do dictionaries need to be more TM-friendly in general?) as well +as directly visible things (e.g. some debugger-like interface to explore +common conflicts in a program). It also includes exploring and tweaking +some existing libraries to improve their TM-friendliness (e.g. Twisted). See also the `update on HTM`_ below. +.. _`in our docs`: https://pypy.readthedocs.org/en/latest/stm.html + Introduction ============ +In the presence of today's machines with multiple processors, Python +progress is lagging behind: on any CPU-constrained program, developers +have a difficult choice to make. They can use in-process solutions that +do not offer multi-CPU usage. 
In this respect, the natural choice +nowadays is to use Twisted or other event-based paradigms, or systems +that hide events in the control flow, like Stackless; or alternatively, +they can use the existing ``threading`` module, with its associated GIL +and the complexities of real multi-threaded programming (locks, +deadlocks, races, etc.), which make this solution less attractive. The +big alternative is for them to rely on one of various multi-process +solutions that are outside the scope of the core language; all of them +in some way or another are hacks that require extra knowledge and time +to use and that have an impact on the structure of the whole program. +The aim of this series of proposals is to research and implement +Transactional Memory in PyPy. This is a technique that recently came to +the front of the multi-core scene. It promises to offer multi-core CPU +usage without requiring to fall back to the multi-process solutions +described above, and also without using the ``threading`` module --- +just as a small, local extension of the programming language that would +be used only in the core of the event loops. + +The first proposal was launched near the start of 2012 and has covered +the fundamental research part, up to the point of getting a first +version of PyPy working in a reasonable state (after collecting about +USD$27'000, which is little more than half of the money that would have +been required to do it more swiftly). + +This second proposal aims at fixing the remaining issues until we get a +really good GIL-free PyPy (described in `goal 1`_ below); and then we +will focus on the various new features needed to actually use multiple +cores without explicitly using multithreading (`goal 2`_ below), up to +and including adapting some existing framework libraries like Twisted, +Tornado, and possibly Stackless and gevent (`goal 3`_ below). 
In more details =============== +This is a call for financial help in implementing a version of PyPy able +to use multiple processors in a single process, called PyPy-TM; and +developping the APIs and libraries needed as well as enhancing commonly +available frameworks to use the new feature. The developers will be +Armin Rigo and Remi Meier and possibly others. -Hardware Transactional Memory +We currently estimate the final performance goal at 25% to 50% of the +speed of the regular PyPy in fully serial applications. (This goal has +been reached already in some cases, but we need to make this result more +broadly applicable.) We feel confident that it can work, in the +following sense: the performance of PyPy-TM running any suited +application should scale linearly or close-to-linearly with the number +of processors. This means that starting with two cores, such +applications should perform better than in a regular PyPy. (All numbers +presented here are comparing different versions of PyPy which all have +the JIT enabled.) + +You will find below a sketch of the `work plan`_. If more money than +requested is collected, then the excess will be entered into the general +PyPy pot, used for example to finance sprint travel costs to students. + +**Note** For donations higher than $1,000, we can arrange for an invoice +and a different payment method to avoid the high Paypal fees. Please +contact pypy at sfconservancy.org if you want to know details on how +to donate via other means. + + +What is the Global Interpreter Lock? +------------------------------------ + +The GIL, or Global Interpreter Lock, is a single lock in both CPython +and the regular PyPy. Every thread must acquire it in order to execute +Python bytecodes. This means that both with CPython and with the +regular PyPy, Python programs do not gain any benefit in term of +multicore performance even if they are using threads. + + +What is Transactional Memory? 
----------------------------- +`Transactional Memory`_ --- TM --- is a technique imported from +databases: every time we want to do a change to the processors' main +memory, we do it in a "transaction". Multiple transactions can be +executed in parallel by multiple cores. When a transaction is complete, +we try to commit it. This might either succeed, or (if another +transaction committed incompatible changes) fail. If it fails, which is +hopefully rare, we need to restart the transaction from scratch. + +Transactional Memory research has progressed a lot since two years ago, +notably with the introduction of Intel's Haswell_ processors, which +offer Hardware Transactional Memory (HTM). We discuss below why we +think HTM is, so far, still not suitable for our goals. + +.. _`Transactional Memory`: http://en.wikipedia.org/wiki/Transactional_memory +.. _Haswell: http://en.wikipedia.org/wiki/Haswell_%28microarchitecture%29 + + +Hardware vs Software Transactional Memory +----------------------------------------- + +The idea of Transactional Memory was recently made popular by Intel's +Haswell_ processor (released in 2013). We could replace most of the +Software Transactional Memory (STM) library currently used inside PyPy +with a much smaller Hardware Transactional Memory (HTM) library based on +hardware features and running on Haswell-generation processors. This +has been attempted by Remi Meier recently. However, it seems that we +see problems as we expected them: the current generation of HTM +processors is limited to run small-scale transactions. Even the default +transaction size used in PyPy-STM is often too much for HTM; and +reducing this size increases overhead without completely solving the +problem. Based on this experience, it seems safe to say that right now +HTM-enabled processors lack the support that we need. + +Future processors might improve on various aspects. 
We are particularly +interested in `Virtualizing Transactional Memory`_, a 2005 paper that +describes the limits that we're running into and how to solve them more +generally. A CPU with support for the virtual memory described in this +paper would certainly be better for running PyPy-HTM. + +None of the papers we found discusses the issue of sub-cache-line false +conflicts, though (conflicts caused by two independent objects that +happens to live in the same cache line, which is usually 64 bytes). +This is in contrast with the current PyPy-STM, which doesn't have false +conflicts of this kind at all and might thus be ultimately better for +very-long-running transactions. + +.. _`Virtualizing Transactional Memory`: http://pages.cs.wisc.edu/~isca2005/papers/08A-02.PDF + + +Why do it with PyPy instead of CPython? +--------------------------------------- + +While there have been early experiments on Hardware Transactional Memory +with CPython (`Riley and Zilles (2006)`__, `Tabba (2010)`__), there has +been no recent one. The closest is an attempt using `Haswell on the +Ruby interpreter`_. None of these attempts tries to do the same using +Software Transactional Memory. We would nowadays consider it possible +to adapt our stmgc-c7 library for CPython, but it would be a lot of +work, starting from changing the reference-counting scheme. PyPy is +better designed to be open to this kind of research. + +But the best argument from an external point of view is probably that +PyPy has got a JIT to start with. It is thus starting from a better +position in terms of performance, particularly for the long-running kind +of programs that we target here. + +.. __: http://sabi.net/nriley/pubs/dls6-riley.pdf +.. __: http://www.cs.auckland.ac.nz/~fuad/parpycan.pdf +.. 
__: http://researcher.watson.ibm.com/researcher/files/jp-ODAIRA/PPoPP2014_RubyGILHTM.pdf + + +Alternatives +------------ + +PyPy-TM will be slower than judicious usage of existing alternatives, +based on multiple processes that communicate with each other in one way +or another. The counter-argument is that TM is not only a cleaner +solution: there are cases in which it is not doable to organize (or +retrofit) an existing program into the particular format needed for the +alternatives. In particular, small quickly-written programs don't need +the additional baggage of cross-process communication; and large +programs can sometimes be almost impossible to turn into multi-process +versions. By contrast, we believe that TM can fit naturally into most +programs, because it only requires local changes to some dispatcher; the +rest of the program should work without changes. + + +Other non-Linux platforms +------------------------- + +The current work relies heavily on Linux-, clang-, and 64-bit only +features. We believe it is a suitable restriction: a lot of multi- and +many-core servers commonly available are nowadays x86-64 machines +running Linux. Nevertheless, non-Linux solutions appear to be possible +as well. OS/X (and likely the various BSDs) seems to handle ``mmap()`` +better than Linux does, and can remap individual pages of an existing +mapping to various pages without hitting a limit of 65536 like Linux. +Windows might also have a way, although we didn't measure yet; but the +first issue with Windows would be to support Win64, which the regular +PyPy doesn't. + +We will likely explore the OS/X way (as well as the Windows way if Win64 +support grows in PyPy), but this is not included in the scope of this +proposal. + + More readings ------------- +See `our blog posts about STM`__. + +.. 
__: http://morepypy.blogspot.com/search/label/stm Work plan ========= +This is an very rough estimate of the amount of work it would take to +complete the steps for an experienced developer who is already familiar +with the PyPy codebase. As before, we cannot guarantee the time +estimates here, but we do agree to report regularly to the community, so +our progress can be followed publicly. +Paid work will be at $60/hour, but at least one developer who will work +on the project --- Armin Rigo --- has committed to 2 hours of volunteer +work per paid hour (so the total amount of money that we ask is divided +by three). A 10% general donation will go to the `Software Freedom +Conservancy`_ itself, the non-profit organization of which the PyPy +project is a member and which manages all the issues related to +donations, payments, and tax-exempt status. + +.. _`Software Freedom Conservancy`: http://sfconservancy.org/ + + +Goal 1 +------ + +The PyPy-STM that we have in the end of March 2014 is good enough in +some cases to run existing multithreaded code without a GIL, but not in +all of them. There are a number of caveats for the user and missing +optimizations. The goal #1 is to improve this case and address +the caveats. The current status is written down `in the docs`__ and +will evolve over time. + +.. __: https://pypy.readthedocs.org/en/latest/stm.html + +For future reference, at the end of March the main identified issues +are: + +* There are still a number of bugs. + +* The JIT warm-up time is abysmal. + +* The GC is missing a number of optimizations that are present in + a regular PyPy. + +* Destructors are not supported (``__del__()`` methods). + +* The STM bookkeeping logic could see more improvements. + +* Forking the process is slow. 
+ +Fixing all these issues is required before we can confidently say that +PyPy-STM is an out-of-the-box replacement of a regular PyPy which gives +speed-ups over the regular PyPy independently of the Python program it +runs, as long as it is using at least two threads. + + +Goal 2 +------ + +This goal contains the various new features needed to use multiple cores +without explicitly using multithreading; in other words, the new APIs +and libraries accessible from Python programs that want to make use of +this benefit. + +XXX improve from here + +The goal is to improve the existing atomic sections, but the most +visible missing thing is that you don't get reports about the +"conflicts" you get. This would be the first thing that you need in +order to start using atomic sections more extensively. Also, for now: +for better results, try to explicitly force a transaction break just +before (and possibly after) each large atomic section, with +``time.sleep(0)``. + +This approach hides the notion of threads from the end programmer, +including all the hard multithreading-related issues. This is not the +first alternative approach to explicit threads; for example, OpenMP_ is +one. However, it is one of the first ones which does not require the +code to be organized in a particular fashion. Instead, it works on any +Python program which has got latent, imperfect parallelism. Ideally, it +only requires that the end programmer identifies where this parallelism +is likely to be found, and communicates it to the system, using for +example the ``transaction.add()`` scheme. + +XXX Talk also about dict- or list-specific conflict avoidance; +delaying some updates or I/O; etc. etc. + + +Goal 3 +------ + +XXX + + +--------- + +XXX fix +Total: 5 months for the initial version; at least 8 additional months +for the fast version. We will go with a total estimate of 15 months, +corresponding to USD$151200. 
The amount sought by this fundraising +campaign, considering the 2 volunteer hours per paid hour is thus USD$50400. Benefits of This Work to the Python Community and the General Public ==================================================================== + +XXX From noreply at buildbot.pypy.org Thu Apr 3 23:22:46 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 3 Apr 2014 23:22:46 +0200 (CEST) Subject: [pypy-commit] pypy default: must specify struct size on win32, why doesn't gcc care? Message-ID: <20140403212247.0100D1D2AC4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70424:095f6bf8e286 Date: 2014-04-03 23:50 +0300 http://bitbucket.org/pypy/pypy/changeset/095f6bf8e286/ Log: must specify struct size on win32, why doesn't gcc care? diff --git a/rpython/rlib/test/test_libffi.py b/rpython/rlib/test/test_libffi.py --- a/rpython/rlib/test/test_libffi.py +++ b/rpython/rlib/test/test_libffi.py @@ -540,7 +540,7 @@ } """ libfoo = CDLL(self.libfoo_name) - ffi_point_struct = make_struct_ffitype_e(0, 0, [types.signed, types.signed]) + ffi_point_struct = make_struct_ffitype_e(rffi.sizeof(rffi.SIGNED)*2, 0, [types.signed, types.signed]) ffi_point = ffi_point_struct.ffistruct libfoo = CDLL(self.libfoo_name) From noreply at buildbot.pypy.org Thu Apr 3 23:22:48 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 3 Apr 2014 23:22:48 +0200 (CEST) Subject: [pypy-commit] pypy default: fix for win32 Message-ID: <20140403212248.4B2101D2AC4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70425:14dc8277e88a Date: 2014-04-04 00:20 +0300 http://bitbucket.org/pypy/pypy/changeset/14dc8277e88a/ Log: fix for win32 diff --git a/rpython/rlib/test/test_jit_libffi.py b/rpython/rlib/test/test_jit_libffi.py --- a/rpython/rlib/test/test_jit_libffi.py +++ b/rpython/rlib/test/test_jit_libffi.py @@ -1,13 +1,16 @@ import math import ctypes +import sys from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib import clibffi from 
rpython.rlib.rarithmetic import intmask from rpython.rlib.jit_libffi import CIF_DESCRIPTION from rpython.rlib.jit_libffi import jit_ffi_prep_cif, jit_ffi_call - -math_sin = intmask(ctypes.cast(ctypes.CDLL(None).sin, ctypes.c_void_p).value) +if sys.platform == 'win32': + math_sin = intmask(ctypes.cast(ctypes.cdll.msvcrt.sin, ctypes.c_void_p).value) +else: + math_sin = intmask(ctypes.cast(ctypes.CDLL(None).sin, ctypes.c_void_p).value) math_sin = rffi.cast(rffi.VOIDP, math_sin) From noreply at buildbot.pypy.org Thu Apr 3 23:55:45 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Apr 2014 23:55:45 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: CPython issue1692335: Exception.args is also set in __new__ Message-ID: <20140403215545.B563C1C011F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70426:81af5323708c Date: 2014-04-03 23:54 +0200 http://bitbucket.org/pypy/pypy/changeset/81af5323708c/ Log: CPython issue1692335: Exception.args is also set in __new__ (for naive exception subclasses which don't call super().__init__) diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -208,8 +208,10 @@ if basecls is None: basecls = cls def descr_new_base_exception(space, w_subtype, __args__): + args_w, kwds_w = __args__.unpack() # ignore kwds exc = space.allocate_instance(cls, w_subtype) basecls.__init__(exc, space) + exc.args_w = args_w return space.wrap(exc) descr_new_base_exception.func_name = 'descr_new_' + cls.__name__ return interp2app(descr_new_base_exception) diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -40,6 +40,14 @@ x = X(x=8) assert x.x == 8 + def test_args(self): + class X(Exception): + def __init__(self, x=3): + 
self.x = x + + assert X(8).args == (8,) + assert X(x=8).args == () + def test_exc(self): assert issubclass(Exception, BaseException) assert isinstance(Exception(), Exception) From noreply at buildbot.pypy.org Fri Apr 4 00:12:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 4 Apr 2014 00:12:04 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: fix str() w/ out an object specified & kw only args Message-ID: <20140403221204.F01811C320C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70427:bb6d5f9788c9 Date: 2014-04-03 15:04 -0700 http://bitbucket.org/pypy/pypy/changeset/bb6d5f9788c9/ Log: fix str() w/ out an object specified & kw only args diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -341,12 +341,14 @@ assert str(123) == '123' assert str(object=123) == '123' assert str([2, 3]) == '[2, 3]' + assert str(errors='strict') == '' class U(str): pass assert str(U()).__class__ is str assert U().__str__().__class__ is str assert U('test') == 'test' assert U('test').__class__ is U + assert U(errors='strict') == U('') def test_call_unicode_2(self): class X(object): diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -157,20 +157,18 @@ return space.newlist_unicode(lst) @staticmethod - @unwrap_spec(w_object=WrappedDefault(u'')) def descr_new(space, w_unicodetype, w_object=None, w_encoding=None, w_errors=None): - # NB. 
the default value of w_obj is really a *wrapped* empty string: - # there is gateway magic at work - w_obj = w_object - - encoding, errors = _get_encoding_and_errors(space, w_encoding, - w_errors) - if encoding is None and errors is None: - w_value = unicode_from_object(space, w_obj) + if w_object is None: + w_value = W_UnicodeObject.EMPTY else: - w_value = unicode_from_encoded_object(space, w_obj, encoding, - errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) + if encoding is None and errors is None: + w_value = unicode_from_object(space, w_object) + else: + w_value = unicode_from_encoded_object(space, w_object, + encoding, errors) if space.is_w(w_unicodetype, space.w_unicode): return w_value From noreply at buildbot.pypy.org Fri Apr 4 00:12:52 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 4 Apr 2014 00:12:52 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: Fix the test: fcntl is exactly the function I targeted when I changed space.c_filedescriptor_w(). Message-ID: <20140403221252.1CD341C320C@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: stdlib-3.2.5 Changeset: r70428:d153bcff0b93 Date: 2014-04-04 00:11 +0200 http://bitbucket.org/pypy/pypy/changeset/d153bcff0b93/ Log: Fix the test: fcntl is exactly the function I targeted when I changed space.c_filedescriptor_w(). 
diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -37,14 +37,10 @@ raises(TypeError, fcntl.fcntl, f, "foo") exc = raises(TypeError, fcntl.fcntl, F("foo"), 1) assert str(exc.value) == 'fileno() returned a non-integer' - exc = raises(ValueError, fcntl.fcntl, 2147483647 + 1, 1, 0) - assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' - exc = raises(ValueError, fcntl.fcntl, F(2147483647 + 1), 1, 0) - assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' - exc = raises(ValueError, fcntl.fcntl, -2147483648 - 1, 1, 0) - assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' - exc = raises(ValueError, fcntl.fcntl, F(-2147483648 - 1), 1, 0) - assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' + exc = raises(OverflowError, fcntl.fcntl, 2147483647 + 1, 1, 0) + exc = raises(OverflowError, fcntl.fcntl, F(2147483647 + 1), 1, 0) + exc = raises(OverflowError, fcntl.fcntl, -2147483648 - 1, 1, 0) + exc = raises(OverflowError, fcntl.fcntl, F(-2147483648 - 1), 1, 0) raises(ValueError, fcntl.fcntl, -1, 1, 0) raises(ValueError, fcntl.fcntl, F(-1), 1, 0) raises(ValueError, fcntl.fcntl, F(int(-1)), 1, 0) From noreply at buildbot.pypy.org Fri Apr 4 11:20:04 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 11:20:04 +0200 (CEST) Subject: [pypy-commit] pypy default: move optimizeopt tests from metainterp/tests to optimizeopt/test Message-ID: <20140404092004.C2AC11C10C6@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70429:253569b13bea Date: 2014-04-04 11:17 +0200 http://bitbucket.org/pypy/pypy/changeset/253569b13bea/ Log: move optimizeopt tests from metainterp/tests to optimizeopt/test diff --git a/rpython/jit/metainterp/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py rename from 
rpython/jit/metainterp/test/test_intbound.py rename to rpython/jit/metainterp/optimizeopt/test/test_intbound.py diff --git a/rpython/jit/metainterp/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py rename from rpython/jit/metainterp/test/test_virtualstate.py rename to rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py From noreply at buildbot.pypy.org Fri Apr 4 11:20:06 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 11:20:06 +0200 (CEST) Subject: [pypy-commit] pypy default: of course since the tests are not where one expects, somebody made a new file Message-ID: <20140404092006.247831C10C6@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70430:9d0e73ee9419 Date: 2014-04-04 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/9d0e73ee9419/ Log: of course since the tests are not where one expects, somebody made a new file diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py --- a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py @@ -1,5 +1,7 @@ from rpython.jit.metainterp.optimizeopt.intutils import IntBound, IntUpperBound, \ IntLowerBound, IntUnbounded +from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 + from copy import copy import sys from rpython.rlib.rarithmetic import LONG_BIT @@ -235,8 +237,8 @@ for shift_count_bound in (IntBound(7, LONG_BIT), IntBound(-7, 7)): #assert not b.lshift_bound(shift_count_bound).has_upper assert not b.rshift_bound(shift_count_bound).has_upper - - + + def test_div_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -258,7 +260,6 @@ assert a.contains(0) - def test_sub_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -271,3 +272,14 @@ a=bound(2, 4).sub_bound(bound(1, 2)) assert not a.contains(-1) assert not a.contains(4) + + 
+def test_next_pow2_m1(): + assert next_pow2_m1(0) == 0 + assert next_pow2_m1(1) == 1 + assert next_pow2_m1(7) == 7 + assert next_pow2_m1(256) == 511 + assert next_pow2_m1(255) == 255 + assert next_pow2_m1(80) == 127 + assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 + assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py deleted file mode 100644 --- a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py +++ /dev/null @@ -1,12 +0,0 @@ -from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 - - -def test_next_pow2_m1(): - assert next_pow2_m1(0) == 0 - assert next_pow2_m1(1) == 1 - assert next_pow2_m1(7) == 7 - assert next_pow2_m1(256) == 511 - assert next_pow2_m1(255) == 255 - assert next_pow2_m1(80) == 127 - assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 - assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 From noreply at buildbot.pypy.org Fri Apr 4 11:26:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 11:26:44 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: finish the draft Message-ID: <20140404092644.76C4D1C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5170:bf6679f66da3 Date: 2014-04-04 11:26 +0200 http://bitbucket.org/pypy/extradoc/changeset/bf6679f66da3/ Log: finish the draft diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -253,7 +253,7 @@ Goal 1 ------ -The PyPy-STM that we have in the end of March 2014 is good enough in +The PyPy-TM that we have in the end of March 2014 is good enough in some cases to run existing multithreaded code without a GIL, but not in all of them. There are a number of caveats for the user and missing optimizations. The goal #1 is to improve this case and address @@ -279,7 +279,7 @@ * Forking the process is slow. 
Fixing all these issues is required before we can confidently say that -PyPy-STM is an out-of-the-box replacement of a regular PyPy which gives +PyPy-TM is an out-of-the-box replacement of a regular PyPy which gives speed-ups over the regular PyPy independently of the Python program it runs, as long as it is using at least two threads. @@ -292,46 +292,111 @@ and libraries accessible from Python programs that want to make use of this benefit. -XXX improve from here +This goal requires good support for very-long-running transactions, +started with the ``with atomic`` construct documented here__. This +approach hides the notion of threads from the end programmer, including +all the hard multithreading-related issues. This is not the first +alternative approach to explicit threads; for example, OpenMP_ is one. +However, it is one of the first ones which does not require the code to +be organized in a particular fashion. Instead, it works on any Python +program which has got latent, imperfect parallelism. Ideally, it only +requires that the end programmer identifies where this parallelism is +likely to be found, and communicates it to the system, using some +lightweight library on top of ``with atomic``. -The goal is to improve the existing atomic sections, but the most -visible missing thing is that you don't get reports about the -"conflicts" you get. This would be the first thing that you need in -order to start using atomic sections more extensively. Also, for now: -for better results, try to explicitly force a transaction break just -before (and possibly after) each large atomic section, with -``time.sleep(0)``. +This introduces new issues. At the very least, we need a way to get +feedback about what conflicts we get in these long-running transactions, +and where they are produced. A first step will be to implement getting +"tracebacks" that point to the places where the most time is lost. 
This +could be later integrated into some "debugger"-like variant where we can +navigate the conflicts, either in a live program or based on data logs. -This approach hides the notion of threads from the end programmer, -including all the hard multithreading-related issues. This is not the -first alternative approach to explicit threads; for example, OpenMP_ is -one. However, it is one of the first ones which does not require the -code to be organized in a particular fashion. Instead, it works on any -Python program which has got latent, imperfect parallelism. Ideally, it -only requires that the end programmer identifies where this parallelism -is likely to be found, and communicates it to the system, using for -example the ``transaction.add()`` scheme. +Some of these conflicts can be solved by improving PyPy-TM directly. +The system works on the granularity of objects and doesn't generate +false conflicts, but some conflicts may be regarded as "false" anyway: +these involve most importantly the built-in dictionary type, for which +we would like accesses and writes using independent keys to be truly +independent. Other built-in data structures we a similar issue are +lists: ideally, writes to different indexes should not cause conflicts; +but more generally, we would need a mechanism, possibly under the +control of the application, to do things like append an item to a list +in a "delayed" manner, to avoid conflicts. -XXX Talk also about dict- or list-specific conflict avoidance; -delaying some updates or I/O; etc. etc. +.. __: https://pypy.readthedocs.org/en/latest/stm.html + +Similarly, we might need a way to delay some I/O: doing it only at the +end of the transaction rather than immediately, in order to prevent the +whole transaction from turning inevitable. + +The goal 2 is thus the development of tools to inspect and fix the +causes of conflicts, as well as fixing the ones that are apparent inside +PyPy-TM directly. 
Goal 3 ------ -XXX +The third goal is to look at some existing event-based frameworks (for +example Twisted, Tornado, Stackless, gevent, ...) and attempt to make +them use threads and atomic sections internally. We would appreciate +help and feedback from people more involved in these frameworks, of +course. +The idea is to apply the techniques described in the `goal 2`_ until we +get a version of framework X which can transparently parallelize the +dispatching of multiple events. This might require some slight +reorganization of the core in order to split the I/O and the actual +logic into separate transactions. ---------- -XXX fix -Total: 5 months for the initial version; at least 8 additional months -for the fast version. We will go with a total estimate of 15 months, -corresponding to USD$151200. The amount sought by this fundraising -campaign, considering the 2 volunteer hours per paid hour is thus USD$50400. +Funding +------- + +We forecast that goal 1 and a good chunk of goal 2 should be reached in +around 4 months of work. The remaining parts of goal 2 as well as goal +3 are likely to be more open-ended jobs. We will go with a total +estimate of 8 months, corresponding to roughly the second half of the +`original call for proposal`_ which was not covered so far. This +corresponds to USD$80640. The amount sought by this fundraising +campaign, considering the 2 volunteer hours per paid hour is thus +USD$26880. Benefits of This Work to the Python Community and the General Public ==================================================================== -XXX +Python has become one of the most popular dynamic programming languages in +the world. Web developers, educators, and scientific programmers alike +all value Python because Python code is often more readable and because +Python often increases programmer productivity. 
+ +Traditionally, languages like Python ran more slowly than static, compiled +languages; Python developers chose to sacrifice execution speed for ease +of programming. The PyPy project created a substantially improved Python +language implementation, including a fast Just-in-time (JIT) compiler. +The increased execution speed that PyPy provides has attracted many users, +who now find their Python code runs up to four times faster under PyPy +than under the reference implementation written in C. + +However, in the presence of today's machines with multiple processors, +Python progress lags behind. The issue has been described in the +introduction: developers that really need to use multiple CPUs are +constrained to select and use one of the multi-process solutions that +are all in some way or another hacks requiring extra knowledge and +efforts to use. The focus of the work described in this proposal is to +offer an alternative in the core of the Python language --- an +alternative that can naturally integrate with the rest of the program. +This alternative is implemented in PyPy. + +PyPy's developers make all PyPy software available to the public without +charge, under PyPy's Open Source copyright license, the permissive MIT +License. PyPy's license assures that PyPy is equally available to +everyone freely on terms that allow both non-commercial and commercial +activity. This license allows for academics, for-profit software +developers, volunteers and enthusiasts alike to collaborate together to +make a better Python implementation for everyone. + +PyPy-TM is and continues to be available under the same license. Being +licensed freely to the general public means that opportunities to use, +improve and learn about how Transactional Memory works itself will be +generally available to everyone. 
From noreply at buildbot.pypy.org Fri Apr 4 11:42:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 11:42:06 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: tweaks Message-ID: <20140404094206.76B811C066C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5171:77c53500b530 Date: 2014-04-04 11:42 +0200 http://bitbucket.org/pypy/extradoc/changeset/77c53500b530/ Log: tweaks diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -10,25 +10,30 @@ This is the second call for donations on the topic of Transactional Memory (TM) in PyPy, a way to run CPU-hungry Python programs in -multithreaded mode. It is a follow-up on our `first call`_. Two years -ago we suggested a single-threaded slow-down of somewhere between 2x and -5x. The aim that seems now within reach is rather closer to 1.25x, i.e. -running only 25% slower than the regular PyPy. +multithreaded mode. It is a follow-up on our `first call for +donations`_ from two years ago. At that time, we suggested a +single-threaded slow-down of somewhere between 2x and 5x. The aim that +seems now within reach is rather closer to 1.25x, i.e. running only 25% +slower than the regular PyPy. We achieved --or overachieved-- most goals laid out in the first call by a large margin, while at the same time raising only about half the -money. The result is described `in our docs`_. The present proposal is -about development of the second half: starting from the various missing -low-level optimizations, it will most importantly focus on development -of the Python-facing interface. This includes both internal things -(e.g. do dictionaries need to be more TM-friendly in general?) as well -as directly visible things (e.g. some debugger-like interface to explore -common conflicts in a program). It also includes exploring and tweaking -some existing libraries to improve their TM-friendliness (e.g. Twisted). +money. 
The result of this first step is `described in the documentation +of PyPy`__. + +The present proposal is about development of the second half: starting +from the various missing low-level optimizations, it will most +importantly focus on developping the Python-facing interface. This +includes both internal things (e.g. do dictionaries need to be more +TM-friendly in general?) as well as directly visible things (e.g. some +debugger-like interface to explore common conflicts in a program). It +also includes exploring and tweaking some existing libraries to improve +their TM-friendliness (e.g. Twisted and Stackless). See also the `update on HTM`_ below. -.. _`in our docs`: https://pypy.readthedocs.org/en/latest/stm.html +.. _`first call for donations`: http://pypy.org/tmdonate.html +.. __: https://pypy.readthedocs.org/en/latest/stm.html @@ -59,16 +64,16 @@ The first proposal was launched near the start of 2012 and has covered the fundamental research part, up to the point of getting a first -version of PyPy working in a reasonable state (after collecting about -USD$27'000, which is little more than half of the money that would have -been required to do it more swiftly). +version of PyPy working in a very roughly reasonable state (after +collecting about USD$27'000, which is little more than half of the money +that was asked; hence the present second call for donations). This second proposal aims at fixing the remaining issues until we get a really good GIL-free PyPy (described in `goal 1`_ below); and then we will focus on the various new features needed to actually use multiple cores without explicitly using multithreading (`goal 2`_ below), up to -and including adapting some existing framework libraries like Twisted, -Tornado, and possibly Stackless and gevent (`goal 3`_ below). +and including adapting some existing framework libraries like for +example Twisted, Tornado, Stackless and gevent (`goal 3`_ below). @@ -132,6 +137,8 @@ .. 
_Haswell: http://en.wikipedia.org/wiki/Haswell_%28microarchitecture%29 +.. _`update on HTM`: + Hardware vs Software Transactional Memory ----------------------------------------- @@ -170,7 +177,7 @@ While there have been early experiments on Hardware Transactional Memory with CPython (`Riley and Zilles (2006)`__, `Tabba (2010)`__), there has been no recent one. The closest is an attempt using `Haswell on the -Ruby interpreter`_. None of these attempts tries to do the same using +Ruby interpreter`__. None of these attempts tries to do the same using Software Transactional Memory. We would nowadays consider it possible to adapt our stmgc-c7 library for CPython, but it would be a lot of work, starting from changing the reference-counting scheme. PyPy is @@ -323,6 +330,7 @@ in a "delayed" manner, to avoid conflicts. .. __: https://pypy.readthedocs.org/en/latest/stm.html +.. _OpenMP: http://en.wikipedia.org/wiki/OpenMP Similarly, we might need a way to delay some I/O: doing it only at the end of the transaction rather than immediately, in order to prevent the @@ -356,7 +364,7 @@ around 4 months of work. The remaining parts of goal 2 as well as goal 3 are likely to be more open-ended jobs. We will go with a total estimate of 8 months, corresponding to roughly the second half of the -`original call for proposal`_ which was not covered so far. This +`first call for donations`_ which was not covered so far. This corresponds to USD$80640. The amount sought by this fundraising campaign, considering the 2 volunteer hours per paid hour is thus USD$26880. 
From noreply at buildbot.pypy.org Fri Apr 4 11:44:51 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 11:44:51 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: use unicode en dashes, ReST always escapes two dashes Message-ID: <20140404094451.0A66B1C066C@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5172:bc9dd03d4f90 Date: 2014-04-04 11:44 +0200 http://bitbucket.org/pypy/extradoc/changeset/bc9dd03d4f90/ Log: use unicode en dashes, ReST always escapes two dashes diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -16,7 +16,7 @@ seems now within reach is rather closer to 1.25x, i.e. running only 25% slower than the regular PyPy. -We achieved --or overachieved-- most goals laid out in the first call by +We achieved – or overachieved – most goals laid out in the first call by a large margin, while at the same time raising only about half the money. The result of this first step is `described in the documentation of PyPy`__. @@ -58,7 +58,7 @@ Transactional Memory in PyPy. This is a technique that recently came to the front of the multi-core scene. It promises to offer multi-core CPU usage without requiring to fall back to the multi-process solutions -described above, and also without using the ``threading`` module --- +described above, and also without using the ``threading`` module — just as a small, local extension of the programming language that would be used only in the core of the event loops. @@ -120,7 +120,7 @@ What is Transactional Memory? ----------------------------- -`Transactional Memory`_ --- TM --- is a technique imported from +`Transactional Memory`_ (TM) is a technique imported from databases: every time we want to do a change to the processors' main memory, we do it in a "transaction". Multiple transactions can be executed in parallel by multiple cores. 
When a transaction is complete, @@ -247,7 +247,7 @@ our progress can be followed publicly. Paid work will be at $60/hour, but at least one developer who will work -on the project --- Armin Rigo --- has committed to 2 hours of volunteer +on the project – Armin Rigo – has committed to 2 hours of volunteer work per paid hour (so the total amount of money that we ask is divided by three). A 10% general donation will go to the `Software Freedom Conservancy`_ itself, the non-profit organization of which the PyPy @@ -392,7 +392,7 @@ constrained to select and use one of the multi-process solutions that are all in some way or another hacks requiring extra knowledge and efforts to use. The focus of the work described in this proposal is to -offer an alternative in the core of the Python language --- an +offer an alternative in the core of the Python language — an alternative that can naturally integrate with the rest of the program. This alternative is implemented in PyPy. From noreply at buildbot.pypy.org Fri Apr 4 11:51:08 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 11:51:08 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more like a profiler at least at first Message-ID: <20140404095108.DD2611C0F12@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5173:4ff712ba945b Date: 2014-04-04 11:51 +0200 http://bitbucket.org/pypy/extradoc/changeset/4ff712ba945b/ Log: more like a profiler at least at first diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -23,10 +23,10 @@ The present proposal is about development of the second half: starting from the various missing low-level optimizations, it will most -importantly focus on developping the Python-facing interface. This +importantly focus on developing the Python-facing interface. This includes both internal things (e.g. do dictionaries need to be more TM-friendly in general?) 
as well as directly visible things (e.g. some -debugger-like interface to explore common conflicts in a program). It +profiler-like interface to explore common conflicts in a program). It also includes exploring and tweaking some existing libraries to improve their TM-friendliness (e.g. Twisted and Stackless). @@ -315,7 +315,7 @@ feedback about what conflicts we get in these long-running transactions, and where they are produced. A first step will be to implement getting "tracebacks" that point to the places where the most time is lost. This -could be later integrated into some "debugger"-like variant where we can +could be later integrated into some "profiler"-like variant where we can navigate the conflicts, either in a live program or based on data logs. Some of these conflicts can be solved by improving PyPy-TM directly. From noreply at buildbot.pypy.org Fri Apr 4 11:51:53 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 11:51:53 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: take out the "hack" part, the downsides of multi processes are large enough on Message-ID: <20140404095153.CBE231C0F12@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5174:42bf35228888 Date: 2014-04-04 11:51 +0200 http://bitbucket.org/pypy/extradoc/changeset/42bf35228888/ Log: take out the "hack" part, the downsides of multi processes are large enough on their own diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -50,9 +50,9 @@ and the complexities of real multi-threaded programming (locks, deadlocks, races, etc.), which make this solution less attractive. The big alternative is for them to rely on one of various multi-process -solutions that are outside the scope of the core language; all of them -in some way or another are hacks that require extra knowledge and time -to use and that have an impact on the structure of the whole program. 
+solutions that are outside the scope of the core language. All of them require a +big restructuring of the program to and often need extreme care and extra +knowledge to use them. The aim of this series of proposals is to research and implement Transactional Memory in PyPy. This is a technique that recently came to From noreply at buildbot.pypy.org Fri Apr 4 11:57:52 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 11:57:52 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: try to clarify this Message-ID: <20140404095752.D95411C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5175:a222f0289211 Date: 2014-04-04 11:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/a222f0289211/ Log: try to clarify this diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -58,9 +58,9 @@ Transactional Memory in PyPy. This is a technique that recently came to the front of the multi-core scene. It promises to offer multi-core CPU usage without requiring to fall back to the multi-process solutions -described above, and also without using the ``threading`` module — -just as a small, local extension of the programming language that would -be used only in the core of the event loops. +described above, and also should allow to change the core of the event systems +mentioned above to enable the use of multiple cores without the use of the +``threading`` module. 
The first proposal was launched near the start of 2012 and has covered the fundamental research part, up to the point of getting a first From noreply at buildbot.pypy.org Fri Apr 4 12:01:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 12:01:26 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: rewrites Message-ID: <20140404100126.33D371C12F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5176:2f7bbe9b2c8e Date: 2014-04-04 12:01 +0200 http://bitbucket.org/pypy/extradoc/changeset/2f7bbe9b2c8e/ Log: rewrites diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -148,7 +148,7 @@ with a much smaller Hardware Transactional Memory (HTM) library based on hardware features and running on Haswell-generation processors. This has been attempted by Remi Meier recently. However, it seems that we -see problems as we expected them: the current generation of HTM +see scaling problems (as we expected them): the current generation of HTM processors is limited to run small-scale transactions. Even the default transaction size used in PyPy-STM is often too much for HTM; and reducing this size increases overhead without completely solving the @@ -161,12 +161,12 @@ generally. A CPU with support for the virtual memory described in this paper would certainly be better for running PyPy-HTM. -None of the papers we found discusses the issue of sub-cache-line false -conflicts, though (conflicts caused by two independent objects that -happens to live in the same cache line, which is usually 64 bytes). -This is in contrast with the current PyPy-STM, which doesn't have false -conflicts of this kind at all and might thus be ultimately better for -very-long-running transactions. +Another issue is sub-cache-line false conflicts (conflicts caused by two +independent objects that happens to live in the same cache line, which +is usually 64 bytes). 
This is in contrast with the current PyPy-STM, +which doesn't have false conflicts of this kind at all and might thus be +ultimately better for very-long-running transactions. None of the +papers we know of discusses this issue. .. _`Virtualizing Transactional Memory`: http://pages.cs.wisc.edu/~isca2005/papers/08A-02.PDF @@ -215,13 +215,14 @@ The current work relies heavily on Linux-, clang-, and 64-bit only features. We believe it is a suitable restriction: a lot of multi- and many-core servers commonly available are nowadays x86-64 machines -running Linux. Nevertheless, non-Linux solutions appear to be possible -as well. OS/X (and likely the various BSDs) seems to handle ``mmap()`` -better than Linux does, and can remap individual pages of an existing -mapping to various pages without hitting a limit of 65536 like Linux. -Windows might also have a way, although we didn't measure yet; but the -first issue with Windows would be to support Win64, which the regular -PyPy doesn't. +running Linux. (If PyPy gets a 64-bit ARMv8 JIT backend, adapting it +for STM should be easy). Nevertheless, non-Linux solutions appear to be +possible as well. OS/X (and likely the various BSDs) seems to handle +``mmap()`` better than Linux does, and can remap individual pages of an +existing mapping to various pages without hitting a limit of 65536 like +Linux. Windows might also have a way, although we didn't measure yet; +but the first issue with Windows would be to support Win64, which the +regular PyPy doesn't. 
We will likely explore the OS/X way (as well as the Windows way if Win64 support grows in PyPy), but this is not included in the scope of this From noreply at buildbot.pypy.org Fri Apr 4 12:06:22 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 12:06:22 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: small fixes, one comment Message-ID: <20140404100622.33E431C320C@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5177:755f37488751 Date: 2014-04-04 12:06 +0200 http://bitbucket.org/pypy/extradoc/changeset/755f37488751/ Log: small fixes, one comment diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -51,7 +51,7 @@ deadlocks, races, etc.), which make this solution less attractive. The big alternative is for them to rely on one of various multi-process solutions that are outside the scope of the core language. All of them require a -big restructuring of the program to and often need extreme care and extra +big restructuring of the program and often need extreme care and extra knowledge to use them. The aim of this series of proposals is to research and implement @@ -59,8 +59,8 @@ the front of the multi-core scene. It promises to offer multi-core CPU usage without requiring to fall back to the multi-process solutions described above, and also should allow to change the core of the event systems -mentioned above to enable the use of multiple cores without the use of the -``threading`` module. +mentioned above to enable the use of multiple cores without the explicit use of +the ``threading`` module by the user. 
The first proposal was launched near the start of 2012 and has covered the fundamental research part, up to the point of getting a first @@ -82,7 +82,7 @@ This is a call for financial help in implementing a version of PyPy able to use multiple processors in a single process, called PyPy-TM; and -developping the APIs and libraries needed as well as enhancing commonly +developing the APIs and libraries needed as well as enhancing commonly available frameworks to use the new feature. The developers will be Armin Rigo and Remi Meier and possibly others. @@ -90,12 +90,14 @@ speed of the regular PyPy in fully serial applications. (This goal has been reached already in some cases, but we need to make this result more broadly applicable.) We feel confident that it can work, in the -following sense: the performance of PyPy-TM running any suited +following sense: the performance of PyPy-TM running any suitable application should scale linearly or close-to-linearly with the number of processors. This means that starting with two cores, such applications should perform better than in a regular PyPy. (All numbers presented here are comparing different versions of PyPy which all have the JIT enabled.) +XXX I wonder whether we need to add a caveat like "for applications that don't +conflict too much" somewhere You will find below a sketch of the `work plan`_. 
If more money than requested is collected, then the excess will be entered into the general From noreply at buildbot.pypy.org Fri Apr 4 12:07:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 12:07:30 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: front -> forefront Message-ID: <20140404100730.9A2FE1C320C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5178:c4300ba74632 Date: 2014-04-04 12:07 +0200 http://bitbucket.org/pypy/extradoc/changeset/c4300ba74632/ Log: front -> forefront diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -56,7 +56,7 @@ The aim of this series of proposals is to research and implement Transactional Memory in PyPy. This is a technique that recently came to -the front of the multi-core scene. It promises to offer multi-core CPU +the forefront of the multi-core scene. It promises to offer multi-core CPU usage without requiring to fall back to the multi-process solutions described above, and also should allow to change the core of the event systems mentioned above to enable the use of multiple cores without the explicit use of From noreply at buildbot.pypy.org Fri Apr 4 12:16:38 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 12:16:38 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: another comment, small tweaks Message-ID: <20140404101638.9F22E1C011F@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5179:c7690a124f48 Date: 2014-04-04 12:16 +0200 http://bitbucket.org/pypy/extradoc/changeset/c7690a124f48/ Log: another comment, small tweaks diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -73,7 +73,7 @@ will focus on the various new features needed to actually use multiple cores without explicitly using multithreading (`goal 2`_ below), up to and including adapting some existing 
framework libraries like for -example Twisted, Tornado, Stackless and gevent (`goal 3`_ below). +example Twisted, Tornado, Stackless, or gevent (`goal 3`_ below). @@ -252,7 +252,8 @@ Paid work will be at $60/hour, but at least one developer who will work on the project – Armin Rigo – has committed to 2 hours of volunteer work per paid hour (so the total amount of money that we ask is divided -by three). A 10% general donation will go to the `Software Freedom +by three XXX this sounds like *only* armin works on the project. maybe just +divide by two?). A 10% general donation will go to the `Software Freedom Conservancy`_ itself, the non-profit organization of which the PyPy project is a member and which manages all the issues related to donations, payments, and tax-exempt status. @@ -355,7 +356,7 @@ The idea is to apply the techniques described in the `goal 2`_ until we get a version of framework X which can transparently parallelize the -dispatching of multiple events. This might require some slight +dispatching and execution of multiple events. This might require some slight reorganization of the core in order to split the I/O and the actual logic into separate transactions. 
From noreply at buildbot.pypy.org Fri Apr 4 12:24:20 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 12:24:20 +0200 (CEST) Subject: [pypy-commit] pypy default: a test for making a guard_value Message-ID: <20140404102420.AA8BE1C10C6@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70431:c2f3df441d50 Date: 2014-04-04 12:23 +0200 http://bitbucket.org/pypy/pypy/changeset/c2f3df441d50/ Log: a test for making a guard_value diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -202,6 +202,17 @@ """ self.compare(guards, expected, [box]) + def test_known_value(self): + value1 = OptValue(self.nodebox) + value1.make_constant(ConstInt(1)) + box = self.nodebox + guards = value1.make_guards(box) + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.compare(guards, expected, [box]) + def test_equal_inputargs(self): value = OptValue(self.nodebox) classbox = self.cpu.ts.cls_of_box(self.nodebox) From noreply at buildbot.pypy.org Fri Apr 4 12:25:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 12:25:22 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Rewrite this section Message-ID: <20140404102522.31BFA1C10C6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5180:a4fb1f479cb1 Date: 2014-04-04 12:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/a4fb1f479cb1/ Log: Rewrite this section diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -211,25 +211,36 @@ rest of the program should work without changes. 
-Other non-Linux platforms -------------------------- +Other platforms than the x86-64 Linux +------------------------------------- -The current work relies heavily on Linux-, clang-, and 64-bit only -features. We believe it is a suitable restriction: a lot of multi- and -many-core servers commonly available are nowadays x86-64 machines -running Linux. (If PyPy gets a 64-bit ARMv8 JIT backend, adapting it -for STM should be easy). Nevertheless, non-Linux solutions appear to be -possible as well. OS/X (and likely the various BSDs) seems to handle -``mmap()`` better than Linux does, and can remap individual pages of an -existing mapping to various pages without hitting a limit of 65536 like -Linux. Windows might also have a way, although we didn't measure yet; -but the first issue with Windows would be to support Win64, which the -regular PyPy doesn't. +The first thing to note is that the current solution depends on having a +huge address space available. If it were to be ported to any 32-bit +architecture, the limitation to 2GB or 4GB of address space would become +very restrictive: the way it works right now would further divide this +limit by N+1, where N is the number of segments. It might be possible +to create partially different memory views for multiple threads that +each access the same range of addresses; this would require extensions +that are very OS-specific. We didn't investigate so far. + +The current version, which thus only works on 64-bit, still relies +heavily on Linux- and clang-only features. We believe it is a suitable +restriction: a lot of multi- and many-core servers commonly available +are nowadays x86-64 machines running Linux. Nevertheless, non-Linux +solutions appear to be possible as well. OS/X (and likely the various +BSDs) seems to handle ``mmap()`` better than Linux does, and can remap +individual pages of an existing mapping to various pages without hitting +a limit of 65536 like Linux. 
Windows might also have a way, although we +didn't measure yet; but the first issue with Windows would be to support +Win64, which the regular PyPy doesn't. We will likely explore the OS/X way (as well as the Windows way if Win64 support grows in PyPy), but this is not included in the scope of this proposal. +It might be possible to adapt the work done on x86-64 to the 64-bit +ARMv8 as well, but we didn't investigate so far. + More readings ------------- From noreply at buildbot.pypy.org Fri Apr 4 12:56:58 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 4 Apr 2014 12:56:58 +0200 (CEST) Subject: [pypy-commit] pypy default: disallow asmgcc on win32, breaks untested mingw support Message-ID: <20140404105658.889F71D27B6@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70432:bc44c98191c9 Date: 2014-04-04 13:16 +0300 http://bitbucket.org/pypy/pypy/changeset/bc44c98191c9/ Log: disallow asmgcc on win32, breaks untested mingw support diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -2,9 +2,16 @@ from rpython.config.translationoption import get_combined_translation_config from rpython.config.translationoption import set_opt_level from rpython.config.config import ConflictConfigError +from rpython.translator.platform import platform as compiler def test_no_gcrootfinder_with_boehm(): config = get_combined_translation_config() config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') + +if compiler.name == 'msvc': + def test_no_asmgcrot_on_msvc(): + config = get_combined_translation_config() + config.translation.setoption('gcrootfinder', 'asmgcc', 'required') + py.test.raises(ConflictConfigError, set_opt_level, config, 'jit') diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- 
a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -1,7 +1,7 @@ import sys import os from rpython.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption, FloatOption -from rpython.config.config import ChoiceOption, StrOption, Config +from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError from rpython.config.support import detect_number_of_processors @@ -366,9 +366,11 @@ # if we have specified strange inconsistent settings. config.translation.gc = config.translation.gc - # disallow asmgcc on OS/X + # disallow asmgcc on OS/X and windows if config.translation.gcrootfinder == "asmgcc": assert sys.platform != "darwin" + if (sys.platform == 'win32'): + raise ConflictConfigError("asmgcc unusable on win32") # ---------------------------------------------------------------- From noreply at buildbot.pypy.org Fri Apr 4 12:56:59 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 4 Apr 2014 12:56:59 +0200 (CEST) Subject: [pypy-commit] pypy default: disallow asmgcc on msvc only Message-ID: <20140404105659.DC5B71D27B6@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70433:3af6adc2c895 Date: 2014-04-04 13:53 +0300 http://bitbucket.org/pypy/pypy/changeset/3af6adc2c895/ Log: disallow asmgcc on msvc only diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -1,7 +1,7 @@ import py from rpython.config.translationoption import get_combined_translation_config from rpython.config.translationoption import set_opt_level -from rpython.config.config import ConflictConfigError +from rpython.config.config import ConflictConfigError, ConfigError from rpython.translator.platform import platform as compiler @@ -13,5 +13,5 @@ if compiler.name == 'msvc': def 
test_no_asmgcrot_on_msvc(): config = get_combined_translation_config() - config.translation.setoption('gcrootfinder', 'asmgcc', 'required') - py.test.raises(ConflictConfigError, set_opt_level, config, 'jit') + py.test.raises(ConfigError, config.translation.setoption, + 'gcrootfinder', 'asmgcc', 'user') diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -4,6 +4,7 @@ from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError from rpython.config.support import detect_number_of_processors +from rpython.translator.platform import platform as compiler DEFL_INLINE_THRESHOLD = 32.4 # just enough to inline add__Int_Int() # and just small enough to prevend inlining of some rlist functions. @@ -16,8 +17,13 @@ if sys.platform.startswith("linux"): DEFL_ROOTFINDER_WITHJIT = "asmgcc" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] +elif compiler.name == 'msvc': + DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack"] else: DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] IS_64_BITS = sys.maxint > 2147483647 @@ -85,7 +91,7 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ROOTFINDERS, "shadowstack", cmdline="--gcrootfinder", requires={ @@ -366,11 +372,9 @@ # if we have specified strange inconsistent settings. 
config.translation.gc = config.translation.gc - # disallow asmgcc on OS/X and windows + # disallow asmgcc on OS/X if config.translation.gcrootfinder == "asmgcc": assert sys.platform != "darwin" - if (sys.platform == 'win32'): - raise ConflictConfigError("asmgcc unusable on win32") # ---------------------------------------------------------------- diff --git a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py --- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py @@ -1,4 +1,9 @@ +import py from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests +from rpython.translator.platform import platform as compiler + +if compiler.name == 'msvc': + py.test.skip('asmgcc buggy on msvc') class TestAsmGcc(CompileFrameworkTests): gcrootfinder = "asmgcc" From noreply at buildbot.pypy.org Fri Apr 4 13:10:52 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 13:10:52 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: add one of the "Remaining cases are probably not interesting". Message-ID: <20140404111052.EC9B21C011F@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70434:341b7805d985 Date: 2014-04-04 13:09 +0200 http://bitbucket.org/pypy/pypy/changeset/341b7805d985/ Log: add one of the "Remaining cases are probably not interesting". When jumping to a loop that expects something to be a constant, it's fine to invent a new guard_value. 
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -252,7 +252,24 @@ guards = [] vstate2.generate_guards(vstate3, [self.nodebox, self.nodebox], self.cpu, guards) - + + def test_known_value_virtualstate(self): + box1 = BoxInt(1) + box2 = BoxInt(1) + value1 = OptValue(box1) + value2 = OptValue(box2) + value1.make_constant(ConstInt(1)) + vstate1 = VirtualState([NotVirtualStateInfo(value1)]) + vstate2 = VirtualState([NotVirtualStateInfo(value2)]) + expected = """ + [i0] + guard_value(i0, 1) [] + """ + guards = [] + vstate1.generate_guards(vstate2, [box2], self.cpu, guards) + self.compare(guards, expected, [box2]) + + def test_virtuals_with_equal_fields(self): info1 = VirtualStateInfo(ConstInt(42), [1, 2]) value = OptValue(self.nodebox) diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -360,16 +360,20 @@ if self.is_opaque: raise InvalidLoop('Generating guards for opaque pointers is not safe') + # the following conditions always peek into the runtime value that the + # box had when tracing. This value is only used as an educated guess. + # It is used here to choose between either emitting a guard and jumping + # to an existing compiled loop or retracing the loop. Both alternatives + # will always generate correct behaviour, but performance will differ. 
+ if (self.level == LEVEL_CONSTANT and + self.constbox.same_constant(box.constbox())): + op = ResOperation(rop.GUARD_VALUE, [box, self.constbox], None) + extra_guards.append(op) + return + if self.level == LEVEL_KNOWNCLASS and \ box.nonnull() and \ self.known_class.same_constant(cpu.ts.cls_of_box(box)): - # Note: This is only a hint on what the class of box was - # during the trace. There are actually no guarentees that this - # box realy comes from a trace. The hint is used here to choose - # between either eimtting a guard_class and jumping to an - # excisting compiled loop or retracing the loop. Both - # alternatives will always generate correct behaviour, but - # performace will differ. op = ResOperation(rop.GUARD_NONNULL, [box], None) extra_guards.append(op) op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) From noreply at buildbot.pypy.org Fri Apr 4 13:10:54 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 13:10:54 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: this test is brittle, it fails when run with all others but passes in Message-ID: <20140404111054.44AC81C011F@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70435:282c41e394ed Date: 2014-04-04 13:10 +0200 http://bitbucket.org/pypy/pypy/changeset/282c41e394ed/ Log: this test is brittle, it fails when run with all others but passes in isolation. 
this is fixed by using constant ints diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -627,26 +627,26 @@ def test_constant(self): loops = """ - [p0] - p1 = same_as(ConstPtr(myptr)) - jump(p1) + [i0] + i1 = same_as(1) + jump(i1) """, """ - [p0] - p1 = same_as(ConstPtr(myptr2)) - jump(p1) + [i0] + i1 = same_as(2) + jump(i1) """, """ - [p0] - jump(p0) + [i0] + jump(i0) """ expected = """ - [p0] + [i0] jump() """ self.optimize_bridge(loops, loops[0], expected, 'Loop0') self.optimize_bridge(loops, loops[1], expected, 'Loop1') expected = """ - [p0] - jump(p0) + [i0] + jump(i0) """ self.optimize_bridge(loops, loops[2], expected, 'Loop2') From noreply at buildbot.pypy.org Fri Apr 4 14:59:56 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 14:59:56 +0200 (CEST) Subject: [pypy-commit] pypy default: found an unroll miscompilation Message-ID: <20140404125956.B479D1C309D@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70436:2b8a59268ac7 Date: 2014-04-04 14:59 +0200 http://bitbucket.org/pypy/pypy/changeset/2b8a59268ac7/ Log: found an unroll miscompilation diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -501,6 +501,59 @@ return g(n, 1) + g(n, 2) assert self.meta_interp(h, [25]) == 7 * 25 * (7 + 8 + 2 + 3) + def test_two_bridged_loops_classes(self): + py.test.skip("fix me :-((((") + myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'n', 'x', 's']) + class A(object): + pass + bytecode = "zI7izI8i" + def f(n, s): + i = x = 0 + pos = 0 + op = '-' + while pos < len(bytecode): + myjitdriver.jit_merge_point(pos=pos, i=i, n=n, s=s, x=x) + op = bytecode[pos] + if op == 
'z': + i = 0 + if op == 'i': + i += 1 + pos -= 2 + myjitdriver.can_enter_jit(pos=pos, i=i, n=n, s=s, x=x) + continue + elif op == 'I': + if not (i < n): + pos += 2 + elif op == '7': + if s is not None: + x = x + 7 + else: + x = x + 2 + elif op == '8': + if s is not None: + x = x + 8 + else: + x = x + 3 + + pos += 1 + return x + + def g(n, s): + if s == 2: + s = None + else: + s = A() + sa = 0 + for i in range(7): + sa += f(n, s) + return sa + assert self.meta_interp(g, [25, 1]) == g(25, 1) + + def h(n): + return g(n, 1) + g(n, 2) + assert self.meta_interp(h, [25]) == h(25) + + def test_three_nested_loops(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x']) bytecode = ".+357" From noreply at buildbot.pypy.org Fri Apr 4 15:30:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 15:30:39 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Fix for "don't conflict too much" Message-ID: <20140404133039.1BFF51C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5181:5b2495e71978 Date: 2014-04-04 15:23 +0200 http://bitbucket.org/pypy/extradoc/changeset/5b2495e71978/ Log: Fix for "don't conflict too much" diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -96,8 +96,6 @@ applications should perform better than in a regular PyPy. (All numbers presented here are comparing different versions of PyPy which all have the JIT enabled.) -XXX I wonder whether we need to add a caveat like "for applications that don't -conflict too much" somewhere You will find below a sketch of the `work plan`_. If more money than requested is collected, then the excess will be entered into the general @@ -300,6 +298,10 @@ * Forking the process is slow. +* We don't foresee particularly high conflict rates in regular + multithreaded programs, but this assertion needs to be checked + and possibly some heuristics improved. 
+ Fixing all these issues is required before we can confidently say that PyPy-TM is an out-of-the-box replacement of a regular PyPy which gives speed-ups over the regular PyPy independently of the Python program it @@ -326,7 +328,12 @@ likely to be found, and communicates it to the system, using some lightweight library on top of ``with atomic``. -This introduces new issues. At the very least, we need a way to get +However, this introduces new issues. The main one is that by forcing +transactions to be longer, "conflicts" will become more common, up to +the point of partially or completely offsetting the benefit of using +PyPy-TM in the first place. + +So the programmer using PyPy-TM needs a way to get feedback about what conflicts we get in these long-running transactions, and where they are produced. A first step will be to implement getting "tracebacks" that point to the places where the most time is lost. This From noreply at buildbot.pypy.org Fri Apr 4 15:30:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 15:30:40 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Remove this for now Message-ID: <20140404133040.50DDD1C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5182:e1614783d2ed Date: 2014-04-04 15:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/e1614783d2ed/ Log: Remove this for now diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -261,8 +261,7 @@ Paid work will be at $60/hour, but at least one developer who will work on the project – Armin Rigo – has committed to 2 hours of volunteer work per paid hour (so the total amount of money that we ask is divided -by three XXX this sounds like *only* armin works on the project. maybe just -divide by two?). A 10% general donation will go to the `Software Freedom +by three). 
A 10% general donation will go to the `Software Freedom Conservancy`_ itself, the non-profit organization of which the PyPy project is a member and which manages all the issues related to donations, payments, and tax-exempt status. From noreply at buildbot.pypy.org Fri Apr 4 16:43:57 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 16:43:57 +0200 (CEST) Subject: [pypy-commit] pypy default: fix typo Message-ID: <20140404144357.0B1931C31F9@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70437:709009b66802 Date: 2014-04-04 16:43 +0200 http://bitbucket.org/pypy/pypy/changeset/709009b66802/ Log: fix typo diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -296,7 +296,7 @@ i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) - self.finilize_short_preamble(start_label) + self.finalize_short_preamble(start_label) def close_loop(self, start_label, jumpop): virtual_state = self.initial_virtual_state @@ -403,9 +403,9 @@ assert isinstance(target_token, TargetToken) target_token.targeting_jitcell_token.retraced_count = sys.maxint - self.finilize_short_preamble(start_label) + self.finalize_short_preamble(start_label) - def finilize_short_preamble(self, start_label): + def finalize_short_preamble(self, start_label): short = self.short assert short[-1].getopnum() == rop.JUMP target_token = start_label.getdescr() @@ -584,6 +584,7 @@ for guard in extra_guards: if guard.is_guard(): + import pdb; pdb.set_trace() descr = target.resume_at_jump_descr.clone_if_mutable() inliner.inline_descr_inplace(descr) guard.setdescr(descr) From noreply at buildbot.pypy.org Fri Apr 4 16:43:58 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 16:43:58 +0200 (CEST) Subject: 
[pypy-commit] pypy default: simplify test Message-ID: <20140404144358.601241C31F9@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70438:133dce379a38 Date: 2014-04-04 16:43 +0200 http://bitbucket.org/pypy/pypy/changeset/133dce379a38/ Log: simplify test diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -506,7 +506,7 @@ myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'n', 'x', 's']) class A(object): pass - bytecode = "zI7izI8i" + bytecode = "I7i" def f(n, s): i = x = 0 pos = 0 @@ -514,8 +514,6 @@ while pos < len(bytecode): myjitdriver.jit_merge_point(pos=pos, i=i, n=n, s=s, x=x) op = bytecode[pos] - if op == 'z': - i = 0 if op == 'i': i += 1 pos -= 2 @@ -529,12 +527,6 @@ x = x + 7 else: x = x + 2 - elif op == '8': - if s is not None: - x = x + 8 - else: - x = x + 3 - pos += 1 return x @@ -547,7 +539,7 @@ for i in range(7): sa += f(n, s) return sa - assert self.meta_interp(g, [25, 1]) == g(25, 1) + #assert self.meta_interp(g, [25, 1]) == g(25, 1) def h(n): return g(n, 1) + g(n, 2) From noreply at buildbot.pypy.org Fri Apr 4 16:52:42 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Fri, 4 Apr 2014 16:52:42 +0200 (CEST) Subject: [pypy-commit] cffi define-integer-constant: Handle define constant by "#define DOT 0x1FF" Message-ID: <20140404145242.C86931C31F9@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: define-integer-constant Changeset: r1490:c5e6470d1dc0 Date: 2014-04-02 17:20 +0800 http://bitbucket.org/cffi/cffi/changeset/c5e6470d1dc0/ Log: Handle define constant by "#define DOT 0x1FF" diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -443,6 +443,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key 
not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -170,12 +171,7 @@ def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -211,6 +207,24 @@ e.args = (e.args[0] + "\n *** Err: %s" % msg,) raise + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + pyvalue = int(match.group(0).rstrip("ULul"), 0) + if key not in self._int_constants: + self._int_constants[key] = pyvalue + else: + raise api.FFIError( + "multiple declarations of constant %s" % (key,)) + elif value == '...': + self._declare('macro ' + key, value) + else: + raise api.CDefError('only supports the syntax "#define ' + '%s ..." 
(literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) + def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): From noreply at buildbot.pypy.org Fri Apr 4 16:52:43 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Fri, 4 Apr 2014 16:52:43 +0200 (CEST) Subject: [pypy-commit] cffi define-integer-constant: Add test to '#define DOT 0x1FF' like defines Message-ID: <20140404145243.ECB901C31F9@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: define-integer-constant Changeset: r1491:092a42e02b30 Date: 2014-04-02 19:08 +0800 http://bitbucket.org/cffi/cffi/changeset/092a42e02b30/ Log: Add test to '#define DOT 0x1FF' like defines diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1581,3 +1581,21 @@ assert s[0].a == b'X' assert s[1].b == -4892220 assert s[1].a == b'Y' + + def test_define_integer_constant(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + #define DOT 100 + #define DOT_OCT 0100l + #define DOT_HEX 0x100u + #define DOT_UL 1000UL + enum foo {AA, BB=DOT, CC}; + """) + lib = ffi.dlopen(None) + assert ffi.string(ffi.cast("enum foo", 100)) == "BB" + assert lib.DOT == 100 + assert lib.DOT_OCT == 0o100 + assert lib.DOT_HEX == 0x100 + assert lib.DOT_UL == 1000 + + From noreply at buildbot.pypy.org Fri Apr 4 16:52:45 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Fri, 4 Apr 2014 16:52:45 +0200 (CEST) Subject: [pypy-commit] cffi define-integer-constant: "010" is not valid oct in python3. Fix it. Message-ID: <20140404145245.1DD341C31F9@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: define-integer-constant Changeset: r1492:a9bac7f13149 Date: 2014-04-03 18:59 +0800 http://bitbucket.org/cffi/cffi/changeset/a9bac7f13149/ Log: "010" is not valid oct in python3. Fix it. 
diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -212,7 +212,15 @@ value = value.strip() match = _r_int_literal.search(value) if match is not None: - pyvalue = int(match.group(0).rstrip("ULul"), 0) + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) if key not in self._int_constants: self._int_constants[key] = pyvalue else: diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1585,6 +1585,7 @@ def test_define_integer_constant(self): ffi = FFI(backend=self.Backend()) ffi.cdef(""" + #define DOT_0 0 #define DOT 100 #define DOT_OCT 0100l #define DOT_HEX 0x100u @@ -1593,6 +1594,7 @@ """) lib = ffi.dlopen(None) assert ffi.string(ffi.cast("enum foo", 100)) == "BB" + assert lib.DOT_0 == 0 assert lib.DOT == 100 assert lib.DOT_OCT == 0o100 assert lib.DOT_HEX == 0x100 From noreply at buildbot.pypy.org Fri Apr 4 16:52:46 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Fri, 4 Apr 2014 16:52:46 +0200 (CEST) Subject: [pypy-commit] cffi define-integer-constant: Refactor copy&paste of raise dup constant into method Message-ID: <20140404145246.430951C31F9@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: define-integer-constant Changeset: r1493:3626870c2ce8 Date: 2014-04-03 19:10 +0800 http://bitbucket.org/cffi/cffi/changeset/3626870c2ce8/ Log: Refactor copy&paste of raise dup constant into method diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -207,6 +207,12 @@ e.args = (e.args[0] + "\n *** Err: %s" % msg,) raise + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + def _process_macros(self, macros): 
for key, value in macros.items(): value = value.strip() @@ -221,11 +227,7 @@ int_str = "0o" + int_str[1:] pyvalue = int(int_str, 0) - if key not in self._int_constants: - self._int_constants[key] = pyvalue - else: - raise api.FFIError( - "multiple declarations of constant %s" % (key,)) + self._add_constants(key, pyvalue) elif value == '...': self._declare('macro ' + key, value) else: @@ -564,11 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) - if enum.name in self._int_constants: - raise api.FFIError( - "multiple declarations of constant %s" % (enum.name,)) - - self._int_constants[enum.name] = nextenumvalue + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -583,8 +581,4 @@ if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) for k, v in other._int_constants.items(): - if k not in self._int_constants: - self._int_constants[k] = v - else: - raise api.FFIError( - "multiple declarations of constant %s" % (k,)) + self._add_constants(k, v) From noreply at buildbot.pypy.org Fri Apr 4 16:52:47 2014 From: noreply at buildbot.pypy.org (mozbugbox) Date: Fri, 4 Apr 2014 16:52:47 +0200 (CEST) Subject: [pypy-commit] cffi define-integer-constant: Add more test to "#define FOO 0X10" Message-ID: <20140404145247.60AE51C31F9@cobra.cs.uni-duesseldorf.de> Author: mozbugbox Branch: define-integer-constant Changeset: r1494:4c0b4f9eb00b Date: 2014-04-04 17:43 +0800 http://bitbucket.org/cffi/cffi/changeset/4c0b4f9eb00b/ Log: Add more test to "#define FOO 0X10" diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1589,6 +1589,7 @@ #define DOT 100 #define DOT_OCT 0100l #define DOT_HEX 0x100u + #define DOT_HEX2 0X10 #define DOT_UL 1000UL enum foo {AA, BB=DOT, CC}; """) @@ -1598,6 +1599,7 @@ assert lib.DOT == 100 
assert lib.DOT_OCT == 0o100 assert lib.DOT_HEX == 0x100 + assert lib.DOT_HEX2 == 0x10 assert lib.DOT_UL == 1000 From noreply at buildbot.pypy.org Fri Apr 4 16:52:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 16:52:48 +0200 (CEST) Subject: [pypy-commit] cffi default: Merged in mozbugbox/cffi/define-integer-constant (pull request #31) Message-ID: <20140404145248.759C61C31F9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1495:6f31a53a6275 Date: 2014-04-04 16:52 +0200 http://bitbucket.org/cffi/cffi/changeset/6f31a53a6275/ Log: Merged in mozbugbox/cffi/define-integer-constant (pull request #31) Handle "#define DOT 0x1FF" like defines diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -443,6 +443,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -170,12 +171,7 @@ def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." 
for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -211,6 +207,34 @@ e.args = (e.args[0] + "\n *** Err: %s" % msg,) raise + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) + else: + raise api.CDefError('only supports the syntax "#define ' + '%s ..." 
(literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) + def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): @@ -542,11 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) - if enum.name in self._int_constants: - raise api.FFIError( - "multiple declarations of constant %s" % (enum.name,)) - - self._int_constants[enum.name] = nextenumvalue + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -561,8 +581,4 @@ if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) for k, v in other._int_constants.items(): - if k not in self._int_constants: - self._int_constants[k] = v - else: - raise api.FFIError( - "multiple declarations of constant %s" % (k,)) + self._add_constants(k, v) diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1581,3 +1581,25 @@ assert s[0].a == b'X' assert s[1].b == -4892220 assert s[1].a == b'Y' + + def test_define_integer_constant(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + #define DOT_0 0 + #define DOT 100 + #define DOT_OCT 0100l + #define DOT_HEX 0x100u + #define DOT_HEX2 0X10 + #define DOT_UL 1000UL + enum foo {AA, BB=DOT, CC}; + """) + lib = ffi.dlopen(None) + assert ffi.string(ffi.cast("enum foo", 100)) == "BB" + assert lib.DOT_0 == 0 + assert lib.DOT == 100 + assert lib.DOT_OCT == 0o100 + assert lib.DOT_HEX == 0x100 + assert lib.DOT_HEX2 == 0x10 + assert lib.DOT_UL == 1000 + + From noreply at buildbot.pypy.org Fri Apr 4 17:10:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 17:10:19 +0200 (CEST) Subject: [pypy-commit] cffi default: Fix test for 6f31a53a6275 Message-ID: <20140404151019.DC3591C3204@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1496:0457fbb2d452 Date: 
2014-04-04 17:09 +0200 http://bitbucket.org/cffi/cffi/changeset/0457fbb2d452/ Log: Fix test for 6f31a53a6275 diff --git a/testing/test_parsing.py b/testing/test_parsing.py --- a/testing/test_parsing.py +++ b/testing/test_parsing.py @@ -161,9 +161,10 @@ def test_define_not_supported_for_now(): ffi = FFI(backend=FakeBackend()) - e = py.test.raises(CDefError, ffi.cdef, "#define FOO 42") - assert str(e.value) == \ - 'only supports the syntax "#define FOO ..." for now (literally)' + e = py.test.raises(CDefError, ffi.cdef, '#define FOO "blah"') + assert str(e.value) == ( + 'only supports the syntax "#define FOO ..." (literally)' + ' or "#define FOO 0x1FF" for now') def test_unnamed_struct(): ffi = FFI(backend=FakeBackend()) From noreply at buildbot.pypy.org Fri Apr 4 17:20:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 17:20:42 +0200 (CEST) Subject: [pypy-commit] cffi default: Issue #142: don't generate C files that use '$' in identifiers. Message-ID: <20140404152042.1E67C1C3204@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1497:facaf5de29c9 Date: 2014-04-04 17:20 +0200 http://bitbucket.org/cffi/cffi/changeset/facaf5de29c9/ Log: Issue #142: don't generate C files that use '$' in identifiers. 
diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -632,13 +632,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1,4 +1,4 @@ -import py +import py, re import sys, os, math, weakref from cffi import FFI, VerificationError, VerificationMissing, model from testing.support import * @@ -29,6 +29,24 @@ def setup_module(): import cffi.verifier 
cffi.verifier.cleanup_tmpdir() + # + # check that no $ sign is produced in the C file; it used to be the + # case that anonymous enums would produce '$enum_$1', which was + # used as part of a function name. GCC accepts such names, but it's + # apparently non-standard. + _r_comment = re.compile(r"/\*.*?\*/|//.*?$", re.DOTALL | re.MULTILINE) + _r_string = re.compile(r'\".*?\"') + def _write_source_and_check(self, file=None): + base_write_source(self, file) + if file is None: + f = open(self.sourcefilename) + data = f.read() + f.close() + data = _r_comment.sub(' ', data) + data = _r_string.sub('"skipped"', data) + assert '$' not in data + base_write_source = cffi.verifier.Verifier._write_source + cffi.verifier.Verifier._write_source = _write_source_and_check def test_module_type(): From noreply at buildbot.pypy.org Fri Apr 4 17:26:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 17:26:28 +0200 (CEST) Subject: [pypy-commit] cffi default: Issue #144: copy the "#if...#include " done in _cffi_backend.c. Message-ID: <20140404152628.286FC1D2B30@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1498:41b3ef920695 Date: 2014-04-04 17:26 +0200 http://bitbucket.org/cffi/cffi/changeset/41b3ef920695/ Log: Issue #144: copy the "#if...#include " done in _cffi_backend.c. 
diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -776,6 +776,10 @@ typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; typedef unsigned char _Bool; +#else +#if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +#endif #endif #if PY_MAJOR_VERSION < 3 From noreply at buildbot.pypy.org Fri Apr 4 17:30:19 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 17:30:19 +0200 (CEST) Subject: [pypy-commit] pypy default: some cleanups (no behaviour changes yet) Message-ID: <20140404153019.8E3301D2B7D@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70439:ec122f84c324 Date: 2014-04-04 17:29 +0200 http://bitbucket.org/pypy/pypy/changeset/ec122f84c324/ Log: some cleanups (no behaviour changes yet) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -584,7 +584,6 @@ for guard in extra_guards: if guard.is_guard(): - import pdb; pdb.set_trace() descr = target.resume_at_jump_descr.clone_if_mutable() inliner.inline_descr_inplace(descr) guard.setdescr(descr) diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -18,7 +18,10 @@ position = -1 def generalization_of(self, other, renum, bad): - raise NotImplementedError + result = self.generalization_of_dont_handle_bad(other, renum, bad) + if not result: + bad[self] = bad[other] = None + return result def generate_guards(self, other, box, cpu, extra_guards, renum): if self.generalization_of(other, renum, {}): @@ -67,37 +70,27 @@ def __init__(self, fielddescrs): self.fielddescrs = fielddescrs - def generalization_of(self, other, renum, bad): + def 
generalization_of_dont_handle_bad(self, other, renum, bad): assert self.position != -1 if self.position in renum: if renum[self.position] == other.position: return True - bad[self] = None - bad[other] = None return False renum[self.position] = other.position if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False assert isinstance(other, AbstractVirtualStructStateInfo) assert len(self.fielddescrs) == len(self.fieldstate) assert len(other.fielddescrs) == len(other.fieldstate) if len(self.fielddescrs) != len(other.fielddescrs): - bad[self] = None - bad[other] = None return False for i in range(len(self.fielddescrs)): if other.fielddescrs[i] is not self.fielddescrs[i]: - bad[self] = None - bad[other] = None return False if not self.fieldstate[i].generalization_of(other.fieldstate[i], renum, bad): - bad[self] = None - bad[other] = None return False return True @@ -130,11 +123,8 @@ self.known_class = known_class def _generalization_of(self, other): - if not isinstance(other, VirtualStateInfo): - return False - if not self.known_class.same_constant(other.known_class): - return False - return True + return (isinstance(other, VirtualStateInfo) and + self.known_class.same_constant(other.known_class)) def debug_header(self, indent): debug_print(indent + 'VirtualStateInfo(%d):' % self.position) @@ -146,11 +136,8 @@ self.typedescr = typedescr def _generalization_of(self, other): - if not isinstance(other, VStructStateInfo): - return False - if self.typedescr is not other.typedescr: - return False - return True + return (isinstance(other, VStructStateInfo) and + self.typedescr is other.typedescr) def debug_header(self, indent): debug_print(indent + 'VStructStateInfo(%d):' % self.position) @@ -165,28 +152,20 @@ return (isinstance(other, VArrayStateInfo) and self.arraydescr is other.arraydescr) - def generalization_of(self, other, renum, bad): + def generalization_of_dont_handle_bad(self, other, renum, bad): assert self.position != -1 if 
self.position in renum: if renum[self.position] == other.position: return True - bad[self] = None - bad[other] = None return False renum[self.position] = other.position if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False if len(self.fieldstate) != len(other.fieldstate): - bad[self] = None - bad[other] = None return False for i in range(len(self.fieldstate)): if not self.fieldstate[i].generalization_of(other.fieldstate[i], renum, bad): - bad[self] = None - bad[other] = None return False return True @@ -216,41 +195,29 @@ self.arraydescr = arraydescr self.fielddescrs = fielddescrs - def generalization_of(self, other, renum, bad): + def generalization_of_dont_handle_bad(self, other, renum, bad): assert self.position != -1 if self.position in renum: if renum[self.position] == other.position: return True - bad[self] = None - bad[other] = None return False renum[self.position] = other.position if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False assert isinstance(other, VArrayStructStateInfo) if len(self.fielddescrs) != len(other.fielddescrs): - bad[self] = None - bad[other] = None return False p = 0 for i in range(len(self.fielddescrs)): if len(self.fielddescrs[i]) != len(other.fielddescrs[i]): - bad[self] = None - bad[other] = None return False for j in range(len(self.fielddescrs[i])): if self.fielddescrs[i][j] is not other.fielddescrs[i][j]: - bad[self] = None - bad[other] = None return False if not self.fieldstate[p].generalization_of(other.fieldstate[p], renum, bad): - bad[self] = None - bad[other] = None return False p += 1 return True @@ -302,49 +269,33 @@ self.position_in_notvirtuals = -1 self.lenbound = value.lenbound - def generalization_of(self, other, renum, bad): + def generalization_of_dont_handle_bad(self, other, renum, bad): # XXX This will always retrace instead of forcing anything which # might be what we want sometimes? 
assert self.position != -1 if self.position in renum: if renum[self.position] == other.position: return True - bad[self] = None - bad[other] = None return False renum[self.position] = other.position if not isinstance(other, NotVirtualStateInfo): - bad[self] = None - bad[other] = None return False if other.level < self.level: - bad[self] = None - bad[other] = None return False if self.level == LEVEL_CONSTANT: if not self.constbox.same_constant(other.constbox): - bad[self] = None - bad[other] = None return False elif self.level == LEVEL_KNOWNCLASS: if not self.known_class.same_constant(other.known_class): - bad[self] = None - bad[other] = None return False if not self.intbound.contains_bound(other.intbound): - bad[self] = None - bad[other] = None return False if self.lenbound and other.lenbound: if self.lenbound.mode != other.lenbound.mode or \ self.lenbound.descr != other.lenbound.descr or \ not self.lenbound.bound.contains_bound(other.lenbound.bound): - bad[self] = None - bad[other] = None return False elif self.lenbound: - bad[self] = None - bad[other] = None return False return True From noreply at buildbot.pypy.org Fri Apr 4 17:30:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 17:30:24 +0200 (CEST) Subject: [pypy-commit] cffi default: Fix a type mismatch that causes a warning on some compilers Message-ID: <20140404153024.2AA0C1D2B7F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1499:51d87933eb4b Date: 2014-04-04 17:30 +0200 http://bitbucket.org/cffi/cffi/changeset/51d87933eb4b/ Log: Fix a type mismatch that causes a warning on some compilers diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1654,8 +1654,8 @@ ffi = FFI() ffi.cdef(""" int (*python_callback)(int how_many, int *values); - void *const c_callback; /* pass this ptr to C routines */ - int some_c_function(void *cb); + int (*const c_callback)(int,...); /* pass this ptr to C routines */ 
+ int some_c_function(int(*cb)(int,...)); """) lib = ffi.verify(""" #include From noreply at buildbot.pypy.org Fri Apr 4 17:34:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 17:34:56 +0200 (CEST) Subject: [pypy-commit] cffi default: Issue #145: don't run any test modifying stdout/stderr on any non-Linux Message-ID: <20140404153456.487F31D2BB1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1500:237031079adc Date: 2014-04-04 17:34 +0200 http://bitbucket.org/cffi/cffi/changeset/237031079adc/ Log: Issue #145: don't run any test modifying stdout/stderr on any non- Linux platform, as it seems to only work reliably with the glibc. diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1102,7 +1102,7 @@ def test_read_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1112,7 +1112,7 @@ def test_read_variable_as_unknown_length_array(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1124,7 +1124,7 @@ def test_write_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = 
find_and_load_library('c') diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1470,8 +1470,8 @@ assert func() == 42 def test_FILE_stored_in_stdout(): - if sys.platform == 'win32': - py.test.skip("MSVC: cannot assign to stdout") + if not sys.platform.startswith('linux'): + py.test.skip("likely, we cannot assign to stdout") ffi = FFI() ffi.cdef("int printf(const char *, ...); FILE *setstdout(FILE *);") lib = ffi.verify(""" From noreply at buildbot.pypy.org Fri Apr 4 18:43:09 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 18:43:09 +0200 (CEST) Subject: [pypy-commit] pypy default: move copy-pasted code Message-ID: <20140404164309.2505E1C31F9@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70440:e125ed3d7334 Date: 2014-04-04 17:39 +0200 http://bitbucket.org/pypy/pypy/changeset/e125ed3d7334/ Log: move copy-pasted code diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -18,7 +18,12 @@ position = -1 def generalization_of(self, other, renum, bad): - result = self.generalization_of_dont_handle_bad(other, renum, bad) + assert self.position != -1 + if self.position in renum: + result = renum[self.position] == other.position + else: + renum[self.position] = other.position + result = self.generalization_of_dont_handle_bad(other, renum, bad) if not result: bad[self] = bad[other] = None return result @@ -71,12 +76,6 @@ self.fielddescrs = fielddescrs def generalization_of_dont_handle_bad(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - return False - renum[self.position] = other.position if not self._generalization_of(other): return False @@ -153,12 +152,6 @@ self.arraydescr is 
other.arraydescr) def generalization_of_dont_handle_bad(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - return False - renum[self.position] = other.position if not self._generalization_of(other): return False if len(self.fieldstate) != len(other.fieldstate): @@ -196,12 +189,6 @@ self.fielddescrs = fielddescrs def generalization_of_dont_handle_bad(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - return False - renum[self.position] = other.position if not self._generalization_of(other): return False @@ -272,12 +259,6 @@ def generalization_of_dont_handle_bad(self, other, renum, bad): # XXX This will always retrace instead of forcing anything which # might be what we want sometimes? - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - return False - renum[self.position] = other.position if not isinstance(other, NotVirtualStateInfo): return False if other.level < self.level: From noreply at buildbot.pypy.org Fri Apr 4 18:44:08 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 18:44:08 +0200 (CEST) Subject: [pypy-commit] pypy default: better method name Message-ID: <20140404164408.2679C1C31F9@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70442:c7a48d308402 Date: 2014-04-04 18:43 +0200 http://bitbucket.org/pypy/pypy/changeset/c7a48d308402/ Log: better method name diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -23,7 +23,7 @@ result = renum[self.position] == other.position else: renum[self.position] = other.position - result = self.generalization_of_dont_handle_bad(other, renum, bad) + 
result = self.generalization_of_renumbering_done(other, renum, bad) if not result: bad[self] = bad[other] = None return result @@ -75,7 +75,7 @@ def __init__(self, fielddescrs): self.fielddescrs = fielddescrs - def generalization_of_dont_handle_bad(self, other, renum, bad): + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): return False @@ -151,7 +151,7 @@ return (isinstance(other, VArrayStateInfo) and self.arraydescr is other.arraydescr) - def generalization_of_dont_handle_bad(self, other, renum, bad): + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): return False if len(self.fieldstate) != len(other.fieldstate): @@ -188,7 +188,7 @@ self.arraydescr = arraydescr self.fielddescrs = fielddescrs - def generalization_of_dont_handle_bad(self, other, renum, bad): + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): return False @@ -256,7 +256,7 @@ self.position_in_notvirtuals = -1 self.lenbound = value.lenbound - def generalization_of_dont_handle_bad(self, other, renum, bad): + def generalization_of_renumbering_done(self, other, renum, bad): # XXX This will always retrace instead of forcing anything which # might be what we want sometimes? if not isinstance(other, NotVirtualStateInfo): From noreply at buildbot.pypy.org Fri Apr 4 18:43:10 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 18:43:10 +0200 (CEST) Subject: [pypy-commit] pypy default: test and fix: NonNull is not more general than NULL (!!!) Message-ID: <20140404164310.6480D1C31F9@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70441:e16936f60a43 Date: 2014-04-04 18:01 +0200 http://bitbucket.org/pypy/pypy/changeset/e16936f60a43/ Log: test and fix: NonNull is not more general than NULL (!!!) 
fixes the test, but another bug is still around diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -89,6 +89,11 @@ assert isgeneral(OptValue(ConstPtr(fooref)), OptValue(ConstPtr(fooref))) + value1 = OptValue(BoxPtr()) + value1.make_nonnull(None) + value2 = OptValue(ConstPtr(LLtypeMixin.nullptr)) + assert not isgeneral(value1, value2) + def test_field_matching_generalization(self): const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -269,6 +269,10 @@ elif self.level == LEVEL_KNOWNCLASS: if not self.known_class.same_constant(other.known_class): return False + elif self.level == LEVEL_NONNULL: + if other.constbox and not other.constbox.nonnull(): + return False + if not self.intbound.contains_bound(other.intbound): return False if self.lenbound and other.lenbound: diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -502,7 +502,6 @@ assert self.meta_interp(h, [25]) == 7 * 25 * (7 + 8 + 2 + 3) def test_two_bridged_loops_classes(self): - py.test.skip("fix me :-((((") myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'n', 'x', 's']) class A(object): pass From noreply at buildbot.pypy.org Fri Apr 4 18:47:15 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 18:47:15 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: check differently Message-ID: 
<20140404164715.7DED31C31F9@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70443:69a33b08802a Date: 2014-04-04 18:45 +0200 http://bitbucket.org/pypy/pypy/changeset/69a33b08802a/ Log: check differently diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -495,11 +495,12 @@ for i in range(7): sa += f(n, s) return sa - assert self.meta_interp(g, [25, 1]) == 7 * 25 * (7 + 8) + assert self.meta_interp(g, [25, 1]) == g(25, 1) def h(n): return g(n, 1) + g(n, 2) - assert self.meta_interp(h, [25]) == 7 * 25 * (7 + 8 + 2 + 3) + assert self.meta_interp(h, [25]) == h(25) + def test_three_nested_loops(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x']) From noreply at buildbot.pypy.org Fri Apr 4 18:47:16 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 4 Apr 2014 18:47:16 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: merge default Message-ID: <20140404164716.C99B61C31F9@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70444:0c9d8beef0c6 Date: 2014-04-04 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/0c9d8beef0c6/ Log: merge default diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -1,10 +1,17 @@ import py from rpython.config.translationoption import get_combined_translation_config from rpython.config.translationoption import set_opt_level -from rpython.config.config import ConflictConfigError +from rpython.config.config import ConflictConfigError, ConfigError +from rpython.translator.platform import platform as compiler def test_no_gcrootfinder_with_boehm(): config = get_combined_translation_config() 
config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') + +if compiler.name == 'msvc': + def test_no_asmgcrot_on_msvc(): + config = get_combined_translation_config() + py.test.raises(ConfigError, config.translation.setoption, + 'gcrootfinder', 'asmgcc', 'user') diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -1,9 +1,10 @@ import sys import os from rpython.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption, FloatOption -from rpython.config.config import ChoiceOption, StrOption, Config +from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError from rpython.config.support import detect_number_of_processors +from rpython.translator.platform import platform as compiler DEFL_INLINE_THRESHOLD = 32.4 # just enough to inline add__Int_Int() # and just small enough to prevend inlining of some rlist functions. 
@@ -16,8 +17,13 @@ if sys.platform.startswith("linux"): DEFL_ROOTFINDER_WITHJIT = "asmgcc" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] +elif compiler.name == 'msvc': + DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack"] else: DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] IS_64_BITS = sys.maxint > 2147483647 @@ -85,7 +91,7 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ROOTFINDERS, "shadowstack", cmdline="--gcrootfinder", requires={ diff --git a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py --- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py @@ -1,4 +1,9 @@ +import py from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests +from rpython.translator.platform import platform as compiler + +if compiler.name == 'msvc': + py.test.skip('asmgcc buggy on msvc') class TestAsmGcc(CompileFrameworkTests): gcrootfinder = "asmgcc" diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -89,6 +89,11 @@ assert isgeneral(OptValue(ConstPtr(fooref)), OptValue(ConstPtr(fooref))) + value1 = OptValue(BoxPtr()) + value1.make_nonnull(None) + value2 = OptValue(ConstPtr(LLtypeMixin.nullptr)) + assert not isgeneral(value1, value2) + def test_field_matching_generalization(self): const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ 
b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -296,7 +296,7 @@ i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) - self.finilize_short_preamble(start_label) + self.finalize_short_preamble(start_label) def close_loop(self, start_label, jumpop): virtual_state = self.initial_virtual_state @@ -403,9 +403,9 @@ assert isinstance(target_token, TargetToken) target_token.targeting_jitcell_token.retraced_count = sys.maxint - self.finilize_short_preamble(start_label) + self.finalize_short_preamble(start_label) - def finilize_short_preamble(self, start_label): + def finalize_short_preamble(self, start_label): short = self.short assert short[-1].getopnum() == rop.JUMP target_token = start_label.getdescr() diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -18,7 +18,15 @@ position = -1 def generalization_of(self, other, renum, bad): - raise NotImplementedError + assert self.position != -1 + if self.position in renum: + result = renum[self.position] == other.position + else: + renum[self.position] = other.position + result = self.generalization_of_renumbering_done(other, renum, bad) + if not result: + bad[self] = bad[other] = None + return result def generate_guards(self, other, box, cpu, extra_guards, renum): if self.generalization_of(other, renum, {}): @@ -67,37 +75,21 @@ def __init__(self, fielddescrs): self.fielddescrs = fielddescrs - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): 
- bad[self] = None - bad[other] = None return False assert isinstance(other, AbstractVirtualStructStateInfo) assert len(self.fielddescrs) == len(self.fieldstate) assert len(other.fielddescrs) == len(other.fieldstate) if len(self.fielddescrs) != len(other.fielddescrs): - bad[self] = None - bad[other] = None return False for i in range(len(self.fielddescrs)): if other.fielddescrs[i] is not self.fielddescrs[i]: - bad[self] = None - bad[other] = None return False if not self.fieldstate[i].generalization_of(other.fieldstate[i], renum, bad): - bad[self] = None - bad[other] = None return False return True @@ -130,11 +122,8 @@ self.known_class = known_class def _generalization_of(self, other): - if not isinstance(other, VirtualStateInfo): - return False - if not self.known_class.same_constant(other.known_class): - return False - return True + return (isinstance(other, VirtualStateInfo) and + self.known_class.same_constant(other.known_class)) def debug_header(self, indent): debug_print(indent + 'VirtualStateInfo(%d):' % self.position) @@ -146,11 +135,8 @@ self.typedescr = typedescr def _generalization_of(self, other): - if not isinstance(other, VStructStateInfo): - return False - if self.typedescr is not other.typedescr: - return False - return True + return (isinstance(other, VStructStateInfo) and + self.typedescr is other.typedescr) def debug_header(self, indent): debug_print(indent + 'VStructStateInfo(%d):' % self.position) @@ -165,28 +151,14 @@ return (isinstance(other, VArrayStateInfo) and self.arraydescr is other.arraydescr) - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False if len(self.fieldstate) != 
len(other.fieldstate): - bad[self] = None - bad[other] = None return False for i in range(len(self.fieldstate)): if not self.fieldstate[i].generalization_of(other.fieldstate[i], renum, bad): - bad[self] = None - bad[other] = None return False return True @@ -216,41 +188,23 @@ self.arraydescr = arraydescr self.fielddescrs = fielddescrs - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False assert isinstance(other, VArrayStructStateInfo) if len(self.fielddescrs) != len(other.fielddescrs): - bad[self] = None - bad[other] = None return False p = 0 for i in range(len(self.fielddescrs)): if len(self.fielddescrs[i]) != len(other.fielddescrs[i]): - bad[self] = None - bad[other] = None return False for j in range(len(self.fielddescrs[i])): if self.fielddescrs[i][j] is not other.fielddescrs[i][j]: - bad[self] = None - bad[other] = None return False if not self.fieldstate[p].generalization_of(other.fieldstate[p], renum, bad): - bad[self] = None - bad[other] = None return False p += 1 return True @@ -302,49 +256,31 @@ self.position_in_notvirtuals = -1 self.lenbound = value.lenbound - def generalization_of(self, other, renum, bad): + def generalization_of_renumbering_done(self, other, renum, bad): # XXX This will always retrace instead of forcing anything which # might be what we want sometimes? 
- assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position if not isinstance(other, NotVirtualStateInfo): - bad[self] = None - bad[other] = None return False if other.level < self.level: - bad[self] = None - bad[other] = None return False if self.level == LEVEL_CONSTANT: if not self.constbox.same_constant(other.constbox): - bad[self] = None - bad[other] = None return False elif self.level == LEVEL_KNOWNCLASS: if not self.known_class.same_constant(other.known_class): - bad[self] = None - bad[other] = None return False + elif self.level == LEVEL_NONNULL: + if other.constbox and not other.constbox.nonnull(): + return False + if not self.intbound.contains_bound(other.intbound): - bad[self] = None - bad[other] = None return False if self.lenbound and other.lenbound: if self.lenbound.mode != other.lenbound.mode or \ self.lenbound.descr != other.lenbound.descr or \ not self.lenbound.bound.contains_bound(other.lenbound.bound): - bad[self] = None - bad[other] = None return False elif self.lenbound: - bad[self] = None - bad[other] = None return False return True diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -502,6 +502,50 @@ assert self.meta_interp(h, [25]) == h(25) + def test_two_bridged_loops_classes(self): + myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'n', 'x', 's']) + class A(object): + pass + bytecode = "I7i" + def f(n, s): + i = x = 0 + pos = 0 + op = '-' + while pos < len(bytecode): + myjitdriver.jit_merge_point(pos=pos, i=i, n=n, s=s, x=x) + op = bytecode[pos] + if op == 'i': + i += 1 + pos -= 2 + myjitdriver.can_enter_jit(pos=pos, i=i, n=n, s=s, x=x) + continue + elif op == 'I': + if not (i < n): + pos += 2 + elif op == '7': + if s is not None: + x = x + 7 + 
else: + x = x + 2 + pos += 1 + return x + + def g(n, s): + if s == 2: + s = None + else: + s = A() + sa = 0 + for i in range(7): + sa += f(n, s) + return sa + #assert self.meta_interp(g, [25, 1]) == g(25, 1) + + def h(n): + return g(n, 1) + g(n, 2) + assert self.meta_interp(h, [25]) == h(25) + + def test_three_nested_loops(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x']) bytecode = ".+357" From noreply at buildbot.pypy.org Fri Apr 4 22:33:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Apr 2014 22:33:46 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Write down an XXX explicitly Message-ID: <20140404203346.943441C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5183:84ccece0fc17 Date: 2014-04-04 22:33 +0200 http://bitbucket.org/pypy/extradoc/changeset/84ccece0fc17/ Log: Write down an XXX explicitly diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -261,7 +261,8 @@ Paid work will be at $60/hour, but at least one developer who will work on the project – Armin Rigo – has committed to 2 hours of volunteer work per paid hour (so the total amount of money that we ask is divided -by three). A 10% general donation will go to the `Software Freedom +by three). (XXX check with Remi Meier) +A 10% general donation will go to the `Software Freedom Conservancy`_ itself, the non-profit organization of which the PyPy project is a member and which manages all the issues related to donations, payments, and tax-exempt status. 
From noreply at buildbot.pypy.org Sat Apr 5 09:18:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 09:18:21 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Feedback from Remi Message-ID: <20140405071821.6AB6D1C320C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5184:29ee29e6dbe2 Date: 2014-04-05 09:18 +0200 http://bitbucket.org/pypy/extradoc/changeset/29ee29e6dbe2/ Log: Feedback from Remi diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -168,6 +168,13 @@ ultimately better for very-long-running transactions. None of the papers we know of discusses this issue. +Note that right now PyPy-STM has false conflicts within the same object, +e.g. within a list or a dictionary; but we can more easily do something +about it (see `goal 2_`). Also, it might be possible in PyPy-HTM to +arrange objects in memory ahead of time so that such conflicts are very +rare; but we will never get a rate of exactly 0%, which might be +required for very-long-running transactions. + .. _`Virtualizing Transactional Memory`: http://pages.cs.wisc.edu/~isca2005/papers/08A-02.PDF @@ -261,7 +268,8 @@ Paid work will be at $60/hour, but at least one developer who will work on the project – Armin Rigo – has committed to 2 hours of volunteer work per paid hour (so the total amount of money that we ask is divided -by three). (XXX check with Remi Meier) +by three); and another developer – Remi Meier – is a Ph.D. student +and gets paid from another source already. 
A 10% general donation will go to the `Software Freedom Conservancy`_ itself, the non-profit organization of which the PyPy project is a member and which manages all the issues related to From noreply at buildbot.pypy.org Sat Apr 5 09:39:03 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:03 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: removed non-existing documents Message-ID: <20140405073903.0498E1C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70445:4cd8d387ead0 Date: 2014-04-03 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/4cd8d387ead0/ Log: removed non-existing documents - dot-net.rst: removed in 429434b97ea511f8a43dcad4f2e1b170cb7d1153 - discussion/outline-external-ootype.rst: removed in 66ce9bd318a906287dc7df35452d711f26ce61b9 - discussion/M-integration.rst: removed in 66ce9bd318a906287dc7df35452d711f26ce61b9 - needswork.txt: removed in b664434c47ded8c0d87011a384fc04dc1ca51671 diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -9,9 +9,3 @@ distribution.rst - dot-net.rst - - - - - diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,5 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - ============================= lib_pypy/distributed features ============================= diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - .. 
_glossary: ******** From noreply at buildbot.pypy.org Sat Apr 5 09:39:04 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:04 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: Fixed path to lib_pypy tests. Not sure if the whole section Message-ID: <20140405073904.416FF1C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70446:692496e00c80 Date: 2014-04-03 10:18 +0200 http://bitbucket.org/pypy/pypy/changeset/692496e00c80/ Log: Fixed path to lib_pypy tests. Not sure if the whole section still makes sense. lib_pypy/pypy_tests/ was moved in 8b504e234b7c2b45015305117354b1eb8e5acdd8 (PR #113) diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -742,9 +742,9 @@ Testing modules in ``lib_pypy/`` -------------------------------- -You can go to the `lib_pypy/pypy_test/`_ directory and invoke the testing tool +You can go to the `pypy/module/test_lib_pypy/`_ directory and invoke the testing tool ("py.test" or "python ../../pypy/test_all.py") to run tests against the -lib_pypy hierarchy. Note, that tests in `lib_pypy/pypy_test/`_ are allowed +lib_pypy hierarchy. Note, that tests in `pypy/module/test_lib_pypy/`_ are allowed and encouraged to let their tests run at interpreter level although `lib_pypy/`_ modules eventually live at PyPy's application level. 
This allows us to quickly test our python-coded reimplementations From noreply at buildbot.pypy.org Sat Apr 5 09:39:05 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:05 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: corrected URL to Boehm-Demers-Weiser GC Message-ID: <20140405073905.8EDB11C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70447:68f3c8ecb722 Date: 2014-04-03 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/68f3c8ecb722/ Log: corrected URL to Boehm-Demers-Weiser GC As far as I can tell this is the current and most recent webpage diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst --- a/pypy/doc/config/opt.rst +++ b/pypy/doc/config/opt.rst @@ -46,5 +46,5 @@ The default level is `2`. -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser garbage collector`: http://hboehm.info/gc/ .. _`custom garbage collectors`: ../garbage_collection.html From noreply at buildbot.pypy.org Sat Apr 5 09:39:06 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:06 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: more hyperlinks corrected Message-ID: <20140405073906.CCD6E1C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70448:adb800e4e698 Date: 2014-04-03 10:40 +0200 http://bitbucket.org/pypy/pypy/changeset/adb800e4e698/ Log: more hyperlinks corrected codespeak.net extradoc -> bitbucket.org extradoc codespeak.net pypy- dev -> python.org pypy-dev diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -17,7 +17,7 @@ Read more in the `EuroPython 2006 sprint report`_. -.. _`EuroPython 2006 sprint report`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt +.. 
_`EuroPython 2006 sprint report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt PyPy at XP 2006 and Agile 2006 ================================================================== @@ -41,8 +41,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.html PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. _report: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.html -.. _announcement: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.html +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -114,7 +114,7 @@ said they were interested in the outcome and would keep an eye on its progress. Read the `talk slides`_. -.. _`talk slides`: http://codespeak.net/pypy/extradoc/talk/solutions-linux-paris-2006.html +.. _`talk slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/solutions-linux-paris-2006.html PyPy Sprint in Palma De Mallorca 23rd - 29th January 2006 @@ -129,9 +129,9 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/sprint-announcement.html -.. 
_`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q1/002746.html -.. _`one for the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2006q1/002749.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.html +.. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html +.. _`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html Preliminary EU reports released =============================== @@ -155,8 +155,8 @@ Michael and Carl have written a `report about the first half`_ and `one about the second half`_ of the sprint. *(12/18/2005)* -.. _`report about the first half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002656.html -.. _`one about the second half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002660.html +.. _`report about the first half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002656.html +.. _`one about the second half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002660.html PyPy release 0.8.0 =================== @@ -187,12 +187,12 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: http://codespeak.net/pypy/extradoc/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.html .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style -.. _`report about day one`: http://codespeak.net/pipermail/pypy-dev/2005q4/002510.html -.. _`one about day two and three`: http://codespeak.net/pipermail/pypy-dev/2005q4/002512.html -.. _`the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2005q4/002514.html +.. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html +.. 
_`one about day two and three`: https://mail.python.org/pipermail/pypy-dev/2005-October/002512.html +.. _`the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2005-October/002514.html PyPy release 0.7.0 =================== @@ -222,9 +222,9 @@ detailing some of the works that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.html -.. _`PyPy sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-sprint.html -.. _`day 1 - 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002287.html +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.html +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.html +.. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html .. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ PyPy Hildesheim2 finished: first self-contained PyPy run! @@ -239,11 +239,11 @@ having a good time. You might want to look at the selected `pictures from the sprint`_. -.. _`report about day 1`: http://codespeak.net/pipermail/pypy-dev/2005q3/002217.html -.. _`day 2 and day 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002220.html -.. _`day 4 and day 5`: http://codespeak.net/pipermail/pypy-dev/2005q3/002234.html -.. _`day 6`: http://codespeak.net/pipermail/pypy-dev/2005q3/002239.html -.. _`day 7`: http://codespeak.net/pipermail/pypy-dev/2005q3/002245.html +.. _`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html +.. _`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html +.. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html +.. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html +.. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html .. 
_`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg .. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html .. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ @@ -264,15 +264,15 @@ the LLVM backends and type inference in general. *(07/13/2005)* -.. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html -.. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html -.. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev +.. _`day 1`: https://mail.python.org/pipermail/pypy-dev/2005-June/002169.html +.. _`day 2`: https://mail.python.org/pipermail/pypy-dev/2005-June/002171.html +.. _`day 3`: https://mail.python.org/pipermail/pypy-dev/2005-June/002172.html +.. _`pypy-dev`: https://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-announcement.html -.. _`list of people coming`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-people.html +.. _`sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-announcement.html +.. _`list of people coming`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-people.html Duesseldorf PyPy sprint 2-9 June 2006 ================================================================== @@ -285,8 +285,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. 
_`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.html PyPy at XP 2006 and Agile 2006 From noreply at buildbot.pypy.org Sat Apr 5 09:39:08 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:08 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: still finding dead links, fixing them Message-ID: <20140405073908.1599D1C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70449:bba657ad7d50 Date: 2014-04-03 10:46 +0200 http://bitbucket.org/pypy/pypy/changeset/bba657ad7d50/ Log: still finding dead links, fixing them diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -79,7 +79,7 @@ .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf -.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 +.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf .. 
_`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -222,7 +222,7 @@ PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use for other projects. +`_ and use for other projects. The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -264,7 +264,7 @@ interpreter. .. _`py.test testing tool`: http://pytest.org -.. _`py.test usage and invocations`: http://pytest.org/usage.html#usage +.. _`py.test usage and invocations`: http://pytest.org/latest/usage.html#usage Special Introspection Features of the Untranslated Python Interpreter --------------------------------------------------------------------- From noreply at buildbot.pypy.org Sat Apr 5 09:39:09 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:09 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: more broken URLs fixed in docs. Message-ID: <20140405073909.532D51C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70450:7e38344a56f1 Date: 2014-04-03 11:01 +0200 http://bitbucket.org/pypy/pypy/changeset/7e38344a56f1/ Log: more broken URLs fixed in docs. diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -185,6 +185,6 @@ .. _`standard object space`: objspace.html#the-standard-object-space .. 
[D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy - EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf + EU-Report, 2007, https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf .. include:: _ref.txt diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py +.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/rpython/rtyper/extfunc.py diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,30 +42,30 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): http://codespeak.net/pypy/extradoc/sprintinfo/HildesheimReport.html - .. _Gothenburg (May 2003): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2003-sprintreport.txt - .. _LovainLaNeuve (June 2003): http://codespeak.net/pypy/extradoc/sprintinfo/LouvainLaNeuveReport.txt - .. _Berlin (Sept 2003): http://codespeak.net/pypy/extradoc/sprintinfo/BerlinReport.txt - .. _Amsterdam (Dec 2003): http://codespeak.net/pypy/extradoc/sprintinfo/AmsterdamReport.txt - .. _Vilnius (Nov 2004): http://codespeak.net/pypy/extradoc/sprintinfo/vilnius-2004-sprintreport.txt - .. 
_Leysin (Jan 2005): http://codespeak.net/pypy/extradoc/sprintinfo/LeysinReport.txt - .. _PyCon/Washington (March 2005): http://codespeak.net/pypy/extradoc/sprintinfo/pycon_sprint_report.txt - .. _Europython/Gothenburg (June 2005): http://codespeak.net/pypy/extradoc/sprintinfo/ep2005-sprintreport.txt - .. _Hildesheim (July 2005): http://codespeak.net/pypy/extradoc/sprintinfo/hildesheim2005-sprintreport.txt - .. _Heidelberg (Aug 2005): http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.txt - .. _Paris (Oct 2005): http://codespeak.net/pypy/extradoc/sprintinfo/paris/paris-report.txt - .. _Gothenburg (Dec 2005): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt - .. _Mallorca (Jan 2006): http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/mallorca-sprintreport.txt - .. _LouvainLaNeuve (March 2006): http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.txt - .. _Leysin (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2006-sprintreport.txt - .. _Tokyo (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/sprint-report.txt - .. _Düsseldorf (June 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/report1.txt - .. _Europython/Geneva (July 2006): http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt - .. _Düsseldorf (October 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/report.txt - .. _`Leysin (January 2007)`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/report.txt - .. _Hildesheim (Feb 2007): http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/sprint-report.txt - .. _`EU report writing sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/eu-report-sprint-report.txt - .. _`PyCon/Dallas (Feb 2006)`: http://codespeak.net/pypy/extradoc/sprintinfo/pycon06/sprint-report.txt + .. 
_Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.html + .. _Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt + .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt + .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt + .. _Amsterdam (Dec 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/AmsterdamReport.txt + .. _Vilnius (Nov 2004): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/vilnius-2004-sprintreport.txt + .. _Leysin (Jan 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LeysinReport.txt + .. _PyCon/Washington (March 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon_sprint_report.txt + .. _Europython/Gothenburg (June 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ep2005-sprintreport.txt + .. _Hildesheim (July 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/hildesheim2005-sprintreport.txt + .. _Heidelberg (Aug 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt + .. _Paris (Oct 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris/paris-report.txt + .. _Gothenburg (Dec 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt + .. _Mallorca (Jan 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/mallorca-sprintreport.txt + .. _LouvainLaNeuve (March 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt + .. _Leysin (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2006-sprintreport.txt + .. _Tokyo (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/tokyo/sprint-report.txt + .. 
_Düsseldorf (June 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/report1.txt + .. _Europython/Geneva (July 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt + .. _Düsseldorf (October 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006b/report.txt + .. _`Leysin (January 2007)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2007/report.txt + .. _Hildesheim (Feb 2007): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/sprint-report.txt + .. _`EU report writing sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/eu-report-sprint-report.txt + .. _`PyCon/Dallas (Feb 2006)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon06/sprint-report.txt .. _`Göteborg (November 2007)`: http://morepypy.blogspot.com/2007_11_01_archive.html .. _`Leysin (January 2008)`: http://morepypy.blogspot.com/2008/01/leysin-winter-sport-sprint-started.html .. _`Berlin (May 2008)`: http://morepypy.blogspot.com/2008_05_01_archive.html diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -380,7 +380,7 @@ The RPython Typer ================= -https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ The RTyper is the first place where the choice of backend makes a difference; as outlined above we are assuming that ANSI C is the target. @@ -603,7 +603,7 @@ - using the `Boehm-Demers-Weiser conservative garbage collector`_ - using one of our custom `exact GCs implemented in RPython`_ -.. _`Boehm-Demers-Weiser conservative garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser conservative garbage collector`: http://hboehm.info/gc/ .. 
_`exact GCs implemented in RPython`: garbage_collection.html Almost all application-level Python code allocates objects at a very fast @@ -621,7 +621,7 @@ The C Back-End ============== -https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/c/ GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -86,7 +86,7 @@ option (this is the default at some optimization levels like ``-O1``, but unneeded for high-performance translations like ``-O2``). You may get it at -http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_source/gc-7.1.tar.gz +http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with pypy. Unpack this folder in the base directory. Then open a command From noreply at buildbot.pypy.org Sat Apr 5 09:39:10 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:10 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: use default branch instead of the tip Message-ID: <20140405073910.9D7921C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70451:fc346381ae35 Date: 2014-04-03 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/fc346381ae35/ Log: use default branch instead of the tip diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -256,7 +256,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. 
_`Gateway classes`: diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/rpython/rtyper/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/rpython/rtyper/extfunc.py +.. 
_`extfunc.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/extfunc.py diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -621,7 +621,7 @@ The C Back-End ============== -https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/c/ +https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are From noreply at buildbot.pypy.org Sat Apr 5 09:39:11 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:11 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: final dead link fix in docs Message-ID: <20140405073911.DA2DE1C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70452:f8ede69c3f52 Date: 2014-04-03 11:21 +0200 http://bitbucket.org/pypy/pypy/changeset/f8ede69c3f52/ Log: final dead link fix in docs diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -42,7 +42,7 @@ on the `people page`_. .. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html -.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.html -.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. 
_report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.txt PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -129,7 +129,7 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.txt .. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html .. _`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html @@ -187,7 +187,7 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.txt .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style .. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html @@ -222,8 +222,8 @@ detailing some of the works that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.html -.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.html +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt .. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html .. 
_`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ @@ -285,8 +285,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html -.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.txt +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy at XP 2006 and Agile 2006 diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,7 +42,7 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.html + .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.txt .. _Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt From noreply at buildbot.pypy.org Sat Apr 5 09:39:13 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:13 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: fix sphinx-doc warnings because of missing ref Message-ID: <20140405073913.2C8B71C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70453:cb5d6686d43d Date: 2014-04-03 11:43 +0200 http://bitbucket.org/pypy/pypy/changeset/cb5d6686d43d/ Log: fix sphinx-doc warnings because of missing ref These refs did never work and did not point to the bitbucket directory. For now, just use backquotes. 
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -7,31 +7,31 @@ ================================= ============================================ Directory explanation/links ================================= ============================================ -`pypy/bin/`_ command-line scripts, mainly +``pypy/bin/`` command-line scripts, mainly `pypy/bin/pyinteractive.py`_ -`pypy/config/`_ handles the numerous options for building +``pypy/config/`` handles the numerous options for building and running PyPy -`pypy/doc/`_ text versions of PyPy developer +``pypy/doc/`` text versions of PyPy developer documentation -`pypy/doc/config/`_ documentation for the numerous translation +``pypy/doc/config/`` documentation for the numerous translation options -`pypy/doc/discussion/`_ drafts of ideas and documentation +``pypy/doc/discussion/`` drafts of ideas and documentation ``doc/*/`` other specific documentation topics or tools -`pypy/interpreter/`_ `bytecode interpreter`_ and related objects +``pypy/interpreter/`` `bytecode interpreter`_ and related objects (frames, functions, modules,...) -`pypy/interpreter/pyparser/`_ interpreter-level Python source parser +``pypy/interpreter/pyparser/`` interpreter-level Python source parser -`pypy/interpreter/astcompiler/`_ interpreter-level bytecode compiler, +``pypy/interpreter/astcompiler/`` interpreter-level bytecode compiler, via an AST representation -`pypy/module/`_ contains `mixed modules`_ +``pypy/module/`` contains `mixed modules`_ implementing core modules with both application and interpreter level code. Not all are finished and working. Use @@ -39,55 +39,55 @@ or ``--allworkingmodules`` translation options. 
-`pypy/objspace/`_ `object space`_ implementations +``pypy/objspace/`` `object space`_ implementations -`pypy/objspace/std/`_ the StdObjSpace_ implementing CPython's +``pypy/objspace/std/`` the StdObjSpace_ implementing CPython's objects and types -`pypy/tool/`_ various utilities and hacks used +``pypy/tool/`` various utilities and hacks used from various places -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic +``pypy/tool/algo/`` general-purpose algorithmic and mathematic tools -`pypy/tool/pytest/`_ support code for our `testing methods`_ +``pypy/tool/pytest/`` support code for our `testing methods`_ -`rpython/annotator/`_ `type inferencing code`_ for +``rpython/annotator/`` `type inferencing code`_ for `RPython`_ programs -`rpython/config/`_ handles the numerous options for RPython +``rpython/config/`` handles the numerous options for RPython -`rpython/flowspace/`_ the FlowObjSpace_ implementing +``rpython/flowspace/`` the FlowObjSpace_ implementing `abstract interpretation`_ -`rpython/rlib/`_ a `"standard library"`_ for RPython_ +`rpython/rlib/`` a `"standard library"`_ for RPython_ programs -`rpython/rtyper/`_ the `RPython Typer`_ +``rpython/rtyper/`` the `RPython Typer`_ -`rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for +``rpython/rtyper/lltypesystem/`` the `low-level type system`_ for C-like backends -`rpython/memory/`_ the `garbage collector`_ construction +``rpython/memory/`` the `garbage collector`_ construction framework -`rpython/translator/`_ translation_ backends and support code +``rpython/translator/`` translation_ backends and support code -`rpython/translator/backendopt/`_ general optimizations that run before a +``rpython/translator/backendopt/`` general optimizations that run before a backend generates code -`rpython/translator/c/`_ the `GenC backend`_, producing C code +``rpython/translator/c/`` the `GenC backend`_, producing C code from an RPython program (generally via the rtyper_) -`pypy/goal/`_ our `main 
PyPy-translation scripts`_ +``pypy/goal/`` our `main PyPy-translation scripts`_ live here -`rpython/translator/tool/`_ helper tools for translation +``rpython/translator/tool/`` helper tools for translation -`dotviewer/`_ `graph viewer`_ +``dotviewer/`` `graph viewer`_ ``*/test/`` many directories have a test subdirectory containing test From noreply at buildbot.pypy.org Sat Apr 5 09:39:14 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:14 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: Backout changeset cb5d6686d43d7cef22b7511a30d2fc9907b77ff8 Message-ID: <20140405073914.6BE2A1C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70454:6cf6b79290f0 Date: 2014-04-03 11:57 +0200 http://bitbucket.org/pypy/pypy/changeset/6cf6b79290f0/ Log: Backout changeset cb5d6686d43d7cef22b7511a30d2fc9907b77ff8 diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -7,31 +7,31 @@ ================================= ============================================ Directory explanation/links ================================= ============================================ -``pypy/bin/`` command-line scripts, mainly +`pypy/bin/`_ command-line scripts, mainly `pypy/bin/pyinteractive.py`_ -``pypy/config/`` handles the numerous options for building +`pypy/config/`_ handles the numerous options for building and running PyPy -``pypy/doc/`` text versions of PyPy developer +`pypy/doc/`_ text versions of PyPy developer documentation -``pypy/doc/config/`` documentation for the numerous translation +`pypy/doc/config/`_ documentation for the numerous translation options -``pypy/doc/discussion/`` drafts of ideas and documentation +`pypy/doc/discussion/`_ drafts of ideas and documentation ``doc/*/`` other specific documentation topics or tools -``pypy/interpreter/`` `bytecode interpreter`_ and related objects +`pypy/interpreter/`_ `bytecode 
interpreter`_ and related objects (frames, functions, modules,...) -``pypy/interpreter/pyparser/`` interpreter-level Python source parser +`pypy/interpreter/pyparser/`_ interpreter-level Python source parser -``pypy/interpreter/astcompiler/`` interpreter-level bytecode compiler, +`pypy/interpreter/astcompiler/`_ interpreter-level bytecode compiler, via an AST representation -``pypy/module/`` contains `mixed modules`_ +`pypy/module/`_ contains `mixed modules`_ implementing core modules with both application and interpreter level code. Not all are finished and working. Use @@ -39,55 +39,55 @@ or ``--allworkingmodules`` translation options. -``pypy/objspace/`` `object space`_ implementations +`pypy/objspace/`_ `object space`_ implementations -``pypy/objspace/std/`` the StdObjSpace_ implementing CPython's +`pypy/objspace/std/`_ the StdObjSpace_ implementing CPython's objects and types -``pypy/tool/`` various utilities and hacks used +`pypy/tool/`_ various utilities and hacks used from various places -``pypy/tool/algo/`` general-purpose algorithmic and mathematic +`pypy/tool/algo/`_ general-purpose algorithmic and mathematic tools -``pypy/tool/pytest/`` support code for our `testing methods`_ +`pypy/tool/pytest/`_ support code for our `testing methods`_ -``rpython/annotator/`` `type inferencing code`_ for +`rpython/annotator/`_ `type inferencing code`_ for `RPython`_ programs -``rpython/config/`` handles the numerous options for RPython +`rpython/config/`_ handles the numerous options for RPython -``rpython/flowspace/`` the FlowObjSpace_ implementing +`rpython/flowspace/`_ the FlowObjSpace_ implementing `abstract interpretation`_ -`rpython/rlib/`` a `"standard library"`_ for RPython_ +`rpython/rlib/`_ a `"standard library"`_ for RPython_ programs -``rpython/rtyper/`` the `RPython Typer`_ +`rpython/rtyper/`_ the `RPython Typer`_ -``rpython/rtyper/lltypesystem/`` the `low-level type system`_ for +`rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for C-like 
backends -``rpython/memory/`` the `garbage collector`_ construction +`rpython/memory/`_ the `garbage collector`_ construction framework -``rpython/translator/`` translation_ backends and support code +`rpython/translator/`_ translation_ backends and support code -``rpython/translator/backendopt/`` general optimizations that run before a +`rpython/translator/backendopt/`_ general optimizations that run before a backend generates code -``rpython/translator/c/`` the `GenC backend`_, producing C code +`rpython/translator/c/`_ the `GenC backend`_, producing C code from an RPython program (generally via the rtyper_) -``pypy/goal/`` our `main PyPy-translation scripts`_ +`pypy/goal/`_ our `main PyPy-translation scripts`_ live here -``rpython/translator/tool/`` helper tools for translation +`rpython/translator/tool/`_ helper tools for translation -``dotviewer/`` `graph viewer`_ +`dotviewer/`_ `graph viewer`_ ``*/test/`` many directories have a test subdirectory containing test From noreply at buildbot.pypy.org Sat Apr 5 09:39:16 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:16 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: Merge pypy/default into latest-imporve-doc Message-ID: <20140405073916.B075F1C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70455:11ebaff897a8 Date: 2014-04-03 12:08 +0200 http://bitbucket.org/pypy/pypy/changeset/11ebaff897a8/ Log: Merge pypy/default into latest-imporve-doc diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -10,6 +10,7 @@ import tempfile import unittest import argparse +import gc from StringIO import StringIO @@ -47,7 +48,11 @@ def tearDown(self): os.chdir(self.old_dir) - shutil.rmtree(self.temp_dir, True) + gc.collect() + for root, dirs, files in os.walk(self.temp_dir, topdown=False): + for name in files: + 
os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_file.py b/lib-python/2.7/test/test_file.py --- a/lib-python/2.7/test/test_file.py +++ b/lib-python/2.7/test/test_file.py @@ -301,6 +301,7 @@ self.fail("readlines() after next() with empty buffer " "failed. Got %r, expected %r" % (line, testline)) # Reading after iteration hit EOF shouldn't hurt either + f.close() f = self.open(TESTFN, 'rb') try: for line in f: diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,3 +1,5 @@ +import imp, os + try: import cpyext except ImportError: @@ -10,4 +12,12 @@ pass # obscure condition of _ctypes_test.py being imported by py.test else: import _pypy_testcapi - _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') + cfile = '_ctypes_test.c' + thisdir = os.path.dirname(__file__) + output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + try: + fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) + imp.load_module('_ctypes_test', fp, filename, description) + except ImportError: + print('could not find _ctypes_test in %s' % output_dir) + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,5 +1,22 @@ import os, sys, imp -import tempfile +import tempfile, binascii + +def get_hashed_dir(cfile): + with open(cfile,'r') as fid: + content = fid.read() + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], content]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = 
k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + def _get_c_extension_suffix(): for ext, mod, typ in imp.get_suffixes(): diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,33 +1,17 @@ -import sys, tempfile, imp, binascii, os +import imp, os try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -def get_hashed_dir(cfile): - with open(cfile,'r') as fid: - content = fid.read() - # from cffi's Verifier() - key = '\x00'.join([sys.version[:3], content]) - if sys.version_info >= (3,): - key = key.encode('utf-8') - k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) - k1 = k1.lstrip('0x').rstrip('L') - k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) - k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) - if not os.path.exists(output_dir): - os.mkdir(output_dir) - return output_dir - +import _pypy_testcapi cfile = '_testcapimodule.c' thisdir = os.path.dirname(__file__) -output_dir = get_hashed_dir(os.path.join(thisdir, cfile)) +output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) imp.load_module('_testcapi', fp, filename, description) except ImportError: - import _pypy_testcapi _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -51,6 +51,8 @@ .. function:: int pypy_execute_source_ptr(char* source, void* ptr); + .. note:: Not available in PyPy <= 2.2.1 + Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. @@ -100,9 +102,29 @@ Worked! +.. 
note:: If the compilation fails because of missing PyPy.h header file, + you are running PyPy <= 2.2.1, please see the section `Missing PyPy.h`_. + +Missing PyPy.h +-------------- + +.. note:: PyPy.h is in the nightly builds and goes to new PyPy releases (>2.2.1). + +For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): + +.. code-block:: bash + + cd /opt/pypy/include + wget https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h + + More advanced example --------------------- +.. note:: This example depends on pypy_execute_source_ptr which is not available + in PyPy <= 2.2.1. You might want to see the alternative example + below. + Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. It's a bit longish, but it captures a gist what can be done with the PyPy @@ -161,6 +183,97 @@ is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` and fill the structure from Python side for the future use. +Alternative example +------------------- + +As ``pypy_execute_source_ptr`` is not available in PyPy 2.2.1, you might want to try +an alternative approach which relies on -export-dynamic flag to the GNU linker. +The downside to this approach is that it is platform dependent. + +.. 
code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + lib = ffi.verify('int callback(int (*func)(int));')\n\ + lib.callback(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source(source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + + +Make sure to pass -export-dynamic flag when compiling:: + + $ gcc -g -o x x.c -lpypy-c -L. -export-dynamic + $ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +Finding pypy_home +----------------- + +Function pypy_setup_home takes one parameter - the path to libpypy. There's +currently no "clean" way (pkg-config comes to mind) how to find this path. You +can try the following (GNU-specific) hack (don't forget to link against *dl*): + +.. 
code-block:: c + + #if !(_GNU_SOURCE) + #define _GNU_SOURCE + #endif + + #include + #include + #include + + // caller should free returned pointer to avoid memleaks + // returns NULL on error + char* guess_pypyhome() { + // glibc-only (dladdr is why we #define _GNU_SOURCE) + Dl_info info; + void *_rpython_startup_code = dlsym(0,"rpython_startup_code"); + if (_rpython_startup_code == 0) { + return 0; + } + if (dladdr(_rpython_startup_code, &info) != 0) { + const char* lib_path = info.dli_fname; + char* lib_realpath = realpath(lib_path, 0); + return lib_realpath; + } + return 0; + } + + Threading --------- diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -429,12 +429,27 @@ Could we use LLVM? ------------------ -There is a (static) translation backend using LLVM in the branch -``llvm-translation-backend``. It can translate PyPy with or without the JIT on -Linux. +In theory yes. But we tried to use it 5 or 6 times already, as a +translation backend or as a JIT backend --- and failed each time. -Using LLVM as our JIT backend looks interesting as well -- we made an attempt, -but it failed: LLVM has no way to patch the generated machine code. +In more details: using LLVM as a (static) translation backend is +pointless nowadays because you can generate C code and compile it with +clang. (Note that compiling PyPy with clang gives a result that is not +faster than compiling it with gcc.) We might in theory get extra +benefits from LLVM's GC integration, but this requires more work on the +LLVM side before it would be remotely useful. Anyway, it could be +interfaced via a custom primitive in the C code. (The latest such +experimental backend is in the branch ``llvm-translation-backend``, +which can translate PyPy with or without the JIT on Linux.) + +On the other hand, using LLVM as our JIT backend looks interesting as +well --- but again we made an attempt, and it failed: LLVM has no way to +patch the generated machine code. 
+ +So the position of the core PyPy developers is that if anyone wants to +make an N+1'th attempt with LLVM, they are welcome, and will be happy to +provide help in the IRC channel, but they are left with the burden of proof +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/stm.rst @@ -0,0 +1,244 @@ +====================== +Transactional Memory +====================== + +.. contents:: + + +This page is about ``pypy-stm``, a special in-development version of +PyPy which can run multiple independent CPU-hungry threads in the same +process in parallel. It is side-stepping what is known in the Python +world as the "global interpreter lock (GIL)" problem. + +"STM" stands for Software Transactional Memory, the technique used +internally. This page describes ``pypy-stm`` from the perspective of a +user, describes work in progress, and finally gives references to more +implementation details. + +This work was done by Remi Meier and Armin Rigo. + + +Introduction and current status +=============================== + +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats +listed below, it should be in theory within 25%-50% of the speed of +PyPy, comparing the JITting version in both cases. It is called STM for +Software Transactional Memory, which is the internal technique used (see +`Reference to implementation details`_). + +**pypy-stm requires 64-bit Linux for now.** + +Development is done in the branch `stmgc-c7`_. If you are only +interested in trying it out, you can download a Ubuntu 12.04 binary +here__. The current version supports four "segments", which means that +it will run up to four threads in parallel (in other words, you get a +GIL effect again, but only if trying to execute more than 4 threads). 
+ +To build a version from sources, you first need to compile a custom +version of clang; we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +for all checkouts). Then apply all the patches in `this directory`__: +they are fixes for the very extensive usage that pypy-stm does of a +clang-only feature (without them, you get crashes of clang). Then get +the branch `stmgc-c7`_ of PyPy and run:: + + rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py + +.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ +.. __: http://buildbot.pypy.org/nightly/stmgc-c7/ +.. __: http://clang.llvm.org/get_started.html +.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ + + +Caveats: + +* It should generally work. Please do `report bugs`_ that manifest as a + crash or wrong behavior (markedly different from the behavior of a + regular PyPy). Performance bugs are likely to be known issues; we're + working on them. + +* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, + which is "only" bad). Moreover, you should run it with a command like + ``pypy-stm --jit trace_limit=60000 args...``; the default value of + 6000 for ``trace_limit`` is currently too low (6000 should become + reasonable again as we improve). Also, in order to produce machine + code, the JIT needs to enter a special single-threaded mode for now. + This all means that you *will* get very bad performance results if + your program doesn't run for *many* seconds for now. + +* The GC is new; although clearly inspired by PyPy's regular GC, it + misses a number of optimizations for now. Programs allocating large + numbers of small objects that don't immediately die, as well as + programs that modify large lists or dicts, suffer from these missing + optimizations. + +* The GC has no support for destructors: the ``__del__`` method is + never called (including on file objects, which won't be closed for + you). 
This is of course temporary. + +* The STM system is based on very efficient read/write barriers, which + are mostly done (their placement could be improved a bit in + JIT-generated machine code). But the overall bookkeeping logic could + see more improvements (see Statistics_ below). + +* You can use `atomic sections`_, but the most visible missing thing is + that you don't get reports about the "conflicts" you get. This would + be the first thing that you need in order to start using atomic + sections more extensively. Also, for now: for better results, try to + explicitly force a transaction break just before (and possibly after) + each large atomic section, with ``time.sleep(0)``. + +* Forking the process is slow because the complete memory needs to be + copied manually right now. + +* Very long-running processes should eventually crash on an assertion + error because of a non-implemented overflow of an internal 29-bit + number, but this requires at the very least ten hours --- more + probably, several days or more. + +.. _`report bugs`: https://bugs.pypy.org/ + + + +Statistics +========== + +When a non-main thread finishes, you get statistics printed to stderr, +looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + spin loop 129512 0.094 s + +The first number is a counter; the second number gives the associated +time (the amount of real time that the thread was in this state; the sum +of all the times should be equal to the total time between the thread's +start and the thread's end). 
The most important points are "run +committed", which gives the amount of useful work, and "outside +transaction", which should give the time spent e.g. in library calls +(right now it seems to be a bit larger than that; to investigate). +Everything else is overhead of various forms. (Short-, medium- and +long-term future work involves reducing this overhead :-) + +These statistics are not printed out for the main thread, for now. + + +Atomic sections +=============== + +While one of the goal of pypy-stm is to give a GIL-free but otherwise +unmodified Python, the other goal is to push for a better way to use +multithreading. For this, you (as the Python programmer) get an API +in the ``__pypy__.thread`` submodule: + +* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in + a ``with __pypy__.thread.atomic:`` statement). It runs the whole + block of code without breaking the current transaction --- from + the point of view of a regular CPython/PyPy, this is equivalent to + saying that the GIL will not be released at all between the start and + the end of this block of code. + +The obvious usage is to use atomic blocks in the same way as one would +use locks: to protect changes to some shared data, you do them in a +``with atomic`` block, just like you would otherwise do them in a ``with +mylock`` block after ``mylock = thread.allocate_lock()``. This allows +you not to care about acquiring the correct locks in the correct order; +it is equivalent to having only one global lock. This is how +transactional memory is `generally described`__: as a way to efficiently +execute such atomic blocks, running them in parallel while giving the +illusion that they run in some serial order. + +.. __: http://en.wikipedia.org/wiki/Transactional_memory + +However, the less obvious intended usage of atomic sections is as a +wide-ranging replacement of explicit threads. 
You can turn a program +that is not multi-threaded at all into a program that uses threads +internally, together with large atomic sections to keep the behavior +unchanged. This capability can be hidden in a library or in the +framework you use; the end user's code does not need to be explicitly +aware of using threads. For a simple example of this, see +`lib_pypy/transaction.py`_. The idea is that if you have a program +where the function ``f(key, value)`` runs on every item of some big +dictionary, you can replace the loop with:: + + for key, value in bigdict.items(): + transaction.add(f, key, value) + transaction.run() + +This code runs the various calls to ``f(key, value)`` using a thread +pool, but every single call is done in an atomic section. The end +result is that the behavior should be exactly equivalent: you don't get +any extra multithreading issue. + +.. _`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py + +================== + +Other APIs in pypy-stm: + +* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" + in this pypy-stm. This is the limit above which more threads will not + be able to execute on more cores. (Right now it is limited to 4 due + to inter-segment overhead, but should be increased in the future. It + should also be settable, and the default value should depend on the + number of actual CPUs.) + +* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but + raises an exception if you attempt to nest it inside another + ``atomic``. + +* ``__pypy__.thread.signals_enabled``: a context manager that runs + its block with signals enabled. By default, signals are only + enabled in the main thread; a non-main thread will not receive + signals (this is like CPython). Enabling signals in non-main threads + is useful for libraries where threads are hidden and the end user is + not expecting his code to run elsewhere than in the main thread. 
+ +Note that all of this API is (or will be) implemented in a regular PyPy +too: for example, ``with atomic`` will simply mean "don't release the +GIL" and ``getsegmentlimit()`` will return 1. + +================== + + +Reference to implementation details +=================================== + +The core of the implementation is in a separate C library called stmgc_, +in the c7_ subdirectory. Please see the `README.txt`_ for more +information. + +.. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ +.. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ +.. _`README.txt`: https://bitbucket.org/pypy/stmgc/raw/default/c7/README.txt + +PyPy itself adds on top of it the automatic placement of read__ and write__ +barriers and of `"becomes-inevitable-now" barriers`__, the logic to +`start/stop transactions as an RPython transformation`__ and as +`supporting`__ `C code`__, and the support in the JIT (mostly as a +`transformation step on the trace`__ and generation of custom assembler +in `assembler.py`__). + +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/readbarrier.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/memory/gctransform/stmframework.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/inevitable.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/jitdriver.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.h +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -127,3 +127,10 @@ .. branch: win32-fixes4 fix more tests for win32 + +.. 
branch: latest-improve-doc +Fix broken links in documentation + +.. branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field \ No newline at end of file diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -690,7 +690,7 @@ def setup_bootstrap_path(executable): """ - Try to to as little as possible and to have the stdlib in sys.path. In + Try to do as little as possible and to have the stdlib in sys.path. In particular, we cannot use any unicode at this point, because lots of unicode operations require to be able to import encodings. """ diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -49,13 +49,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) @@ -3011,7 +3017,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def stmt_del_lineno(space, w_self): @@ -3038,7 +3045,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + 
w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def stmt_del_col_offset(space, w_self): @@ -3074,7 +3082,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def FunctionDef_del_name(space, w_self): @@ -3201,7 +3210,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def ClassDef_del_name(space, w_self): @@ -3665,7 +3675,8 @@ w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'nl') + # need to save the original object too + w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state |= 16 def Print_del_nl(space, w_self): @@ -4571,7 +4582,8 @@ w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'module') + # need to save the original object too + w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state |= 4 def ImportFrom_del_module(space, w_self): @@ -4620,7 +4632,8 @@ w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'level') + # need to save the original object too + w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state |= 16 def ImportFrom_del_level(space, w_self): @@ -4938,7 +4951,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def expr_del_lineno(space, 
w_self): @@ -4965,7 +4979,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def expr_del_col_offset(space, w_self): @@ -6292,7 +6307,8 @@ w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'n') + # need to save the original object too + w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state |= 4 def Num_del_n(space, w_self): @@ -6343,7 +6359,8 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 's') + # need to save the original object too + w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state |= 4 def Str_del_s(space, w_self): @@ -6423,7 +6440,8 @@ w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'attr') + # need to save the original object too + w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state |= 8 def Attribute_del_attr(space, w_self): @@ -6618,7 +6636,8 @@ w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'id') + # need to save the original object too + w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state |= 4 def Name_del_id(space, w_self): @@ -6853,7 +6872,8 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + # need to save the original object too + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Const_del_value(space, w_self): @@ -7521,7 +7541,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + 
# need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def excepthandler_del_lineno(space, w_self): @@ -7548,7 +7569,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def excepthandler_del_col_offset(space, w_self): @@ -7716,7 +7738,8 @@ w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'vararg') + # need to save the original object too + w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state |= 2 def arguments_del_vararg(space, w_self): @@ -7746,7 +7769,8 @@ w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'kwarg') + # need to save the original object too + w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state |= 4 def arguments_del_kwarg(space, w_self): @@ -7824,7 +7848,8 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'arg') + # need to save the original object too + w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 def keyword_del_arg(space, w_self): @@ -7905,7 +7930,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 1 def alias_del_name(space, w_self): @@ -7935,7 +7961,8 @@ w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'asname') + # need to save the original object too + w_self.setdictvalue(space, 'asname', w_new_value) 
w_self.initialization_state |= 2 def alias_del_asname(space, w_self): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -459,6 +459,7 @@ self.emit("raise OperationError(space.w_TypeError, " "space.w_None)", 3) else: + save_original_object = True level = 2 if field.opt and field.type.value != "int": self.emit("if space.is_w(w_new_value, space.w_None):", 2) @@ -596,13 +597,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -387,3 +387,40 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") + + def test_dict_astNode(self): + import ast + num_node = ast.Num(n=2, lineno=2, col_offset=3) + dict_res = num_node.__dict__ + + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Num_notfullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2) + assert num_node.n == 2 + assert num_node.lineno == 2 + num_node2 = copy.deepcopy(num_node) + + def test_issue1673_Num_fullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2,col_offset=3) + num_node2 = copy.deepcopy(num_node) + assert num_node.n == num_node2.n + assert 
num_node.lineno == num_node2.lineno + assert num_node.col_offset == num_node2.col_offset + dict_res = num_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Str(self): + import ast + import copy + str_node = ast.Str(n=2,lineno=2) + assert str_node.n == 2 + assert str_node.lineno == 2 + str_node2 = copy.deepcopy(str_node) + dict_res = str_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2} + \ No newline at end of file diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -254,6 +254,13 @@ if '__pypy__' in sys.builtin_module_names: assert repr(self.temppath) in g.getvalue() + def test_truncate(self): + f = self.file(self.temppath, "w") + f.write("foo") + f.close() + with self.file(self.temppath, 'r') as f: + raises(IOError, f.truncate, 100) + class AppTestNonblocking(object): def setup_class(cls): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,8 +64,6 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] - # tests are not strictly ansi C compliant, compile as C++ - kwds["compile_extra"].append("/TP") # prevent linking with PythonXX.lib w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -246,9 +246,9 @@ ("test_FromAny", "METH_NOARGS", ''' npy_intp dims[2] ={2, 3}; - PyObject * obj1 = PyArray_SimpleNew(2, dims, 1); + PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); PyArray_FILLWBYTE(obj1, 42); - PyObject * 
obj2 = _PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); + obj2 = _PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); Py_DECREF(obj1); return obj2; ''' @@ -256,9 +256,9 @@ ("test_FromObject", "METH_NOARGS", ''' npy_intp dims[2] ={2, 3}; - PyObject * obj1 = PyArray_SimpleNew(2, dims, 1); + PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); PyArray_FILLWBYTE(obj1, 42); - PyObject * obj2 = _PyArray_FromObject(obj1, 12, 0, 0); + obj2 = _PyArray_FromObject(obj1, 12, 0, 0); Py_DECREF(obj1); return obj2; ''' diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -238,8 +238,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *filenameObject = PyString_FromString("/path/to/file"); errno = EBADF; - PyObject *filenameObject = PyString_FromString("/path/to/file"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); Py_DECREF(filenameObject); return NULL; @@ -257,8 +257,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *intObject = PyInt_FromLong(3); errno = EBADF; - PyObject *intObject = PyInt_FromLong(3); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); Py_DECREF(intObject); return NULL; @@ -276,8 +276,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); errno = EBADF; - PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst); Py_DECREF(lst); return NULL; @@ -295,8 +295,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); errno = EBADF; - PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple); Py_DECREF(tuple); return NULL; @@ -314,8 +314,8 
@@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *none = Py_BuildValue(""); errno = EBADF; - PyObject *none = Py_BuildValue(""); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none); Py_DECREF(none); return NULL; diff --git a/pypy/module/test_lib_pypy/test_site_extra.py b/pypy/module/test_lib_pypy/test_site_extra.py --- a/pypy/module/test_lib_pypy/test_site_extra.py +++ b/pypy/module/test_lib_pypy/test_site_extra.py @@ -4,8 +4,11 @@ def test_preimported_modules(): lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', 'exceptions', 'signal', 'sys', 'zipimport'] - g = os.popen('"%s" -c "import sys; print sorted(sys.modules)"' % - (sys.executable,)) + if sys.platform == 'win32': + cmd = '%s' % (sys.executable,) + else: + cmd = '"%s"' % (sys.executable,) + g = os.popen(cmd + ' -c "import sys; print sorted(sys.modules)"') real_data = g.read() g.close() for name in lst: diff --git a/pypy/module/test_lib_pypy/test_testcapi.py b/pypy/module/test_lib_pypy/test_testcapi.py --- a/pypy/module/test_lib_pypy/test_testcapi.py +++ b/pypy/module/test_lib_pypy/test_testcapi.py @@ -8,7 +8,7 @@ def test_get_hashed_dir(): import sys # This should not compile _testcapi, so the output is empty - script = "import _testcapi; assert 'get_hashed_dir' in dir(_testcapi)" + script = "import _testcapi; assert 'get_hashed_dir' not in dir(_testcapi)" output = py.process.cmdexec('''"%s" -c "%s"''' % (sys.executable, script)) assert output == '' diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -151,6 +151,9 @@ '*.c', '*.o')) for file in ['LICENSE', 'README.rst']: shutil.copy(str(basedir.join(file)), str(pypydir)) + for file in ['_testcapimodule.c', '_ctypes_test.c']: + shutil.copyfile(str(basedir.join('lib_pypy', file)), + str(pypydir.join('lib_pypy', file))) # spdir = pypydir.ensure('site-packages', dir=True) 
shutil.copy(str(basedir.join('site-packages', 'README')), str(spdir)) diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -17,6 +17,8 @@ exe_name_in_archive = 'bin/pypy' pypy_c = py.path.local(pypydir).join('goal', basename) if not pypy_c.check(): + if sys.platform == 'win32': + assert False, "test on win32 requires exe" pypy_c.write("#!/bin/sh") pypy_c.chmod(0755) fake_pypy_c = True @@ -81,6 +83,8 @@ package.USE_ZIPFILE_MODULE = prev def test_fix_permissions(tmpdir): + if sys.platform == 'win32': + py.test.skip('needs to be more general for windows') def check(f, mode): assert f.stat().mode & 0777 == mode # diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -49,6 +49,11 @@ from rpython.rlib.rfile import create_file return ctx.appcall(create_file, *args_w) + at register_flow_sc(os.fdopen) +def sc_os_fdopen(ctx, *args_w): + from rpython.rlib.rfile import create_fdopen_rfile + return ctx.appcall(create_fdopen_rfile, *args_w) + @register_flow_sc(os.tmpfile) def sc_os_tmpfile(ctx): from rpython.rlib.rfile import create_temp_rfile diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -362,11 +362,18 @@ self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: if self._finish_gcmap: - self._finish_gcmap[0] |= r_uint(0) # r0 + # we're returning with a guard_not_forced_2, and + # additionally we need to say that r0 contains + # a reference too: + self._finish_gcmap[0] |= r_uint(0) gcmap = self._finish_gcmap else: gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) + elif self._finish_gcmap: + # we're returning with a guard_not_forced_2 
+ gcmap = self._finish_gcmap + self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather # keep that one and kill all the others diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -916,3 +916,73 @@ cpu.execute_token(token, 1, a)) assert getmap(frame).count('1') == 4 + def test_finish_without_gcmap(self): + cpu = self.cpu + + loop = self.parse(""" + [i0] + finish(i0, descr=finaldescr) + """, namespace={'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, 10)) + assert not frame.jf_gcmap + + def test_finish_with_trivial_gcmap(self): + cpu = self.cpu + + loop = self.parse(""" + [p0] + finish(p0, descr=finaldescr) + """, namespace={'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + n = lltype.nullptr(llmemory.GCREF.TO) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, n)) + assert getmap(frame) == '1' + + def test_finish_with_guard_not_forced_2_ref(self): + cpu = self.cpu + + loop = self.parse(""" + [p0, p1] + guard_not_forced_2(descr=faildescr) [p1] + finish(p0, descr=finaldescr) + """, namespace={'faildescr': BasicFailDescr(1), + 'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + n = lltype.nullptr(llmemory.GCREF.TO) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, n, n)) + assert getmap(frame).count('1') == 2 + + def 
test_finish_with_guard_not_forced_2_int(self): + cpu = self.cpu + + loop = self.parse(""" + [i0, p1] + guard_not_forced_2(descr=faildescr) [p1] + finish(i0, descr=finaldescr) + """, namespace={'faildescr': BasicFailDescr(1), + 'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + n = lltype.nullptr(llmemory.GCREF.TO) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, 10, n)) + assert getmap(frame).count('1') == 1 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1857,11 +1857,18 @@ arglist = op.getarglist() if arglist and arglist[0].type == REF: if self._finish_gcmap: - self._finish_gcmap[0] |= r_uint(1) # rax + # we're returning with a guard_not_forced_2, and + # additionally we need to say that eax/rax contains + # a reference too: + self._finish_gcmap[0] |= r_uint(1) gcmap = self._finish_gcmap else: gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) + elif self._finish_gcmap: + # we're returning with a guard_not_forced_2 + gcmap = self._finish_gcmap + self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather # keep that one and kill all the others diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -47,6 +47,7 @@ rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal(fileno, [lltype.Ptr(FILE)], rffi.INT) +c_fdopen = llexternal('fdopen', [rffi.INT, rffi.CCHARP], lltype.Ptr(FILE)) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) @@ -93,6 +94,17 @@ raise OSError(errno, 
os.strerror(errno)) return RFile(res) +def create_fdopen_rfile(fd, mode="r"): + assert mode is not None + ll_mode = rffi.str2charp(mode) + try: + ll_f = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) + if not ll_f: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + lltype.free(ll_mode, flavor='raw') + return RFile(ll_f) def create_popen_file(command, type): ll_command = rffi.str2charp(command) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -193,7 +193,7 @@ # Truncate. Note that this may grow the file! handle = get_osfhandle(fd) if not SetEndOfFile(handle): - raise WindowsError(GetLastError(), + raise OSError(GetLastError(), "Could not truncate file") finally: # we restore the file pointer position in any case diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -79,6 +79,22 @@ f() self.interpret(f, []) + def test_fdopen(self): + fname = str(self.tmpdir.join('file_4a')) + + def f(): + f = open(fname, "w") + new_fno = os.dup(f.fileno()) + f2 = os.fdopen(new_fno, "w") + f.close() + f2.write("xxx") + f2.close() + + f() + assert open(fname).read() == "xxx" + self.interpret(f, []) + assert open(fname).read() == "xxx" + def test_fileno(self): fname = str(self.tmpdir.join('file_5')) diff --git a/rpython/tool/logparser.py b/rpython/tool/logparser.py --- a/rpython/tool/logparser.py +++ b/rpython/tool/logparser.py @@ -410,7 +410,7 @@ total = sum([b for a, b in l]) for a, b in l: if a is None: - a = 'interpret' + a = 'normal-execution' s = " " * (50 - len(a)) print >>outfile, a, s, str(b*100/total) + "%" if out != '-': diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -11,6 +11,9 @@ def run_subprocess(executable, args, env=None, cwd=None): return _run(executable, 
args, env, cwd) +shell_default = False +if sys.platform == 'win32': + shell_default = True def _run(executable, args, env, cwd): # unless overridden below if isinstance(args, str): @@ -21,7 +24,9 @@ args = [str(executable)] else: args = [str(executable)] + args - shell = False + # shell=True on unix-like is a known security vulnerability, but + # on windows shell=True does not properly propogate the env dict + shell = shell_default # Just before spawning the subprocess, do a gc.collect(). This # should help if we are running on top of PyPy, if the subprocess diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -414,7 +414,8 @@ try: returncode, stdout, stderr = _run_subprocess( 'nmake', - ['/nologo', '/f', str(path.join('Makefile'))] + extra_opts) + ['/nologo', '/f', str(path.join('Makefile'))] + extra_opts, + env = self.c_environ) finally: oldcwd.chdir() From noreply at buildbot.pypy.org Sat Apr 5 09:39:18 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:18 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: docs: pypy/objspace/std/ *type.py -> *object.py Message-ID: <20140405073918.070A51C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70456:006293ce8bcb Date: 2014-04-03 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/006293ce8bcb/ Log: docs: pypy/objspace/std/ *type.py -> *object.py diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -348,15 +348,15 @@ * the *implementation* module, called ``xxxobject.py``. The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. 
`pypy/objspace/std/listtype.py`_ enumerates the methods +example, `pypy/objspace/std/listobject.py`_ contains the specification of the object you get when +you type ``list`` in a PyPy prompt. `pypy/objspace/std/listobject.py`_ enumerates the methods specific to lists, like ``append()``. A particular method implemented by all types is the ``__new__()`` special method, which in Python's new-style-classes world is responsible for creating an instance of the type. In PyPy, ``__new__()`` locates and imports the module implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ +arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupleobject.py`_ defines ``__new__()`` to import the class ``W_TupleObject`` from `pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a "real" implementation of tuples: the way the data is stored in the @@ -374,9 +374,9 @@ same Python type. PyPy knows that (e.g.) the application-level type of its interpreter-level ``W_StringObject`` instances is str because there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all +points back to the string type specification from `pypy/objspace/std/stringobject.py`_; all other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringtype.py`_. +`pypy/objspace/std/stringobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -491,7 +491,7 @@ Introduction ------------ -The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `rpython/flowspace/`_) is to generate a control-flow graph from a function. 
This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. From noreply at buildbot.pypy.org Sat Apr 5 09:39:19 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:19 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: removed reference to sprint pictures. Message-ID: <20140405073919.696B91C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70457:394eb4e6a4f2 Date: 2014-04-04 10:35 +0200 http://bitbucket.org/pypy/pypy/changeset/394eb4e6a4f2/ Log: removed reference to sprint pictures. sadly these pictures seem to be lost for now. diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -217,15 +217,13 @@ Its main focus is translation of the whole PyPy interpreter to a low level language and reaching 2.4.1 Python compliance. The goal of the sprint is to release a first self-contained -PyPy-0.7 version. Carl has written a report about `day 1 - 3`_, -there are `some pictures`_ online and a `heidelberg summary report`_ -detailing some of the works that led to the successful release -of `pypy-0.7.0`_! +PyPy-0.7 version. Carl has written a report about `day 1 - 3`_ +and a `heidelberg summary report`_ detailing some of the works +that led to the successful release of `pypy-0.7.0`_! .. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt .. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt .. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html -.. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ PyPy Hildesheim2 finished: first self-contained PyPy run! =========================================================== @@ -233,20 +231,16 @@ Up until 31st August we were in a PyPy sprint at `Trillke-Gut`_. 
Carl has written a `report about day 1`_, Holger about `day 2 and day 3`_ and Carl again about `day 4 and day 5`_, -On `day 6`_ Holger reports the `breakthrough`_: PyPy runs -on its own! Hurray_!. And Carl finally reports about the winding +On `day 6`_ Holger reports the breakthrough: PyPy runs +on its own! Hurray!. And Carl finally reports about the winding down of `day 7`_ which saw us relaxing, discussing and generally -having a good time. You might want to look at the selected -`pictures from the sprint`_. +having a good time. .. _`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html .. _`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html .. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html .. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html .. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html -.. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg -.. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html -.. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ .. 
_`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished From noreply at buildbot.pypy.org Sat Apr 5 09:39:20 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:20 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: add 'of historical interest only` warning to statistics page Message-ID: <20140405073920.A1E151C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70458:383dd9780295 Date: 2014-04-04 10:37 +0200 http://bitbucket.org/pypy/pypy/changeset/383dd9780295/ Log: add 'of historical interest only` warning to statistics page diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst --- a/pypy/doc/statistic/index.rst +++ b/pypy/doc/statistic/index.rst @@ -1,3 +1,7 @@ +.. warning:: + + This page is no longer updated, of historical interest only. + ======================= PyPy Project Statistics ======================= From noreply at buildbot.pypy.org Sat Apr 5 09:39:21 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:21 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: removed codespeak account information. Lacks relevancy nowadays. Message-ID: <20140405073921.E4F021C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70459:9b5a1e4cc1f6 Date: 2014-04-04 11:07 +0200 http://bitbucket.org/pypy/pypy/changeset/9b5a1e4cc1f6/ Log: removed codespeak account information. Lacks relevancy nowadays. diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -835,15 +835,6 @@ web interface. .. _`development tracker`: https://bugs.pypy.org/ - -use your codespeak login or register ------------------------------------- - -If you have an existing codespeak account, you can use it to login within the -tracker. Else, you can `register with the tracker`_ easily. - - -.. 
_`register with the tracker`: https://bugs.pypy.org/user?@template=register .. _`roundup`: http://roundup.sourceforge.net/ From noreply at buildbot.pypy.org Sat Apr 5 09:39:23 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:23 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: fixed overlooked codespeak.net url to eu-report D07.1 Message-ID: <20140405073923.386331C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70460:30817d785d84 Date: 2014-04-04 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/30817d785d84/ Log: fixed overlooked codespeak.net url to eu-report D07.1 diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.txt --- a/pypy/doc/config/translation.backendopt.txt +++ b/pypy/doc/config/translation.backendopt.txt @@ -1,5 +1,5 @@ This group contains options about various backend optimization passes. Most of them are described in the `EU report about optimization`_ -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU report about optimization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf From noreply at buildbot.pypy.org Sat Apr 5 09:39:24 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 5 Apr 2014 09:39:24 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: removed unused references in docs Message-ID: <20140405073924.754181C022D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70461:522cff267f2e Date: 2014-04-04 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/522cff267f2e/ Log: removed unused references in docs diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -356,8 +356,6 @@ .. 
_`transparent dynamic optimization`: http://www.hpl.hp.com/techreports/1999/HPL-1999-77.pdf .. _Dynamo: http://www.hpl.hp.com/techreports/1999/HPL-1999-78.pdf .. _testdesign: coding-guide.html#test-design -.. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html -.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ .. _IronPython: http://ironpython.codeplex.com/ .. _`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html From noreply at buildbot.pypy.org Sat Apr 5 09:39:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 09:39:25 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in groggi/pypy/latest-improve-doc (pull request #224) Message-ID: <20140405073925.C99C11C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70462:4c3f06367a60 Date: 2014-04-05 09:38 +0200 http://bitbucket.org/pypy/pypy/changeset/4c3f06367a60/ Log: Merged in groggi/pypy/latest-improve-doc (pull request #224) PyPy Docs: fighting broken links #2 diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -9,9 +9,3 @@ distribution.rst - dot-net.rst - - - - - diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -742,9 +742,9 @@ Testing modules in ``lib_pypy/`` -------------------------------- -You can go to the `lib_pypy/pypy_test/`_ directory and invoke the testing tool +You can go to the `pypy/module/test_lib_pypy/`_ directory and invoke the testing tool ("py.test" or "python ../../pypy/test_all.py") to run tests against the -lib_pypy hierarchy. Note, that tests in `lib_pypy/pypy_test/`_ are allowed +lib_pypy hierarchy. 
Note, that tests in `pypy/module/test_lib_pypy/`_ are allowed and encouraged to let their tests run at interpreter level although `lib_pypy/`_ modules eventually live at PyPy's application level. This allows us to quickly test our python-coded reimplementations @@ -835,15 +835,6 @@ web interface. .. _`development tracker`: https://bugs.pypy.org/ - -use your codespeak login or register ------------------------------------- - -If you have an existing codespeak account, you can use it to login within the -tracker. Else, you can `register with the tracker`_ easily. - - -.. _`register with the tracker`: https://bugs.pypy.org/user?@template=register .. _`roundup`: http://roundup.sourceforge.net/ diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst --- a/pypy/doc/config/opt.rst +++ b/pypy/doc/config/opt.rst @@ -46,5 +46,5 @@ The default level is `2`. -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser garbage collector`: http://hboehm.info/gc/ .. _`custom garbage collectors`: ../garbage_collection.html diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.txt --- a/pypy/doc/config/translation.backendopt.txt +++ b/pypy/doc/config/translation.backendopt.txt @@ -1,5 +1,5 @@ This group contains options about various backend optimization passes. Most of them are described in the `EU report about optimization`_ -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. 
_`EU report about optimization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,5 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - ============================= lib_pypy/distributed features ============================= diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -17,7 +17,7 @@ Read more in the `EuroPython 2006 sprint report`_. -.. _`EuroPython 2006 sprint report`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt +.. _`EuroPython 2006 sprint report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt PyPy at XP 2006 and Agile 2006 ================================================================== @@ -41,8 +41,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. 
_report: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.html -.. _announcement: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.txt PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -114,7 +114,7 @@ said they were interested in the outcome and would keep an eye on its progress. Read the `talk slides`_. -.. _`talk slides`: http://codespeak.net/pypy/extradoc/talk/solutions-linux-paris-2006.html +.. _`talk slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/solutions-linux-paris-2006.html PyPy Sprint in Palma De Mallorca 23rd - 29th January 2006 @@ -129,9 +129,9 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/sprint-announcement.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q1/002746.html -.. _`one for the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2006q1/002749.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.txt +.. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html +.. _`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html Preliminary EU reports released =============================== @@ -155,8 +155,8 @@ Michael and Carl have written a `report about the first half`_ and `one about the second half`_ of the sprint. *(12/18/2005)* -.. _`report about the first half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002656.html -.. _`one about the second half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002660.html +.. 
_`report about the first half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002656.html +.. _`one about the second half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002660.html PyPy release 0.8.0 =================== @@ -187,12 +187,12 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: http://codespeak.net/pypy/extradoc/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.txt .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style -.. _`report about day one`: http://codespeak.net/pipermail/pypy-dev/2005q4/002510.html -.. _`one about day two and three`: http://codespeak.net/pipermail/pypy-dev/2005q4/002512.html -.. _`the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2005q4/002514.html +.. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html +.. _`one about day two and three`: https://mail.python.org/pipermail/pypy-dev/2005-October/002512.html +.. _`the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2005-October/002514.html PyPy release 0.7.0 =================== @@ -217,15 +217,13 @@ Its main focus is translation of the whole PyPy interpreter to a low level language and reaching 2.4.1 Python compliance. The goal of the sprint is to release a first self-contained -PyPy-0.7 version. Carl has written a report about `day 1 - 3`_, -there are `some pictures`_ online and a `heidelberg summary report`_ -detailing some of the works that led to the successful release -of `pypy-0.7.0`_! +PyPy-0.7 version. Carl has written a report about `day 1 - 3`_ +and a `heidelberg summary report`_ detailing some of the works +that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.html -.. 
_`PyPy sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-sprint.html -.. _`day 1 - 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002287.html -.. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt +.. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html PyPy Hildesheim2 finished: first self-contained PyPy run! =========================================================== @@ -233,20 +231,16 @@ Up until 31st August we were in a PyPy sprint at `Trillke-Gut`_. Carl has written a `report about day 1`_, Holger about `day 2 and day 3`_ and Carl again about `day 4 and day 5`_, -On `day 6`_ Holger reports the `breakthrough`_: PyPy runs -on its own! Hurray_!. And Carl finally reports about the winding +On `day 6`_ Holger reports the breakthrough: PyPy runs +on its own! Hurray!. And Carl finally reports about the winding down of `day 7`_ which saw us relaxing, discussing and generally -having a good time. You might want to look at the selected -`pictures from the sprint`_. +having a good time. -.. _`report about day 1`: http://codespeak.net/pipermail/pypy-dev/2005q3/002217.html -.. _`day 2 and day 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002220.html -.. _`day 4 and day 5`: http://codespeak.net/pipermail/pypy-dev/2005q3/002234.html -.. _`day 6`: http://codespeak.net/pipermail/pypy-dev/2005q3/002239.html -.. _`day 7`: http://codespeak.net/pipermail/pypy-dev/2005q3/002245.html -.. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg -.. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html -.. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ +.. 
_`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html +.. _`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html +.. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html +.. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html +.. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html .. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished @@ -264,15 +258,15 @@ the LLVM backends and type inference in general. *(07/13/2005)* -.. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html -.. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html -.. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev +.. _`day 1`: https://mail.python.org/pipermail/pypy-dev/2005-June/002169.html +.. _`day 2`: https://mail.python.org/pipermail/pypy-dev/2005-June/002171.html +.. _`day 3`: https://mail.python.org/pipermail/pypy-dev/2005-June/002172.html +.. _`pypy-dev`: https://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-announcement.html -.. _`list of people coming`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-people.html +.. _`sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-announcement.html +.. _`list of people coming`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-people.html Duesseldorf PyPy sprint 2-9 June 2006 ================================================================== @@ -285,8 +279,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. 
_`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.txt +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy at XP 2006 and Agile 2006 diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -79,7 +79,7 @@ .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf -.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 +.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf .. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf @@ -356,8 +356,6 @@ .. _`transparent dynamic optimization`: http://www.hpl.hp.com/techreports/1999/HPL-1999-77.pdf .. _Dynamo: http://www.hpl.hp.com/techreports/1999/HPL-1999-78.pdf .. _testdesign: coding-guide.html#test-design -.. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html -.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ .. _IronPython: http://ironpython.codeplex.com/ .. 
_`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -222,7 +222,7 @@ PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use for other projects. +`_ and use for other projects. The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -264,7 +264,7 @@ interpreter. .. _`py.test testing tool`: http://pytest.org -.. _`py.test usage and invocations`: http://pytest.org/usage.html#usage +.. _`py.test usage and invocations`: http://pytest.org/latest/usage.html#usage Special Introspection Features of the Untranslated Python Interpreter --------------------------------------------------------------------- diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - .. _glossary: ******** diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -256,7 +256,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -185,6 +185,6 @@ .. _`standard object space`: objspace.html#the-standard-object-space .. 
[D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy - EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf + EU-Report, 2007, https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf .. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -348,15 +348,15 @@ * the *implementation* module, called ``xxxobject.py``. The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods +example, `pypy/objspace/std/listobject.py`_ contains the specification of the object you get when +you type ``list`` in a PyPy prompt. `pypy/objspace/std/listobject.py`_ enumerates the methods specific to lists, like ``append()``. A particular method implemented by all types is the ``__new__()`` special method, which in Python's new-style-classes world is responsible for creating an instance of the type. In PyPy, ``__new__()`` locates and imports the module implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ +arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupleobject.py`_ defines ``__new__()`` to import the class ``W_TupleObject`` from `pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a "real" implementation of tuples: the way the data is stored in the @@ -374,9 +374,9 @@ same Python type. PyPy knows that (e.g.) 
the application-level type of its interpreter-level ``W_StringObject`` instances is str because there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all +points back to the string type specification from `pypy/objspace/std/stringobject.py`_; all other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringtype.py`_. +`pypy/objspace/std/stringobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -491,7 +491,7 @@ Introduction ------------ -The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `rpython/flowspace/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. 
_`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py +.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/extfunc.py diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,30 +42,30 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): http://codespeak.net/pypy/extradoc/sprintinfo/HildesheimReport.html - .. _Gothenburg (May 2003): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2003-sprintreport.txt - .. _LovainLaNeuve (June 2003): http://codespeak.net/pypy/extradoc/sprintinfo/LouvainLaNeuveReport.txt - .. _Berlin (Sept 2003): http://codespeak.net/pypy/extradoc/sprintinfo/BerlinReport.txt - .. _Amsterdam (Dec 2003): http://codespeak.net/pypy/extradoc/sprintinfo/AmsterdamReport.txt - .. _Vilnius (Nov 2004): http://codespeak.net/pypy/extradoc/sprintinfo/vilnius-2004-sprintreport.txt - .. _Leysin (Jan 2005): http://codespeak.net/pypy/extradoc/sprintinfo/LeysinReport.txt - .. _PyCon/Washington (March 2005): http://codespeak.net/pypy/extradoc/sprintinfo/pycon_sprint_report.txt - .. _Europython/Gothenburg (June 2005): http://codespeak.net/pypy/extradoc/sprintinfo/ep2005-sprintreport.txt - .. _Hildesheim (July 2005): http://codespeak.net/pypy/extradoc/sprintinfo/hildesheim2005-sprintreport.txt - .. _Heidelberg (Aug 2005): http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.txt - .. _Paris (Oct 2005): http://codespeak.net/pypy/extradoc/sprintinfo/paris/paris-report.txt - .. _Gothenburg (Dec 2005): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt - .. _Mallorca (Jan 2006): http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/mallorca-sprintreport.txt - .. _LouvainLaNeuve (March 2006): http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.txt - .. 
_Leysin (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2006-sprintreport.txt - .. _Tokyo (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/sprint-report.txt - .. _Düsseldorf (June 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/report1.txt - .. _Europython/Geneva (July 2006): http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt - .. _Düsseldorf (October 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/report.txt - .. _`Leysin (January 2007)`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/report.txt - .. _Hildesheim (Feb 2007): http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/sprint-report.txt - .. _`EU report writing sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/eu-report-sprint-report.txt - .. _`PyCon/Dallas (Feb 2006)`: http://codespeak.net/pypy/extradoc/sprintinfo/pycon06/sprint-report.txt + .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.txt + .. _Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt + .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt + .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt + .. _Amsterdam (Dec 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/AmsterdamReport.txt + .. _Vilnius (Nov 2004): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/vilnius-2004-sprintreport.txt + .. _Leysin (Jan 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LeysinReport.txt + .. _PyCon/Washington (March 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon_sprint_report.txt + .. _Europython/Gothenburg (June 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ep2005-sprintreport.txt + .. 
_Hildesheim (July 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/hildesheim2005-sprintreport.txt + .. _Heidelberg (Aug 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt + .. _Paris (Oct 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris/paris-report.txt + .. _Gothenburg (Dec 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt + .. _Mallorca (Jan 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/mallorca-sprintreport.txt + .. _LouvainLaNeuve (March 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt + .. _Leysin (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2006-sprintreport.txt + .. _Tokyo (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/tokyo/sprint-report.txt + .. _Düsseldorf (June 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/report1.txt + .. _Europython/Geneva (July 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt + .. _Düsseldorf (October 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006b/report.txt + .. _`Leysin (January 2007)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2007/report.txt + .. _Hildesheim (Feb 2007): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/sprint-report.txt + .. _`EU report writing sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/eu-report-sprint-report.txt + .. _`PyCon/Dallas (Feb 2006)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon06/sprint-report.txt .. _`Göteborg (November 2007)`: http://morepypy.blogspot.com/2007_11_01_archive.html .. _`Leysin (January 2008)`: http://morepypy.blogspot.com/2008/01/leysin-winter-sport-sprint-started.html .. 
_`Berlin (May 2008)`: http://morepypy.blogspot.com/2008_05_01_archive.html diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst --- a/pypy/doc/statistic/index.rst +++ b/pypy/doc/statistic/index.rst @@ -1,3 +1,7 @@ +.. warning:: + + This page is no longer updated, of historical interest only. + ======================= PyPy Project Statistics ======================= diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -380,7 +380,7 @@ The RPython Typer ================= -https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ The RTyper is the first place where the choice of backend makes a difference; as outlined above we are assuming that ANSI C is the target. @@ -603,7 +603,7 @@ - using the `Boehm-Demers-Weiser conservative garbage collector`_ - using one of our custom `exact GCs implemented in RPython`_ -.. _`Boehm-Demers-Weiser conservative garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser conservative garbage collector`: http://hboehm.info/gc/ .. _`exact GCs implemented in RPython`: garbage_collection.html Almost all application-level Python code allocates objects at a very fast @@ -621,7 +621,7 @@ The C Back-End ============== -https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -86,7 +86,7 @@ option (this is the default at some optimization levels like ``-O1``, but unneeded for high-performance translations like ``-O2``). 
You may get it at -http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_source/gc-7.1.tar.gz +http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with pypy. Unpack this folder in the base directory. Then open a command From noreply at buildbot.pypy.org Sat Apr 5 09:56:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 09:56:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Rewrite parts of objspace.rst's "Object types" now that the xxxtype.py files have mostly disappeared. Message-ID: <20140405075653.18BEB1C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70463:b0013db39761 Date: 2014-04-05 09:56 +0200 http://bitbucket.org/pypy/pypy/changeset/b0013db39761/ Log: Rewrite parts of objspace.rst's "Object types" now that the xxxtype.py files have mostly disappeared. Other fixes in order to run pypy/doc/tool/makeref.py. diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -4,7 +4,6 @@ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py -.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ .. _`pypy/bin/pyinteractive.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/pyinteractive.py @@ -35,7 +34,6 @@ .. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. 
_`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py .. _`pypy/interpreter/pyparser`: @@ -49,21 +47,21 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. _`pypy/module/cppyy/capi/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/__init__.py +.. _`pypy/module/cppyy/capi/builtin_capi.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/builtin_capi.py +.. _`pypy/module/cppyy/include/capi.h`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/include/capi.h +.. _`pypy/module/test_lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/test_lib_pypy/ .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ -.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ .. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ -.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. _`pypy/objspace/std/bytesobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/bytesobject.py .. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py .. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. 
_`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py -.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. _`pypy/objspace/std/strbufobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/strbufobject.py .. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py -.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py -.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py .. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ -.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`rpython/annotator`: .. _`rpython/annotator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/annotator/ @@ -75,6 +73,11 @@ .. _`rpython/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/config/translationoption.py .. _`rpython/flowspace/`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/ .. _`rpython/flowspace/model.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/model.py +.. _`rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/ +.. _`rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/generation.py +.. _`rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/hybrid.py +.. _`rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/minimarkpage.py +.. 
_`rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/semispace.py .. _`rpython/rlib`: .. _`rpython/rlib/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/ .. _`rpython/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/listsort.py @@ -93,16 +96,12 @@ .. _`rpython/rtyper/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ .. _`rpython/rtyper/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/ .. _`rpython/rtyper/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/lltype.py -.. _`rpython/rtyper/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/ -.. _`rpython/rtyper/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/generation.py -.. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py -.. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py -.. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py .. _`rpython/rtyper/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rtyper.py .. _`rpython/rtyper/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/test/test_llinterp.py +.. _`rpython/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/rpython/tool/algo/ .. _`rpython/translator`: .. _`rpython/translator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/ .. 
_`rpython/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/backendopt/ diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst --- a/pypy/doc/cppyy_backend.rst +++ b/pypy/doc/cppyy_backend.rst @@ -51,3 +51,6 @@ to ``PATH``). In case of the former, include files are expected under ``$ROOTSYS/include`` and libraries under ``$ROOTSYS/lib``. + + +.. include:: _ref.txt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -47,7 +47,7 @@ `pypy/tool/`_ various utilities and hacks used from various places -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic +`rpython/tool/algo/`_ general-purpose algorithmic and mathematic tools `pypy/tool/pytest/`_ support code for our `testing methods`_ @@ -129,3 +129,4 @@ .. _Mono: http://www.mono-project.com/ .. _`"standard library"`: rlib.html .. _`graph viewer`: getting-started-dev.html#try-out-the-translator +.. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -339,44 +339,35 @@ Object types ------------ -The larger part of the `pypy/objspace/std/`_ package defines and implements the -library of Python's standard built-in object types. Each type (int, float, -list, tuple, str, type, etc.) is typically implemented by two modules: +The larger part of the `pypy/objspace/std/`_ package defines and +implements the library of Python's standard built-in object types. Each +type ``xxx`` (int, float, list, tuple, str, type, etc.) is typically +implemented in the module ``xxxobject.py``. -* the *type specification* module, which for a type ``xxx`` is called ``xxxtype.py``; +The ``W_AbstractXxxObject`` class, when present, is the abstract base +class, which mainly defines what appears on the Python-level type +object. 
There are then actual implementations as subclasses, which are +called ``W_XxxObject`` or some variant for the cases where we have +several different implementations. For example, +`pypy/objspace/std/bytesobject.py`_ defines ``W_AbstractBytesObject``, +which contains everything needed to build the ``str`` app-level type; +and there are subclasses ``W_BytesObject`` (the usual string) and +``W_StringBufferObject`` (a special implementation tweaked for repeated +additions, in `pypy/objspace/std/strbufobject.py`_). For mutable data +types like lists and dictionaries, we have a single class +``W_ListObject`` or ``W_DictMultiObject`` which has an indirection to +the real data and a strategy; the strategy can change as the content of +the object changes. -* the *implementation* module, called ``xxxobject.py``. - -The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listobject.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `pypy/objspace/std/listobject.py`_ enumerates the methods -specific to lists, like ``append()``. - -A particular method implemented by all types is the ``__new__()`` special -method, which in Python's new-style-classes world is responsible for creating -an instance of the type. In PyPy, ``__new__()`` locates and imports the module -implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupleobject.py`_ -defines ``__new__()`` to import the class ``W_TupleObject`` from -`pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a -"real" implementation of tuples: the way the data is stored in the -``W_TupleObject`` class, how the operations work, etc. - -The goal of the above module layout is to cleanly separate the Python -type object, visible to the user, and the actual implementation of its -instances. 
It is possible to provide *several* implementations of the -instances of the same Python type, by writing several ``W_XxxObject`` -classes. Every place that instantiates a new object of that Python type -can decide which ``W_XxxObject`` class to instantiate. - -From the user's point of view, the multiple internal ``W_XxxObject`` -classes are not visible: they are still all instances of exactly the -same Python type. PyPy knows that (e.g.) the application-level type of -its interpreter-level ``W_StringObject`` instances is str because -there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringobject.py`_; all -other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringobject.py`_. +From the user's point of view, even when there are several +``W_AbstractXxxObject`` subclasses, this is not visible: at the +app-level, they are still all instances of exactly the same Python type. +PyPy knows that (e.g.) the application-level type of its +interpreter-level ``W_BytesObject`` instances is str because there is a +``typedef`` class attribute in ``W_BytesObject`` which points back to +the string type specification from `pypy/objspace/std/bytesobject.py`_; +all other implementations of strings use the same ``typedef`` from +`pypy/objspace/std/bytesobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -387,6 +378,9 @@ Multimethods ------------ +*Note: multimethods are on the way out. Although they look cool, +they failed to provide enough benefits.* + The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. For a description of the multimethod variant that we implemented and which features it supports, diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -194,9 +194,9 @@ unchanged. 
This capability can be hidden in a library or in the framework you use; the end user's code does not need to be explicitly aware of using threads. For a simple example of this, see -`lib_pypy/transaction.py`_. The idea is that if you have a program -where the function ``f(key, value)`` runs on every item of some big -dictionary, you can replace the loop with:: +`transaction.py`_ in ``lib_pypy``. The idea is that if you have a +program where the function ``f(key, value)`` runs on every item of some +big dictionary, you can replace the loop with:: for key, value in bigdict.items(): transaction.add(f, key, value) @@ -217,7 +217,7 @@ is likely to be found, and communicates it to the system, using for example the ``transaction.add()`` scheme. -.. _`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +.. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py .. _OpenMP: http://en.wikipedia.org/wiki/OpenMP ================== From noreply at buildbot.pypy.org Sat Apr 5 10:35:41 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 5 Apr 2014 10:35:41 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Only run_fork_hooks('parent') if we called run_fork_hooks('before'), Message-ID: <20140405083541.368281C0433@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r70464:22c14713d318 Date: 2014-04-05 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/22c14713d318/ Log: Only run_fork_hooks('parent') if we called run_fork_hooks('before'), otherwise we get strange errors about the Import lock when a parameter does not have the expected type. 
diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py --- a/pypy/module/_posixsubprocess/interp_subprocess.py +++ b/pypy/module/_posixsubprocess/interp_subprocess.py @@ -167,36 +167,37 @@ run_fork_hooks('before', space) try: - pid = os.fork() - except OSError, e: - raise wrap_oserror(space, e) + try: + pid = os.fork() + except OSError, e: + raise wrap_oserror(space, e) - if pid == 0: - # Child process - # Code from here to _exit() must only use - # async-signal-safe functions, listed at `man 7 signal` - # http://www.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html. - if not space.is_none(w_preexec_fn): - # We'll be calling back into Python later so we need - # to do this. This call may not be async-signal-safe - # but neither is calling back into Python. The user - # asked us to use hope as a strategy to avoid - # deadlock... - run_fork_hooks('child', space) + if pid == 0: + # Child process + # Code from here to _exit() must only use + # async-signal-safe functions, listed at `man 7 signal` + # http://www.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html. + if not space.is_none(w_preexec_fn): + # We'll be calling back into Python later so we need + # to do this. This call may not be async-signal-safe + # but neither is calling back into Python. The user + # asked us to use hope as a strategy to avoid + # deadlock... 
+ run_fork_hooks('child', space) - c_child_exec( - l_exec_array, l_argv, l_envp, l_cwd, - p2cread, p2cwrite, c2pread, c2pwrite, - errread, errwrite, errpipe_read, errpipe_write, - close_fds, restore_signals, call_setsid, - l_fds_to_keep, len(fds_to_keep), - PreexecCallback.run_function, None) - os._exit(255) + c_child_exec( + l_exec_array, l_argv, l_envp, l_cwd, + p2cread, p2cwrite, c2pread, c2pwrite, + errread, errwrite, errpipe_read, errpipe_write, + close_fds, restore_signals, call_setsid, + l_fds_to_keep, len(fds_to_keep), + PreexecCallback.run_function, None) + os._exit(255) + finally: + # parent process + run_fork_hooks('parent', space) - # parent process finally: - run_fork_hooks('parent', space) - preexec.w_preexec_fn = None if l_cwd: From noreply at buildbot.pypy.org Sat Apr 5 11:08:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 11:08:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Remove tasks done, and add new ones Message-ID: <20140405090816.8CFD31C303A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70465:265041841734 Date: 2014-04-05 11:07 +0200 http://bitbucket.org/pypy/pypy/changeset/265041841734/ Log: Remove tasks done, and add new ones diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -28,12 +28,6 @@ ------------------------------------------------------------ -reintroduce 'stm_ignored', disabled in 4294a7789103 for causing -a rare bug in ll_strhash() shown by http://bpaste.net/show/190868/ -failing after a few minutes (or just running translate.py) - ------------------------------------------------------------- - __pypy__.thread.getsegmentlimit(): XXX This limit is so far a compile time option (STM_NB_SEGMENTS in @@ -43,21 +37,42 @@ ------------------------------------------------------------ -remap the group of read markers corresponding to a nusery into -a single physical "garbage" page - ------------------------------------------------------------- - JIT: add an artificial malloc if 
the loop is so small as to contain any! ------------------------------------------------------------ -**URGENT** -become_inevitable: getarrayitem/raw, for the jit/metainterp/counters.py +weakrefs stay alive longer than expected:: + + y = some object that was already alive for a while + x = ref(y) + del y + gc.collect() # y doesn't die: it's needed if we abort + assert x() is None # so this assert fails + +A dying weakref might be a cross-transaction way to exchange +information when there should be none:: + + thread 1: + if : + x = some_weakref() + ... + + thread 2: + gc.collect() # will kill some_weakref() but only if + # thread 1 did not, so far, read it + if some_weakref() is None: + ... ------------------------------------------------------------ +missing recursion detection (both in interpreted and JITted mode) + +------------------------------------------------------------ + +change the limit of 1 GB + +------------------------------------------------------------ From noreply at buildbot.pypy.org Sat Apr 5 11:08:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 11:08:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Add another caveat Message-ID: <20140405090843.5ED2D1C303A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70466:ee5afdc78d27 Date: 2014-04-05 11:07 +0200 http://bitbucket.org/pypy/pypy/changeset/ee5afdc78d27/ Log: Add another caveat diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -80,6 +80,10 @@ * So far, small examples work fine, but there are still a number of bugs. We're busy fixing them. +* Currently limited to 1.5 GB of RAM (this is just a parameter in + `core.h`__). Memory overflows are not detected correctly, so may + cause segmentation faults. + * The JIT warm-up time is abysmal (as opposed to the regular PyPy's, which is "only" bad). 
Moreover, you should run it with a command like ``pypy-stm --jit trace_limit=60000 args...``; the default value of @@ -120,6 +124,7 @@ probably, several days or more. .. _`report bugs`: https://bugs.pypy.org/ +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h From noreply at buildbot.pypy.org Sat Apr 5 11:15:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 11:15:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Mention weakrefs Message-ID: <20140405091517.8EB891C0433@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70467:3471ca7a93f8 Date: 2014-04-05 11:14 +0200 http://bitbucket.org/pypy/pypy/changeset/3471ca7a93f8/ Log: Mention weakrefs diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -99,9 +99,11 @@ programs that modify large lists or dicts, suffer from these missing optimizations. -* The GC has no support for destructors: the ``__del__`` method is - never called (including on file objects, which won't be closed for - you). This is of course temporary. +* The GC has no support for destructors: the ``__del__`` method is never + called (including on file objects, which won't be closed for you). + This is of course temporary. Also, weakrefs might appear to work a + bit strangely for now (staying alive even though ``gc.collect()``, or + even dying but then un-dying for a short time before dying again). 
* The STM system is based on very efficient read/write barriers, which are mostly done (their placement could be improved a bit in From noreply at buildbot.pypy.org Sat Apr 5 12:32:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 12:32:17 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: current status Message-ID: <20140405103217.855151C14DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70468:f92929e936c1 Date: 2014-04-05 12:31 +0200 http://bitbucket.org/pypy/pypy/changeset/f92929e936c1/ Log: current status diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -64,6 +64,17 @@ if some_weakref() is None: ... +It might be enough to apply these rules: (1) an explicit gc.collect() +turns the transaction inevitable first; (2) if any non-inevitable +transaction has *read* the weakref yet, then its target remains alive. + +This might require a tweak to consider an object as dead (for the +purposes of weakrefs) if it's only reachable via the old version of an +old_modified_object in the inevitable transaction: in this case, +other transaction may still reach the objects in question, so it +shouldn't be deallocted just now, but by doing so they will put +themselves in a situation where they necessarily abort. 
+ ------------------------------------------------------------ missing recursion detection (both in interpreted and JITted mode) From noreply at buildbot.pypy.org Sat Apr 5 14:38:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 14:38:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Detail Message-ID: <20140405123805.8F1CB1C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70469:a8d706f379cf Date: 2014-04-05 14:37 +0200 http://bitbucket.org/pypy/pypy/changeset/a8d706f379cf/ Log: Detail diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -567,7 +567,8 @@ # common case: this is not a guard_value, and we are not # already busy tracing. The rest of self.status stores a # valid per-guard index in the jitcounter. - hash = self.status & self.ST_SHIFT_MASK + hash = self.status + assert hash == (self.status & self.ST_SHIFT_MASK) # # do we have the BUSY flag? If so, we're tracing right now, e.g. in an # outer invocation of the same function, so don't trace again for now. From noreply at buildbot.pypy.org Sat Apr 5 14:52:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 14:52:37 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Fix the sentence. Also more optimistically give the upper bound at 40% Message-ID: <20140405125237.EB2631C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5185:66474b6f7106 Date: 2014-04-05 14:52 +0200 http://bitbucket.org/pypy/extradoc/changeset/66474b6f7106/ Log: Fix the sentence. Also more optimistically give the upper bound at 40% rather than 50%. diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -86,8 +86,9 @@ available frameworks to use the new feature. The developers will be Armin Rigo and Remi Meier and possibly others. 
-We currently estimate the final performance goal at 25% to 50% of the -speed of the regular PyPy in fully serial applications. (This goal has +We currently estimate the final performance goal to be a slow-down of +25% to 40%, i.e. running a fully serial application would take between +1.25 and 1.40x the time it takes in a regular PyPy. (This goal has been reached already in some cases, but we need to make this result more broadly applicable.) We feel confident that it can work, in the following sense: the performance of PyPy-TM running any suitable From noreply at buildbot.pypy.org Sat Apr 5 18:40:34 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 5 Apr 2014 18:40:34 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: intermediate checkin: start removing resume_at_jump_descr Message-ID: <20140405164034.629871C10C6@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70470:3d10aa7553c0 Date: 2014-04-05 18:39 +0200 http://bitbucket.org/pypy/pypy/changeset/3d10aa7553c0/ Log: intermediate checkin: start removing resume_at_jump_descr it was used in unroll.py as the target to jump to for newly created guards. this was hugely complicated and I suspect actually wrong in some cases. Instead, generate a dummy guard before a jump. This guard is removed by optimizeopt, but its descr can be used by unroll when inventing new guards. almost finished, but some tests show differences in the loop enter count, still to be investigated. diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -106,7 +106,7 @@ def compile_loop(metainterp, greenkey, start, inputargs, jumpargs, - resume_at_jump_descr, full_preamble_needed=True, + full_preamble_needed=True, try_disabling_unroll=False): """Try to compile a new procedure by closing the current history back to the first operation. 
@@ -128,7 +128,6 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] h_ops = history.operations - part.resume_at_jump_descr = resume_at_jump_descr part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ [h_ops[i].clone() for i in range(start, len(h_ops))] + \ [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] @@ -187,7 +186,7 @@ def compile_retrace(metainterp, greenkey, start, inputargs, jumpargs, - resume_at_jump_descr, partial_trace, resumekey): + partial_trace, resumekey): """Try to compile a new procedure by closing the current history back to the first operation. """ @@ -203,7 +202,6 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] - part.resume_at_jump_descr = resume_at_jump_descr h_ops = history.operations part.operations = [partial_trace.operations[-1]] + \ @@ -764,7 +762,7 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey, resume_at_jump_descr=None): +def compile_trace(metainterp, resumekey): """Try to compile a new bridge leading from the beginning of the history to some existing place. 
""" @@ -780,7 +778,6 @@ # clone ops, as optimize_bridge can mutate the ops new_trace.operations = [op.clone() for op in metainterp.history.operations] - new_trace.resume_at_jump_descr = resume_at_jump_descr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -628,7 +628,6 @@ call_pure_results = None logops = None quasi_immutable_deps = None - resume_at_jump_descr = None def _token(*args): raise Exception("TreeLoop.token is killed") diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -543,6 +543,9 @@ return self.emit_operation(op) + def optimize_GUARD_FUTURE_CONDITION(self, op): + pass # just remove it + def optimize_INT_FLOORDIV(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -61,6 +61,9 @@ op.setdescr(descr.target_tokens[0]) self.emit_operation(op) + def optimize_GUARD_FUTURE_CONDITION(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) OptSimplify.propagate_forward = dispatch_opt diff --git a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,6 +1,6 @@ from __future__ import with_statement from rpython.jit.metainterp.optimizeopt.test.test_util import ( - 
LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot, + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeMetaInterpStaticData) from rpython.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from rpython.jit.metainterp.resoperation import rop, opname, ResOperation @@ -8,6 +8,8 @@ from py.test import raises from rpython.jit.metainterp.optimizeopt.optimizer import Optimization from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.optimizeopt.heap import OptHeap +from rpython.jit.metainterp.optimizeopt.rewrite import OptRewrite class BaseTestMultiLabel(BaseTest): @@ -20,7 +22,6 @@ part = TreeLoop('part') part.inputargs = loop.inputargs - part.resume_at_jump_descr = FakeDescrWithSnapshot() token = loop.original_jitcell_token optimized = TreeLoop('optimized') @@ -42,6 +43,7 @@ operations.append(label) part.operations = operations + self.add_guard_future_condition(part) self._do_optimize_loop(part, None) if part.operations[-1].getopnum() == rop.LABEL: last_label = [part.operations.pop()] @@ -502,7 +504,7 @@ self.loop = loop loop.call_pure_results = args_dict() metainterp_sd = FakeMetaInterpStaticData(self.cpu) - optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) + optimize_unroll(metainterp_sd, loop, [OptRewrite(), OptRenameStrlen(), OptHeap(), OptPure()], True) def test_optimizer_renaming_boxes1(self): ops = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -51,7 +51,8 @@ if expected_preamble: expected_preamble = self.parse(expected_preamble) if expected_short: - expected_short = self.parse(expected_short) + # the short preamble doesn't have fail descrs, they are patched in when it is used + expected_short = self.parse(expected_short, 
want_fail_descr=False) preamble = self.unroll_and_optimize(loop, call_pure_results) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -355,11 +355,21 @@ class BaseTest(object): - def parse(self, s, boxkinds=None): + def parse(self, s, boxkinds=None, want_fail_descr=True): + if want_fail_descr: + invent_fail_descr = self.invent_fail_descr + else: + invent_fail_descr = lambda *args: None return parse(s, self.cpu, self.namespace, type_system=self.type_system, boxkinds=boxkinds, - invent_fail_descr=self.invent_fail_descr) + invent_fail_descr=invent_fail_descr) + + def add_guard_future_condition(self, res): + # invent a GUARD_FUTURE_CONDITION to not have to change all tests + if res.operations[-1].getopnum() == rop.JUMP: + guard = ResOperation(rop.GUARD_FUTURE_CONDITION, [], None, descr=self.invent_fail_descr(None, -1, [])) + res.operations.insert(-1, guard) def invent_fail_descr(self, model, opnum, fail_args): if fail_args is None: @@ -397,6 +407,7 @@ optimize_trace(metainterp_sd, loop, self.enable_opts) def unroll_and_optimize(self, loop, call_pure_results=None): + self.add_guard_future_condition(loop) operations = loop.operations jumpop = operations[-1] assert jumpop.getopnum() == rop.JUMP @@ -408,7 +419,6 @@ preamble = TreeLoop('preamble') preamble.inputargs = inputargs - preamble.resume_at_jump_descr = FakeDescrWithSnapshot() token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ @@ -419,7 +429,6 @@ assert preamble.operations[-1].getopnum() == rop.LABEL inliner = Inliner(inputargs, jump_args) - loop.resume_at_jump_descr = preamble.resume_at_jump_descr loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) 
for a in jump_args], @@ -450,18 +459,6 @@ def __eq__(self, other): return isinstance(other, FakeDescr) -class FakeDescrWithSnapshot(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescrWithSnapshot() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescrWithSnapshot) - - def convert_old_style_to_targets(loop, jump): newloop = TreeLoop(loop.name) newloop.inputargs = loop.inputargs diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -7,7 +7,7 @@ from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ - equaloplists, FakeDescrWithSnapshot + equaloplists from rpython.jit.metainterp.optimizeopt.intutils import IntBound from rpython.jit.metainterp.history import TreeLoop, JitCellToken from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData @@ -488,7 +488,6 @@ if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection # - bridge.resume_at_jump_descr = FakeDescrWithSnapshot() optimize_trace(metainterp_sd, bridge, self.enable_opts) @@ -497,6 +496,7 @@ loops = (loops, ) loops = [self.parse(loop) for loop in loops] bridge = self.parse(bridge) + self.add_guard_future_condition(bridge) for loop in loops: loop.preamble = self.unroll_and_optimize(loop) preamble = loops[0].preamble diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ 
b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -77,6 +77,12 @@ else: start_label = None + patchguardop = None + if len(loop.operations) > 1: + patchguardop = loop.operations[-2] + if patchguardop.getopnum() != rop.GUARD_FUTURE_CONDITION: + patchguardop = None + jumpop = loop.operations[-1] if jumpop.getopnum() == rop.JUMP or jumpop.getopnum() == rop.LABEL: loop.operations = loop.operations[:-1] @@ -94,7 +100,7 @@ stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) if jumpop.getopnum() == rop.JUMP: - if self.jump_to_already_compiled_trace(jumpop): + if self.jump_to_already_compiled_trace(jumpop, patchguardop): # Found a compiled trace to jump to if self.short: # Construct our short preamble @@ -108,7 +114,7 @@ descr=start_label.getdescr()) if self.short: # Construct our short preamble - self.close_loop(start_label, jumpop) + self.close_loop(start_label, jumpop, patchguardop) else: self.optimizer.send_extra_operation(jumpop) return @@ -162,11 +168,6 @@ original_jump_args = targetop.getarglist() jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] - assert self.optimizer.loop.resume_at_jump_descr - resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr.clone_if_mutable() - assert isinstance(resume_at_jump_descr, ResumeGuardDescr) - resume_at_jump_descr.rd_snapshot = self.fix_snapshot(jump_args, resume_at_jump_descr.rd_snapshot) - modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(jump_args) @@ -195,7 +196,6 @@ targetop.initarglist(inputargs) target_token.virtual_state = virtual_state target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] - target_token.resume_at_jump_descr = resume_at_jump_descr exported_values = {} for box in inputargs: @@ -230,7 +230,6 @@ self.short = target_token.short_preamble[:] self.short_seen = {} self.short_boxes = exported_state.short_boxes - self.short_resume_at_jump_descr = target_token.resume_at_jump_descr 
self.initial_virtual_state = target_token.virtual_state seen = {} @@ -298,7 +297,7 @@ self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) self.finalize_short_preamble(start_label) - def close_loop(self, start_label, jumpop): + def close_loop(self, start_label, jumpop, patchguardop): virtual_state = self.initial_virtual_state short_inputargs = self.short[0].getarglist() inputargs = self.inputargs @@ -335,6 +334,11 @@ # Note that self.short might be extended during this loop op = self.short[i] newop = self.short_inliner.inline_op(op) + if newop.is_guard(): + if not patchguardop: + raise InvalidLoop("would like to have short preamble, but it has a guard and there's no guard_future_condition") + descr = patchguardop.getdescr().clone_if_mutable() + newop.setdescr(descr) self.optimizer.send_extra_operation(newop) if op.result in self.short_boxes.assumed_classes: classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) @@ -417,8 +421,7 @@ if op.is_guard(): op = op.clone() op.setfailargs(None) - descr = target_token.resume_at_jump_descr.clone_if_mutable() - op.setdescr(descr) + op.setdescr(None) # will be set to a proper descr when the preamble is used short[i] = op # Clone ops and boxes to get private versions and @@ -440,8 +443,6 @@ if op.result and op.result in self.short_boxes.assumed_classes: target_token.assumed_classes[newop.result] = self.short_boxes.assumed_classes[op.result] short[i] = newop - target_token.resume_at_jump_descr = target_token.resume_at_jump_descr.clone_if_mutable() - inliner.inline_descr_inplace(target_token.resume_at_jump_descr) # Forget the values to allow them to be freed for box in short[0].getarglist(): @@ -485,8 +486,7 @@ if not isinstance(a, Const) and a not in self.short_seen: self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): - descr = self.short_resume_at_jump_descr.clone_if_mutable() - op.setdescr(descr) + op.setdescr(None) # will be 
set to a proper descr when the preamble is used if guards_needed and self.short_boxes.has_producer(op.result): value_guards = self.getvalue(op.result).make_guards(op.result) @@ -528,7 +528,7 @@ box = self.optimizer.values[box].force_box(self.optimizer) jumpargs.append(box) - def jump_to_already_compiled_trace(self, jumpop): + def jump_to_already_compiled_trace(self, jumpop, patchguardop): assert jumpop.getopnum() == rop.JUMP cell_token = jumpop.getdescr() @@ -570,6 +570,21 @@ debugmsg = 'Guarded to match ' except InvalidLoop: pass + #else: + # import pdb; pdb.set_trace() + if ok and not patchguardop: + # if we can't patch the guards to go to a good target, no use + # in jumping to this label + for guard in extra_guards: + if guard.is_guard(): + ok = False + break + else: + for shop in target.short_preamble[1:]: + if shop.is_guard(): + ok = False + break + target.virtual_state.debug_print(debugmsg, bad) if ok: @@ -584,14 +599,16 @@ for guard in extra_guards: if guard.is_guard(): - descr = target.resume_at_jump_descr.clone_if_mutable() - inliner.inline_descr_inplace(descr) + descr = patchguardop.getdescr().clone_if_mutable() guard.setdescr(descr) self.optimizer.send_extra_operation(guard) try: for shop in target.short_preamble[1:]: newop = inliner.inline_op(shop) + if newop.is_guard(): + descr = patchguardop.getdescr().clone_if_mutable() + newop.setdescr(descr) self.optimizer.send_extra_operation(newop) if shop.result in target.assumed_classes: classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1049,10 +1049,8 @@ # much less expensive to blackhole out of. 
saved_pc = self.pc self.pc = orgpc - resumedescr = compile.ResumeAtPositionDescr() - self.metainterp.capture_resumedata(resumedescr, orgpc) - - self.metainterp.reached_loop_header(greenboxes, redboxes, resumedescr) + self.metainterp.generate_guard(rop.GUARD_FUTURE_CONDITION, resumepc=orgpc) + self.metainterp.reached_loop_header(greenboxes, redboxes) self.pc = saved_pc # no exception, which means that the jit_merge_point did not # close the loop. We have to put the possibly-modified list @@ -1789,6 +1787,8 @@ self.jitdriver_sd) elif opnum == rop.GUARD_NOT_INVALIDATED: resumedescr = compile.ResumeGuardNotInvalidated() + elif opnum == rop.GUARD_FUTURE_CONDITION: + resumedescr = compile.ResumeAtPositionDescr() else: resumedescr = compile.ResumeGuardDescr() guard_op = self.history.record(opnum, moreargs, None, descr=resumedescr) @@ -2060,7 +2060,7 @@ else: duplicates[box] = None - def reached_loop_header(self, greenboxes, redboxes, resumedescr): + def reached_loop_header(self, greenboxes, redboxes): self.heapcache.reset(reset_virtuals=False) duplicates = {} @@ -2085,7 +2085,7 @@ # from the interpreter. if not self.partial_trace: # FIXME: Support a retrace to be a bridge as well as a loop - self.compile_trace(live_arg_boxes, resumedescr) + self.compile_trace(live_arg_boxes) # raises in case it works -- which is the common case, hopefully, # at least for bridges starting from a guard. @@ -2110,7 +2110,7 @@ raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP) # For now # Found! Compile it as a loop. # raises in case it works -- which is the common case - self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) + self.compile_loop(original_boxes, live_arg_boxes, start) # creation of the loop was cancelled! 
self.cancel_count += 1 if self.staticdata.warmrunnerdesc: @@ -2119,7 +2119,7 @@ if self.cancel_count > memmgr.max_unroll_loops: self.compile_loop_or_abort(original_boxes, live_arg_boxes, - start, resumedescr) + start) self.staticdata.log('cancelled, tracing more...') # Otherwise, no loop found so far, so continue tracing. @@ -2224,7 +2224,7 @@ return token def compile_loop(self, original_boxes, live_arg_boxes, start, - resume_at_jump_descr, try_disabling_unroll=False): + try_disabling_unroll=False): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] if not self.partial_trace: @@ -2237,13 +2237,12 @@ target_token = compile.compile_retrace(self, greenkey, start, original_boxes[num_green_args:], live_arg_boxes[num_green_args:], - resume_at_jump_descr, self.partial_trace, + self.partial_trace, self.resumekey) else: target_token = compile.compile_loop(self, greenkey, start, original_boxes[num_green_args:], live_arg_boxes[num_green_args:], - resume_at_jump_descr, try_disabling_unroll=try_disabling_unroll) if target_token is not None: assert isinstance(target_token, TargetToken) @@ -2257,18 +2256,18 @@ self.raise_continue_running_normally(live_arg_boxes, jitcell_token) def compile_loop_or_abort(self, original_boxes, live_arg_boxes, - start, resume_at_jump_descr): + start): """Called after we aborted more than 'max_unroll_loops' times. As a last attempt, try to compile the loop with unrolling disabled. 
""" if not self.partial_trace: self.compile_loop(original_boxes, live_arg_boxes, start, - resume_at_jump_descr, try_disabling_unroll=True) + try_disabling_unroll=True) # self.staticdata.log('cancelled too many times!') raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP) - def compile_trace(self, live_arg_boxes, resume_at_jump_descr): + def compile_trace(self, live_arg_boxes): num_green_args = self.jitdriver_sd.num_green_args greenkey = live_arg_boxes[:num_green_args] target_jitcell_token = self.get_procedure_token(greenkey, True) @@ -2278,7 +2277,7 @@ self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) try: - target_token = compile.compile_trace(self, self.resumekey, resume_at_jump_descr) + target_token = compile.compile_trace(self, self.resumekey) finally: self.history.operations.pop() # remove the JUMP if target_token is not None: # raise if it *worked* correctly diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -400,6 +400,7 @@ 'GUARD_NOT_FORCED/0d', # may be called with an exception currently set 'GUARD_NOT_FORCED_2/0d', # same as GUARD_NOT_FORCED, but for finish() 'GUARD_NOT_INVALIDATED/0d', + 'GUARD_FUTURE_CONDITION/0d', # is removable, may be patched by an optimization '_GUARD_LAST', # ----- end of guard operations ----- '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations ----- From noreply at buildbot.pypy.org Sat Apr 5 19:18:42 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 5 Apr 2014 19:18:42 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: there's now one extra guard (the guard_future_condition) Message-ID: <20140405171842.8AAB21C3359@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70471:98bf5d39e498 Date: 2014-04-05 19:16 +0200 
http://bitbucket.org/pypy/pypy/changeset/98bf5d39e498/ Log: there's now one extra guard (the guard_future_condition) diff --git a/rpython/jit/metainterp/test/test_jitprof.py b/rpython/jit/metainterp/test/test_jitprof.py --- a/rpython/jit/metainterp/test/test_jitprof.py +++ b/rpython/jit/metainterp/test/test_jitprof.py @@ -53,7 +53,7 @@ ] assert profiler.events == expected assert profiler.times == [2, 1] - assert profiler.counters == [1, 1, 3, 3, 1, 15, 2, 0, 0, 0, 0, + assert profiler.counters == [1, 1, 3, 3, 2, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0] def test_simple_loop_with_call(self): From noreply at buildbot.pypy.org Sat Apr 5 19:18:44 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 5 Apr 2014 19:18:44 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: move the guard generation really directly before the JUMP (there was other Message-ID: <20140405171844.029371C3359@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70472:044fb52caa93 Date: 2014-04-05 19:17 +0200 http://bitbucket.org/pypy/pypy/changeset/044fb52caa93/ Log: move the guard generation really directly before the JUMP (there was other stuff generated in between in some cases, which caused the remaining test failures) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1049,7 +1049,6 @@ # much less expensive to blackhole out of. saved_pc = self.pc self.pc = orgpc - self.metainterp.generate_guard(rop.GUARD_FUTURE_CONDITION, resumepc=orgpc) self.metainterp.reached_loop_header(greenboxes, redboxes) self.pc = saved_pc # no exception, which means that the jit_merge_point did not @@ -2075,7 +2074,11 @@ duplicates) live_arg_boxes += self.virtualizable_boxes live_arg_boxes.pop() - # + + # generate a dummy guard just before the JUMP so that unroll can use it + # when it's creating artificial guards. 
+ self.generate_guard(rop.GUARD_FUTURE_CONDITION) + assert len(self.virtualref_boxes) == 0, "missing virtual_ref_finish()?" # Called whenever we reach the 'loop_header' hint. # First, attempt to make a bridge: From noreply at buildbot.pypy.org Sat Apr 5 19:38:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 19:38:02 +0200 (CEST) Subject: [pypy-commit] stmgc default: Trivial Message-ID: <20140405173802.02C661C350A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1128:fa16abcd0319 Date: 2014-04-05 18:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/fa16abcd0319/ Log: Trivial diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -293,8 +293,7 @@ break; /* done */ pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; endpagenum = NB_PAGES; - if (pagenum == endpagenum) - break; /* no pages in the 2nd section, so done too */ + continue; } page_check_and_reshare(pagenum); From noreply at buildbot.pypy.org Sat Apr 5 19:38:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 19:38:03 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: hg merge default Message-ID: <20140405173803.A1A4F1C350A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1129:712d45f8081f Date: 2014-04-05 18:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/712d45f8081f/ Log: hg merge default diff too long, truncating to 2000 out of 3308 lines diff --git a/c7/README.txt b/c7/README.txt --- a/c7/README.txt +++ b/c7/README.txt @@ -57,7 +57,7 @@ We have a small, fixed number of big pieces of memory called "segments". Each segment has enough (virtual) address space for all the objects that the program needs. This is actually allocated from a single big mmap() -so that pages can be exchanged between segments with remap_file_pages(). +so that pages can be shared between segments with remap_file_pages(). We call N the number of segments. 
Actual threads are not limited in number; they grab one segment in order to run GC-manipulating code, and release it afterwards. This is similar to what occurs with the GIL, @@ -81,20 +81,26 @@ --- much like the OS does after a fork() for pages modified by one or the other process. -In more details: the first page of addresses in each thread-local region -(4096 bytes) is made non-accessible, to detect errors of accessing the -NULL pointer. The second page is reserved for thread-local data. The -rest is divided into 1/16 for thread-local read markers, followed by -15/16 for the real objects. We initially use remap_file_pages() on this -15/16 range. The read markers are described below. +In more details: we actually get N + 1 consecutive segments, and segment +number 0 is reserved to contain the globally committed state of the +objects. The segments actually used by threads are numbered from 1 to +N. The first page of addresses in each segment is made non-accessible, +to detect errors of accessing the NULL pointer. The second page is +reserved for thread-local data. The rest is divided into 1/16 for +thread-local read markers, followed by 15/16 for the real objects. The +read markers are described below. We use remap_file_pages() on this +15/16 range: every page in this range can be either remapped to the same +page from segment 0 ("shared", the initial state), or remapped back to +itself ("private"). -Each transaction records the objects that it changed. These are -necessarily within unshared pages. When we want to commit a -transaction, we ask for a safe-point (suspending the other threads in a -known state), and then we copy again the modified objects into the other -version(s) of that data. The point is that, from another thread's point -of view, the memory didn't appear to change unexpectedly, but only when -waiting in a safe-point. +Each transaction records the objects that it changed, and makes sure +that the corresponding pages are "private" in this segment. 
When we +want to commit a transaction, we ask for a safe-point (suspending the +other threads in a known state), and then we copy the modified objects +into the share pages, as well as into the other segments if they are +also backed by private pages. The point is that, from another thread's +point of view, the memory didn't appear to change unexpectedly, but only +when waiting in a safe-point. Moreover, we detect read-write conflicts when trying to commit. To do this, each transaction needs to track in their own (private) read @@ -105,11 +111,13 @@ requiring an abort (which it will do when trying to leave the safe-point). -On the other hand, write-write conflicts are detected eagerly, which is -necessary to avoid that all segments contain a modified version of the -object and no segment is left with the original version. It is done -with a compare-and-swap into an array of write locks (only the first -time a given old object is modified by a given transaction). +On the other hand, write-write conflicts are detected eagerly. It is +done with a compare-and-swap into an array of write locks (only the +first time a given old object is modified by a given transaction). This +used to be necessary in some previous version, but is kept for now +because it would require more measurements to know if it's a good or bad +idea; the alternative is to simply let conflicting writes proceed and +detect the situation at commit time only. Object creation and GC @@ -127,7 +135,7 @@ objects that are also outside the nursery. - pages need to be unshared when they contain old objects that are then - modified. + modified (and only in this case). - we need a write barrier to detect the changes done to any non-nursery object (the first time only). This is just a flag check. Then the @@ -139,13 +147,15 @@ to be synchronized, but ideally the threads should then proceed to do a parallel GC (i.e. 
mark in all threads in parallel, and then sweep in al threads in parallel, with one arbitrary thread - taking on the additional coordination role needed). + taking on the additional coordination role needed). But we'll think + about it when it becomes a problem. - the major collections should be triggered by the amount of really-used - memory, which means: counting the unshared pages as N pages. Major - collection should then re-share the pages as much as possible. This is - the essential part that guarantees that old, no-longer-modified - bunches of objects are eventually present in only one copy in memory, - in shared pages --- while at the same time bounding the number of - calls to remap_file_pages() for each page at N-1 per major collection - cycle. + memory, which means: counting each actual copy of a private page + independently, but shared pages as one. Major collection will then + re-share the pages as much as possible. This is the essential part + that guarantees that old, no-longer-modified bunches of objects are + eventually present in only one copy in memory, in shared pages --- + while at the same time bounding the number of calls to + remap_file_pages() at two for each private page (one to privatize, one + to re-share) for a complete major collection cycle. diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -1,15 +1,13 @@ - -known-working revision: 5e4ec1af0e0c - - use small uniform gcpages - write barrier for big arrays -- weakrefs - finalizers - the highest_overflow_number can overflow after 2**30 non-collect-time minor collections -- re-enable the buggy RESHARE_PAGES=1, probably with a better impl +- fork() is done by copying the whole mmap non-lazily; improve. + +- contention.c: when pausing: should also tell other_pseg "please commit soon" diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -17,16 +17,20 @@ H_FILES = ../stmgc.h ../stm/*.h C_FILES = ../stmgc.c ../stm/*.c +COMMON = -I.. 
-pthread -lrt -g -Wall -Werror + # note that 'build' is partially optimized but still contains all asserts debug-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -DSTM_DEBUGPRINT -DSTM_GC_NURSERY=128 -g -O0 \ - $< -o debug-$* -Wall -Werror ../stmgc.c + clang $(COMMON) -DSTM_DEBUGPRINT -DSTM_GC_NURSERY=128 -O0 \ + $< -o debug-$* ../stmgc.c build-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -DSTM_GC_NURSERY=128 -g -O1 \ - $< -o build-$* -Wall -Werror ../stmgc.c + clang $(COMMON) -DSTM_GC_NURSERY=128 -O1 $< -o build-$* ../stmgc.c release-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -g -DNDEBUG -O2 $< -o release-$* \ - -Wall -Werror ../stmgc.c + clang $(COMMON) -DNDEBUG -O2 $< -o release-$* ../stmgc.c + + +release-htm-%: %.c ../../htm-c7/stmgc.? ../../htm-c7/htm.h + clang $(COMMON) -O2 $< -o release-htm-$* ../../htm-c7/stmgc.c -DUSE_HTM diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -4,11 +4,20 @@ #include #include -#include "stmgc.h" +#ifdef USE_HTM +# include "../../htm-c7/stmgc.h" +#else +# include "stmgc.h" +#endif -#define NTHREADS 3 -#define LIST_LENGTH 2000 -#define BUNCH 100 +#define LIST_LENGTH 4000 +#define NTHREADS 2 + +#ifdef USE_HTM +# define BUNCH 200 +#else +# define BUNCH 200 +#endif typedef TLPREFIX struct node_s node_t; typedef node_t* nodeptr_t; @@ -175,6 +184,12 @@ static sem_t done; +void unregister_thread_local(void) +{ + stm_flush_timing(&stm_thread_local, 1); + stm_unregister_thread_local(&stm_thread_local); +} + void *demo2(void *arg) { int status; @@ -189,7 +204,7 @@ STM_POP_ROOT(stm_thread_local, global_chained_list); assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); - stm_unregister_thread_local(&stm_thread_local); + unregister_thread_local(); status = sem_post(&done); assert(status == 0); return NULL; } @@ -234,6 +249,7 @@ setup_list(); + for (i = 1; i <= NTHREADS; i++) { newthread(demo2, (void*)(uintptr_t)i); } @@ -245,7 +261,7 @@ final_check(); - 
stm_unregister_thread_local(&stm_thread_local); + unregister_thread_local(); stm_teardown(); return 0; diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include "stmgc.h" @@ -12,12 +14,14 @@ #define THREAD_STARTS 1000 // how many restarts of threads #define PREBUILT_ROOTS 3 #define MAXROOTS 1000 +#define FORKS 3 // SUPPORT struct node_s; typedef TLPREFIX struct node_s node_t; typedef node_t* nodeptr_t; typedef object_t* objptr_t; +int num_forked_children = 0; struct node_s { struct object_s hdr; @@ -281,7 +285,13 @@ return (objptr_t)-1; // break current } else if (get_rand(20) == 1) { push_roots(); - stm_become_inevitable("please"); + stm_become_inevitable(&stm_thread_local, "please"); + pop_roots(); + return NULL; + } else if (get_rand(240) == 1) { + push_roots(); + stm_become_globally_unique_transaction(&stm_thread_local, "really"); + fprintf(stderr, "[GUT/%d]", (int)STM_SEGMENT->segment_num); pop_roots(); return NULL; } @@ -315,8 +325,9 @@ setup_thread(); - objptr_t p = NULL; + objptr_t p; stm_jmpbuf_t here; + volatile int call_fork = (arg != NULL); STM_START_TRANSACTION(&stm_thread_local, here); assert(td.num_roots >= td.num_roots_at_transaction_start); @@ -335,19 +346,39 @@ if (p == (objptr_t)-1) { push_roots(); - stm_commit_transaction(); - td.num_roots_at_transaction_start = td.num_roots; + if (call_fork == 0) { /* common case */ + stm_commit_transaction(); + td.num_roots_at_transaction_start = td.num_roots; + if (get_rand(100) < 98) { + STM_START_TRANSACTION(&stm_thread_local, here); + } else { + stm_start_inevitable_transaction(&stm_thread_local); + } + td.num_roots = td.num_roots_at_transaction_start; + p = NULL; + pop_roots(); + reload_roots(); + } + else { + /* run a fork() inside the transaction */ + printf("========== FORK =========\n"); + call_fork = 0; + pid_t child = fork(); + printf("=== in process %d thread %lx, fork() 
returned %d\n", + (int)getpid(), (long)pthread_self(), (int)child); + if (child == -1) { + fprintf(stderr, "fork() error: %m\n"); + abort(); + } + if (child != 0) + num_forked_children++; + else + num_forked_children = 0; - if (get_rand(100) < 98) { - STM_START_TRANSACTION(&stm_thread_local, here); - } else { - stm_start_inevitable_transaction(&stm_thread_local); + pop_roots(); + p = NULL; } - td.num_roots = td.num_roots_at_transaction_start; - p = NULL; - pop_roots(); - reload_roots(); } } stm_commit_transaction(); @@ -427,8 +458,24 @@ assert(status == 0); printf("thread finished\n"); if (thread_starts) { + long forkbase = NUMTHREADS * THREAD_STARTS / (FORKS + 1); + long _fork = (thread_starts % forkbase) == 0; thread_starts--; - newthread(demo_random, NULL); + newthread(demo_random, (void *)_fork); + } + } + + for (i = 0; i < num_forked_children; i++) { + pid_t child = wait(&status); + if (child == -1) + perror("wait"); + printf("From %d: child %d terminated with exit status %d\n", + (int)getpid(), (int)child, status); + if (WIFEXITED(status) && WEXITSTATUS(status) == 0) + ; + else { + printf("*** error from the child ***\n"); + return 1; } } diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c new file mode 100644 --- /dev/null +++ b/c7/demo/demo_simple.c @@ -0,0 +1,111 @@ +#include +#include +#include +#include +#include + +#ifdef USE_HTM +# include "../../htm-c7/stmgc.h" +#else +# include "stmgc.h" +#endif + +#define ITERS 1000000 +#define NTHREADS 2 + + +typedef TLPREFIX struct node_s node_t; +typedef node_t* nodeptr_t; +typedef object_t* objptr_t; + +struct node_s { + struct object_s hdr; + long value; + nodeptr_t next; +}; + +__thread stm_thread_local_t stm_thread_local; + + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + return sizeof(struct node_s); +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + struct node_s *n; + n = (struct node_s*)obj; + visit((object_t **)&n->next); +} + + + +static sem_t done; + +static 
__thread int tl_counter = 0; +static int gl_counter = 0; + +void *demo2(void *arg) +{ + int status; + stm_register_thread_local(&stm_thread_local); + tl_counter = 0; + + object_t *tmp; + int i = 0; + while (i < ITERS) { + stm_start_inevitable_transaction(&stm_thread_local); + tl_counter++; + if (i % 500 < 250) + STM_PUSH_ROOT(stm_thread_local, stm_allocate(16));//gl_counter++; + else + STM_POP_ROOT(stm_thread_local, tmp); + stm_commit_transaction(); + i++; + } + + assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); + + stm_unregister_thread_local(&stm_thread_local); + status = sem_post(&done); assert(status == 0); + return NULL; +} + + +void newthread(void*(*func)(void*), void *arg) +{ + pthread_t th; + int status = pthread_create(&th, NULL, func, arg); + if (status != 0) + abort(); + pthread_detach(th); + printf("started new thread\n"); +} + + + +int main(void) +{ + int status, i; + + status = sem_init(&done, 0, 0); assert(status == 0); + + stm_setup(); + stm_register_thread_local(&stm_thread_local); + + + for (i = 1; i <= NTHREADS; i++) { + newthread(demo2, (void*)(uintptr_t)i); + } + + for (i = 1; i <= NTHREADS; i++) { + status = sem_wait(&done); assert(status == 0); + } + + + stm_unregister_thread_local(&stm_thread_local); + stm_teardown(); + + return 0; +} diff --git a/c7/llvmfix/README.txt b/c7/llvmfix/README.txt new file mode 100644 --- /dev/null +++ b/c7/llvmfix/README.txt @@ -0,0 +1,24 @@ + +no-introduce-bogus-cast-in-combine.diff + + This is just fixes for a couple of bugs. + + +no-memset-creation-with-addrspace.diff + + This is a workaround for the fact that llvm.memset doesn't support + the address_space 256. It's a workaround, because it also prevents + some useful optimizations: for example replacing "x->a = 0; x->b = + 0;" with a single larger zeroing instruction. In other words, it + crashes only if an unpatched llvm introduce llvm.memset *and* this + memset remains as a real function call in the end. 
+ + +addrspacecast-in-constant.diff + + This is a workaround for (what we believe to be) clang producing + incorrectly the addrspacecast operation for this kind of code: + + static int a = 42; + struct s1 { void __attribute__((address_space(256))) *a; }; + struct s1 fofo = { (void __attribute__((address_space(256))) *)(long)&a }; diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -69,6 +69,14 @@ __attribute__((unused)) static void cm_always_wait_for_other_thread(struct contmgr_s *cm) { + /* we tried this contention management, but it seems to have + very bad cases: if thread 1 always reads an object in every + transaction, and thread 2 wants to write this object just + once, then thread 2 will pause when it tries to commit; + it will wait until thread 1 committed; but by the time + thread 2 resumes again, thread 1 has already started the + next transaction and read the object again. + */ cm_abort_the_younger(cm); cm->try_sleep = true; } @@ -110,23 +118,37 @@ #ifdef STM_TESTS cm_abort_the_younger(&contmgr); #else - cm_always_wait_for_other_thread(&contmgr); + cm_pause_if_younger(&contmgr); #endif /* Fix the choices that are found incorrect due to TS_INEVITABLE - or NSE_SIGABORT */ - if (contmgr.other_pseg->pub.nursery_end == NSE_SIGABORT) { + or is_abort() */ + if (is_abort(contmgr.other_pseg->pub.nursery_end)) { contmgr.abort_other = true; contmgr.try_sleep = false; } else if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { assert(contmgr.other_pseg->transaction_state != TS_INEVITABLE); contmgr.abort_other = true; + contmgr.try_sleep = false; } else if (contmgr.other_pseg->transaction_state == TS_INEVITABLE) { contmgr.abort_other = false; } + + int wait_category = + kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ : + kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE : + STM_TIME_WAIT_OTHER; + + int abort_category = + kind == WRITE_WRITE_CONTENTION ? 
STM_TIME_RUN_ABORTED_WRITE_WRITE : + kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ : + kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE : + STM_TIME_RUN_ABORTED_OTHER; + + if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { /* Sleep. @@ -140,6 +162,10 @@ */ contmgr.other_pseg->signal_when_done = true; + change_timing_state(wait_category); + + /* XXX should also tell other_pseg "please commit soon" */ + dprintf(("pausing...\n")); cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; @@ -149,15 +175,20 @@ if (must_abort()) abort_with_mutex(); + + change_timing_state(STM_TIME_RUN_CURRENT); } + else if (!contmgr.abort_other) { dprintf(("abort in contention\n")); + STM_SEGMENT->nursery_end = abort_category; abort_with_mutex(); } + else { /* We have to signal the other thread to abort, and wait until it does. */ - contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; + contmgr.other_pseg->pub.nursery_end = abort_category; int sp = contmgr.other_pseg->safe_point; switch (sp) { diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -3,7 +3,11 @@ static void write_read_contention_management(uint8_t other_segment_num); static void inevitable_contention_management(uint8_t other_segment_num); +static inline bool is_abort(uintptr_t nursery_end) { + return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE); +} + static inline bool is_aborting_now(uint8_t other_segment_num) { - return (get_segment(other_segment_num)->nursery_end == NSE_SIGABORT && + return (is_abort(get_segment(other_segment_num)->nursery_end) && get_priv_segment(other_segment_num)->safe_point != SP_RUNNING); } diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -158,7 +158,7 @@ MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) { /* fall-back */ #if STM_TESTS - 
stm_fatalerror("reset_transaction_read_version: %m\n"); + stm_fatalerror("reset_transaction_read_version: %m"); #endif memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL); } @@ -173,7 +173,7 @@ retry: if (jmpbuf == NULL) { - wait_for_end_of_inevitable_transaction(false); + wait_for_end_of_inevitable_transaction(tl); } if (!acquire_thread_segment(tl)) @@ -182,6 +182,8 @@ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); + change_timing_state(STM_TIME_RUN_CURRENT); + STM_PSEGMENT->start_time = tl->_timing_cur_start; STM_PSEGMENT->safe_point = SP_RUNNING; STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR : TS_INEVITABLE); @@ -192,9 +194,9 @@ STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj; + enter_safe_point_if_requested(); dprintf(("start_transaction\n")); - enter_safe_point_if_requested(); s_mutex_unlock(); /* Now running the SP_RUNNING start. We can set our @@ -434,7 +436,7 @@ list_clear(STM_PSEGMENT->modified_old_objects); } -static void _finish_transaction(void) +static void _finish_transaction(int attribute_to) { STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; @@ -443,6 +445,8 @@ LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); LIST_FREE(STM_PSEGMENT->large_overflow_objects); + timing_end_transaction(attribute_to); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ @@ -456,13 +460,16 @@ minor_collection(/*commit=*/ true); + /* the call to minor_collection() above leaves us with + STM_TIME_BOOKKEEPING */ + s_mutex_lock(); restart: /* force all other threads to be paused. They will unpause automatically when we are done here, i.e. at mutex_unlock(). Important: we should not call cond_wait() in the meantime. 
*/ - synchronize_all_threads(); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); /* detect conflicts */ if (detect_write_read_conflicts()) @@ -504,10 +511,12 @@ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* wake up one thread in wait_for_end_of_inevitable_transaction() */ cond_signal(C_INEVITABLE); + if (globally_unique_transaction) + committed_globally_unique_transaction(); } /* done */ - _finish_transaction(); + _finish_transaction(STM_TIME_RUN_COMMITTED); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ s_mutex_unlock(); @@ -582,6 +591,16 @@ */ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); + switch (pseg->transaction_state) { + case TS_REGULAR: + break; + case TS_INEVITABLE: + stm_fatalerror("abort: transaction_state == TS_INEVITABLE"); + default: + stm_fatalerror("abort: bad transaction_state == %d", + (int)pseg->transaction_state); + } + /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -591,6 +610,7 @@ /* reset the tl->shadowstack and thread_local_obj to their original value before the transaction start */ stm_thread_local_t *tl = pseg->pub.running_thread; + assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; @@ -606,15 +626,6 @@ assert(_has_mutex()); dprintf(("~~~ ABORT\n")); - switch (STM_PSEGMENT->transaction_state) { - case TS_REGULAR: - break; - case TS_INEVITABLE: - stm_fatalerror("abort: transaction_state == TS_INEVITABLE"); - default: - stm_fatalerror("abort: bad transaction_state == %d", - (int)STM_PSEGMENT->transaction_state); - } assert(STM_PSEGMENT->running_pthread == pthread_self()); abort_data_structures_from_segment_num(STM_SEGMENT->segment_num); @@ -629,13 +640,16 @@ /* invoke the callbacks */ invoke_and_clear_callbacks_on_abort(); - if 
(STM_SEGMENT->nursery_end == NSE_SIGABORT) { + int attribute_to = STM_TIME_RUN_ABORTED_OTHER; + + if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ + attribute_to = STM_SEGMENT->nursery_end; STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE : NURSERY_END; } - _finish_transaction(); + _finish_transaction(attribute_to); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ /* Broadcast C_ABORTED to wake up contention.c */ @@ -668,11 +682,25 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); - wait_for_end_of_inevitable_transaction(true); + wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; clear_callbacks_on_abort(); } + else { + assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE); + assert(STM_SEGMENT->jmpbuf_ptr == NULL); + } s_mutex_unlock(); } + +void stm_become_globally_unique_transaction(stm_thread_local_t *tl, + const char *msg) +{ + stm_become_inevitable(tl, msg); /* may still abort */ + + s_mutex_lock(); + synchronize_all_threads(STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE); + s_mutex_unlock(); +} diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -28,6 +28,8 @@ #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) +#define OLD_RM_START ((END_NURSERY_PAGE * 4096UL) >> 4) +#define FIRST_OLD_RM_PAGE (OLD_RM_START / 4096UL) #define NB_READMARKER_PAGES (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) #define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) @@ -114,7 +116,7 @@ /* Start time: to know approximately for how long a transaction has been running, in contention management */ - uint64_t start_time; + double start_time; /* This is the number stored in the overflowed objects (a multiple of GCFLAG_OVERFLOW_NUMBER_bit0). 
It is incremented when the diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c new file mode 100644 --- /dev/null +++ b/c7/stm/forksupport.c @@ -0,0 +1,303 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +/* XXX this is currently not doing copy-on-write, but simply forces a + copy of all pages as soon as fork() is called. */ + + +static char *fork_big_copy = NULL; +static stm_thread_local_t *fork_this_tl; +static bool fork_was_in_transaction; + +static char *setup_mmap(char *reason); /* forward, in setup.c */ +static void setup_protection_settings(void); /* forward, in setup.c */ +static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ + + +static bool page_is_null(char *p) +{ + long *q = (long *)p; + long i; + for (i = 0; i < 4096 / sizeof(long); i++) + if (q[i] != 0) + return false; + return true; +} + + +static void forksupport_prepare(void) +{ + if (stm_object_pages == NULL) + return; + + /* So far we attempt to check this by walking all stm_thread_local_t, + marking the one from the current thread, and verifying that it's not + running a transaction. This assumes that the stm_thread_local_t is just + a __thread variable, so never changes threads. 
+ */ + s_mutex_lock(); + + dprintf(("forksupport_prepare\n")); + fprintf(stderr, "[forking: for now, this operation can take some time]\n"); + + stm_thread_local_t *this_tl = NULL; + stm_thread_local_t *tl = stm_all_thread_locals; + do { + if (pthread_equal(*_get_cpth(tl), pthread_self())) { + if (this_tl != NULL) + stm_fatalerror("fork(): found several stm_thread_local_t" + " from the same thread"); + this_tl = tl; + } + tl = tl->next; + } while (tl != stm_all_thread_locals); + + if (this_tl == NULL) + stm_fatalerror("fork(): found no stm_thread_local_t from this thread"); + s_mutex_unlock(); + + bool was_in_transaction = _stm_in_transaction(this_tl); + if (was_in_transaction) { + stm_become_inevitable(this_tl, "fork"); + /* Note that the line above can still fail and abort, which should + be fine */ + } + else { + stm_start_inevitable_transaction(this_tl); + } + + s_mutex_lock(); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); + mutex_pages_lock(); + + /* Make a new mmap at some other address, but of the same size as + the standard mmap at stm_object_pages + */ + char *big_copy = setup_mmap("stmgc's fork support"); + + /* Copy each of the segment infos into the new mmap, nurseries, + and associated read markers + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *src, *dst; + struct stm_priv_segment_info_s *psrc = get_priv_segment(i); + dst = big_copy + (((char *)psrc) - stm_object_pages); + *(struct stm_priv_segment_info_s *)dst = *psrc; + + src = get_segment_base(i) + FIRST_READMARKER_PAGE * 4096UL; + dst = big_copy + (src - stm_object_pages); + long j; + for (j = 0; j < END_NURSERY_PAGE - FIRST_READMARKER_PAGE; j++) { + if (!page_is_null(src)) + pagecopy(dst, src); + src += 4096; + dst += 4096; + } + } + + /* Copy all the data from the two ranges of objects (large, small) + into the new mmap + */ + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - 
stm_object_pages) / 4096UL; + if (endpagenum < NB_PAGES) + endpagenum++; /* the next page too, because it might contain + data from largemalloc */ + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + pagenum--; /* the prev page too, because it does contain + data from largemalloc */ + endpagenum = NB_PAGES; + } + + char *src = stm_object_pages + pagenum * 4096UL; + char *dst = big_copy + pagenum * 4096UL; + pagecopy(dst, src); + + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + if (ps.by_segment != 0) { + long j; + for (j = 0; j < NB_SEGMENTS; j++) { + src += NB_PAGES * 4096UL; + dst += NB_PAGES * 4096UL; + if (ps.by_segment & (1 << j)) { + pagecopy(dst, src); + } + } + } + pagenum++; + } + + assert(fork_big_copy == NULL); + fork_big_copy = big_copy; + fork_this_tl = this_tl; + fork_was_in_transaction = was_in_transaction; + + assert(_has_mutex()); + dprintf(("forksupport_prepare: from %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0])); +} + +static void forksupport_parent(void) +{ + if (stm_object_pages == NULL) + return; + + dprintf(("forksupport_parent: continuing to run %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0])); + assert(_has_mutex()); + assert(_is_tl_registered(fork_this_tl)); + + /* In the parent, after fork(), we can simply forget about the big copy + that we made for the child. 
+ */ + assert(fork_big_copy != NULL); + munmap(fork_big_copy, TOTAL_MEMORY); + fork_big_copy = NULL; + bool was_in_transaction = fork_was_in_transaction; + + mutex_pages_unlock(); + s_mutex_unlock(); + + if (!was_in_transaction) { + stm_commit_transaction(); + } + + dprintf(("forksupport_parent: continuing to run\n")); +} + +static void fork_abort_thread(long i) +{ + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + dprintf(("forksupport_child: abort in seg%ld\n", i)); + assert(pr->pub.running_thread->associated_segment_num == i); + assert(pr->transaction_state == TS_REGULAR); + set_gs_register(get_segment_base(i)); + + stm_jmpbuf_t jmpbuf; + if (__builtin_setjmp(jmpbuf) == 0) { + pr->pub.jmpbuf_ptr = &jmpbuf; +#ifndef NDEBUG + pr->running_pthread = pthread_self(); +#endif + pr->pub.running_thread->shadowstack = ( + pr->shadowstack_at_start_of_transaction); + stm_abort_transaction(); + } +} + +static void forksupport_child(void) +{ + if (stm_object_pages == NULL) + return; + + /* this new process contains no other thread, so we can + just release these locks early */ + mutex_pages_unlock(); + s_mutex_unlock(); + + /* Move the copy of the mmap over the old one, overwriting it + and thus freeing the old mapping in this process + */ + assert(fork_big_copy != NULL); + assert(stm_object_pages != NULL); + void *res = mremap(fork_big_copy, TOTAL_MEMORY, TOTAL_MEMORY, + MREMAP_MAYMOVE | MREMAP_FIXED, + stm_object_pages); + if (res != stm_object_pages) + stm_fatalerror("after fork: mremap failed: %m"); + fork_big_copy = NULL; + + /* Unregister all other stm_thread_local_t, mostly as a way to free + the memory used by the shadowstacks + */ + while (stm_all_thread_locals->next != stm_all_thread_locals) { + if (stm_all_thread_locals == fork_this_tl) + stm_unregister_thread_local(stm_all_thread_locals->next); + else + stm_unregister_thread_local(stm_all_thread_locals); + } + assert(stm_all_thread_locals == fork_this_tl); + + /* Restore the base setting of PROT_NONE 
pages. + */ + setup_protection_settings(); + + /* Make all pages shared again. + */ + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + endpagenum = NB_PAGES; + if (endpagenum == NB_PAGES) + break; /* done */ + } + + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + long j; + for (j = 0; j < NB_SEGMENTS; j++) { + if (!(ps.by_segment & (1 << j))) { + _page_do_reshare(j + 1, pagenum); + } + } + pagenum++; + } + + /* Force the interruption of other running segments + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + if (pr->pub.running_thread != NULL && + pr->pub.running_thread != fork_this_tl) { + fork_abort_thread(i); + } + } + + /* Restore a few things: the new pthread_self(), and the %gs + register */ + int segnum = fork_this_tl->associated_segment_num; + assert(1 <= segnum && segnum <= NB_SEGMENTS); + *_get_cpth(fork_this_tl) = pthread_self(); + set_gs_register(get_segment_base(segnum)); + assert(STM_SEGMENT->segment_num == segnum); + + if (!fork_was_in_transaction) { + stm_commit_transaction(); + } + + /* Done */ + dprintf(("forksupport_child: running one thread now\n")); +} + + +static void setup_forksupport(void) +{ + static bool fork_support_ready = false; + + if (!fork_support_ready) { + int res = pthread_atfork(forksupport_prepare, forksupport_parent, + forksupport_child); + if (res != 0) + stm_fatalerror("pthread_atfork() failed: %m"); + fork_support_ready = true; + } +} diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c --- a/c7/stm/fprintcolor.c +++ b/c7/stm/fprintcolor.c @@ 
-8,8 +8,8 @@ char buffer[2048]; va_list ap; int result; - int size = (int)sprintf(buffer, "\033[%dm[%lx] ", dprintfcolor(), - (long)pthread_self()); + int size = (int)sprintf(buffer, "\033[%dm[%d,%lx] ", dprintfcolor(), + (int)getpid(), (long)pthread_self()); assert(size >= 0); va_start(ap, format); @@ -41,6 +41,7 @@ va_start(ap, format); vfprintf(stderr, format, ap); + fprintf(stderr, "\n"); va_end(ap); abort(); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -42,7 +42,7 @@ /* Allocate the object with largemalloc.c from the lower addresses. */ char *addr = _stm_large_malloc(size); if (addr == NULL) - stm_fatalerror("not enough memory!\n"); + stm_fatalerror("not enough memory!"); if (addr + size > uninitialized_page_start) { uintptr_t npages; @@ -50,7 +50,7 @@ npages += GCPAGE_NUM_PAGES; if (uninitialized_page_stop - uninitialized_page_start < npages * 4096UL) { - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } setup_N_pages(uninitialized_page_start, npages); uninitialized_page_start += npages * 4096UL; @@ -91,11 +91,15 @@ if (is_major_collection_requested()) { /* if still true */ - synchronize_all_threads(); + int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); if (is_major_collection_requested()) { /* if *still* true */ major_collection_now_at_safe_point(); } + + change_timing_state(oldstate); } s_mutex_unlock(); @@ -246,8 +250,7 @@ break; /* done */ pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; endpagenum = NB_PAGES; - if (pagenum == endpagenum) - break; /* no pages in the 2nd section, so done too */ + continue; } page_check_and_reshare(pagenum); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -273,8 +273,10 @@ /* unlink the following chunk */ mscan->d.next->prev = mscan->d.prev; mscan->d.prev->next = mscan->d.next; - 
assert((mscan->prev_size = (size_t)-258, 1)); /* 0xfffffffffffffefe */ - assert((mscan->size = (size_t)-515, 1)); /* 0xfffffffffffffdfd */ +#ifndef NDEBUG + mscan->prev_size = (size_t)-258; /* 0xfffffffffffffefe */ + mscan->size = (size_t)-515; /* 0xfffffffffffffdfd */ +#endif /* merge the two chunks */ assert(fsize == fscan->prev_size); diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -12,7 +12,7 @@ uintptr_t initial_allocation = 32; struct list_s *lst = malloc(LIST_SETSIZE(initial_allocation)); if (lst == NULL) - stm_fatalerror("out of memory in list_create\n"); /* XXX */ + stm_fatalerror("out of memory in list_create"); /* XXX */ lst->count = 0; lst->last_allocated = initial_allocation - 1; @@ -24,7 +24,7 @@ nalloc = LIST_OVERCNT(nalloc); lst = realloc(lst, LIST_SETSIZE(nalloc)); if (lst == NULL) - stm_fatalerror("out of memory in _list_grow\n"); /* XXX */ + stm_fatalerror("out of memory in _list_grow"); /* XXX */ lst->last_allocated = nalloc - 1; return lst; @@ -93,7 +93,7 @@ //fprintf(stderr, "growth: %ld\n", newalloc); char *newitems = malloc(newalloc); if (newitems == NULL) { - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } newtree.raw_start = newitems; newtree.raw_current = newitems; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -32,10 +32,6 @@ } } -static void teardown_nursery(void) -{ -} - static inline bool _is_in_nursery(object_t *obj) { assert((uintptr_t)obj >= NURSERY_START); @@ -229,8 +225,13 @@ realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start); nursery_used = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; + OPT_ASSERT((nursery_used & 7) == 0); memset(realnursery, 0, nursery_used); + /* assert that the rest of the nursery still contains only zeroes */ + assert_memset_zero(realnursery + nursery_used, + (NURSERY_END - _stm_nursery_start) - nursery_used); + pseg->pub.nursery_current = 
(stm_char *)_stm_nursery_start; /* free any object left from 'young_outside_nursery' */ @@ -320,7 +321,11 @@ stm_safe_point(); + change_timing_state(STM_TIME_MINOR_GC); + _do_minor_collection(commit); + + change_timing_state(commit ? STM_TIME_BOOKKEEPING : STM_TIME_RUN_CURRENT); } void stm_collect(long level) @@ -381,6 +386,7 @@ void _stm_set_nursery_free_count(uint64_t free_count) { assert(free_count <= NURSERY_SIZE); + assert((free_count & 7) == 0); _stm_nursery_start = NURSERY_END - free_count; long i; diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,10 +1,6 @@ /* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ -#define NSE_SIGPAUSE 0 -#define NSE_SIGABORT 1 -#if NSE_SIGABORT > _STM_NSE_SIGNAL_MAX -# error "update _STM_NSE_SIGNAL_MAX" -#endif +#define NSE_SIGPAUSE STM_TIME_WAIT_OTHER static uint32_t highest_overflow_number; @@ -14,9 +10,7 @@ static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg); static void major_do_minor_collections(void); -static inline bool must_abort(void) { - return STM_SEGMENT->nursery_end == NSE_SIGABORT; -} +#define must_abort() is_abort(STM_SEGMENT->nursery_end) static void assert_memset_zero(void *s, size_t n); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -30,9 +30,14 @@ static void mutex_pages_lock(void) { + if (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) == 0) + return; + + int previous = change_timing_state(STM_TIME_SPIN_LOOP); while (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) != 0) { spin_loop(); } + change_timing_state(previous); } static void mutex_pages_unlock(void) @@ -93,7 +98,7 @@ int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) - stm_fatalerror("remap_file_pages: %m\n"); + stm_fatalerror("remap_file_pages: %m"); } static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) @@ -103,6 +108,8 @@ segment 0. 
*/ uintptr_t i; assert(_has_mutex_pages()); + if (count == 0) + return; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, @@ -140,6 +147,13 @@ mutex_pages_unlock(); } +static void _page_do_reshare(long segnum, uintptr_t pagenum) +{ + char *segment_base = get_segment_base(segnum); + d_remap_file_pages(segment_base + pagenum * 4096UL, + 4096, pagenum); +} + static void page_reshare(uintptr_t pagenum) { struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; @@ -160,25 +174,24 @@ increment_total_allocated(total); } +static void pages_setup_readmarkers_for_nursery(void) +{ + /* The nursery page's read markers are never read, but must still + be writeable. We'd like to map the pages to a general "trash + page"; missing one, we remap all the pages over to the same one. + We still keep one page *per segment* to avoid cross-CPU cache + conflicts. -#if 0 -static bool is_fully_in_shared_pages(object_t *obj) -{ - uintptr_t first_page = ((uintptr_t)obj) / 4096UL; + (XXX no performance difference measured so far) + */ + long i, j; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *segment_base = get_segment_base(i); - if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) - return (flag_page_private[first_page] == SHARED_PAGE); - - ssize_t obj_size = stmcb_size_rounded_up( - (struct object_s *)REAL_ADDRESS(stm_object_pages, obj)); - - uintptr_t last_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; - - do { - if (flag_page_private[first_page++] != SHARED_PAGE) - return false; - } while (first_page <= last_page); - - return true; + for (j = FIRST_READMARKER_PAGE + 1; j < FIRST_OLD_RM_PAGE; j++) { + remap_file_pages(segment_base + 4096 * j, 4096, 0, + i * NB_PAGES + FIRST_READMARKER_PAGE, 0); + /* errors here ignored */ + } + } } -#endif diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -38,7 +38,10 @@ static void pages_initialize_shared(uintptr_t 
pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); static void page_reshare(uintptr_t pagenum); +static void _page_do_reshare(long segnum, uintptr_t pagenum); +static void pages_setup_readmarkers_for_nursery(void); +/* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ static void mutex_pages_lock(void); static void mutex_pages_unlock(void); static bool _has_mutex_pages(void) __attribute__((unused)); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -3,6 +3,42 @@ #endif +static char *setup_mmap(char *reason) +{ + char *result = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS, -1, 0); + if (result == MAP_FAILED) + stm_fatalerror("%s failed: %m\n", reason); + + return result; +} + +static void setup_protection_settings(void) +{ + /* The segment 0 is not used to run transactions, but contains the + shared copy of the pages. We mprotect all pages before so that + accesses fail, up to and including the pages corresponding to the + nurseries of the other segments. */ + mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *segment_base = get_segment_base(i); + + /* In each segment, the first page is where TLPREFIX'ed + NULL accesses land. We mprotect it so that accesses fail. 
*/ + mprotect(segment_base, 4096, PROT_NONE); + + /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ + if (FIRST_READMARKER_PAGE > 2) + mprotect(segment_base + 8192, + (FIRST_READMARKER_PAGE - 2) * 4096UL, + PROT_NONE); + } + pages_setup_readmarkers_for_nursery(); +} + void stm_setup(void) { /* Check that some values are acceptable */ @@ -20,38 +56,19 @@ (FIRST_READMARKER_PAGE * 4096UL)); assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); - stm_object_pages = mmap(NULL, TOTAL_MEMORY, - PROT_READ | PROT_WRITE, - MAP_PAGES_FLAGS, -1, 0); - if (stm_object_pages == MAP_FAILED) - stm_fatalerror("initial stm_object_pages mmap() failed: %m\n"); - - /* The segment 0 is not used to run transactions, but to contain the - shared copy of the pages. We mprotect all pages before so that - accesses fail, up to and including the pages corresponding to the - nurseries of the other segments. */ - mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); + setup_protection_settings(); long i; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); - /* In each segment, the first page is where TLPREFIX'ed - NULL accesses land. We mprotect it so that accesses fail. 
*/ - mprotect(segment_base, 4096, PROT_NONE); - /* Fill the TLS page (page 1) with 0xDC, for debugging */ memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); - /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ - if (FIRST_READMARKER_PAGE > 2) - mprotect(segment_base + 8192, - (FIRST_READMARKER_PAGE - 2) * 4096UL, - PROT_NONE); - /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ @@ -83,6 +100,7 @@ setup_nursery(); setup_gcpage(); setup_pages(); + setup_forksupport(); } void stm_teardown(void) @@ -110,12 +128,11 @@ teardown_core(); teardown_sync(); teardown_gcpage(); - teardown_nursery(); teardown_smallmalloc(); teardown_pages(); } -void _init_shadow_stack(stm_thread_local_t *tl) +static void _init_shadow_stack(stm_thread_local_t *tl) { struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s)); @@ -124,13 +141,18 @@ tl->shadowstack_base = s; } -void _done_shadow_stack(stm_thread_local_t *tl) +static void _done_shadow_stack(stm_thread_local_t *tl) { free(tl->shadowstack_base); tl->shadowstack = NULL; tl->shadowstack_base = NULL; } +static pthread_t *_get_cpth(stm_thread_local_t *tl) +{ + assert(sizeof(pthread_t) <= sizeof(tl->creating_pthread)); + return (pthread_t *)(tl->creating_pthread); +} void stm_register_thread_local(stm_thread_local_t *tl) { @@ -148,12 +170,15 @@ num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; + tl->_timing_cur_state = STM_TIME_OUTSIDE_TRANSACTION; + tl->_timing_cur_start = get_stm_time(); /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own numbers automatically. 
*/ num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; + *_get_cpth(tl) = pthread_self(); _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); s_mutex_unlock(); @@ -162,6 +187,7 @@ void stm_unregister_thread_local(stm_thread_local_t *tl) { s_mutex_lock(); + assert(tl->prev != NULL); assert(tl->next != NULL); _done_shadow_stack(tl); if (tl == stm_all_thread_locals) { diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -31,7 +31,6 @@ pthread_cond_t cond[_C_TOTAL]; /* some additional pieces of global state follow */ uint8_t in_use1[NB_SEGMENTS]; /* 1 if running a pthread */ - uint64_t global_time; }; char reserved[192]; } sync_ctl __attribute__((aligned(64))); @@ -40,24 +39,24 @@ static void setup_sync(void) { if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0) - stm_fatalerror("mutex initialization: %m\n"); + stm_fatalerror("mutex initialization: %m"); long i; for (i = 0; i < _C_TOTAL; i++) { if (pthread_cond_init(&sync_ctl.cond[i], NULL) != 0) - stm_fatalerror("cond initialization: %m\n"); + stm_fatalerror("cond initialization: %m"); } } static void teardown_sync(void) { if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0) - stm_fatalerror("mutex destroy: %m\n"); + stm_fatalerror("mutex destroy: %m"); long i; for (i = 0; i < _C_TOTAL; i++) { if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0) - stm_fatalerror("cond destroy: %m\n"); + stm_fatalerror("cond destroy: %m"); } memset(&sync_ctl, 0, sizeof(sync_ctl)); @@ -74,14 +73,14 @@ static void set_gs_register(char *value) { if (UNLIKELY(syscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)value) != 0)) - stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m\n"); + stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m"); } static inline void s_mutex_lock(void) { assert(!_has_mutex_here); if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_mutex_lock: %m\n"); + stm_fatalerror("pthread_mutex_lock: %m"); assert((_has_mutex_here 
= true, 1)); } @@ -89,44 +88,45 @@ { assert(_has_mutex_here); if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_mutex_unlock: %m\n"); + stm_fatalerror("pthread_mutex_unlock: %m"); assert((_has_mutex_here = false, 1)); } static inline void cond_wait(enum cond_type_e ctype) { #ifdef STM_NO_COND_WAIT - stm_fatalerror("*** cond_wait/%d called!\n", (int)ctype); + stm_fatalerror("*** cond_wait/%d called!", (int)ctype); #endif assert(_has_mutex_here); if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype], &sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_cond_wait/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_wait/%d: %m", (int)ctype); } static inline void cond_signal(enum cond_type_e ctype) { if (UNLIKELY(pthread_cond_signal(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_signal/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_signal/%d: %m", (int)ctype); } static inline void cond_broadcast(enum cond_type_e ctype) { if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_broadcast/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_broadcast/%d: %m", (int)ctype); } /************************************************************/ -static void wait_for_end_of_inevitable_transaction(bool can_abort) +static void wait_for_end_of_inevitable_transaction( + stm_thread_local_t *tl_or_null_if_can_abort) { long i; restart: for (i = 1; i <= NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { - if (can_abort) { + if (tl_or_null_if_can_abort == NULL) { /* handle this case like a contention: it will either abort us (not the other thread, which is inevitable), or wait for a while. 
If we go past this call, then we @@ -137,7 +137,11 @@ else { /* wait for stm_commit_transaction() to finish this inevitable transaction */ + change_timing_state_tl(tl_or_null_if_can_abort, + STM_TIME_WAIT_INEVITABLE); cond_wait(C_INEVITABLE); + /* don't bother changing the timing state again: the caller + will very soon go to STM_TIME_RUN_CURRENT */ } goto restart; } @@ -178,6 +182,7 @@ } /* No segment available. Wait until release_thread_segment() signals that one segment has been freed. */ + change_timing_state_tl(tl, STM_TIME_WAIT_FREE_SEGMENT); cond_wait(C_SEGMENT_FREE); /* Return false to the caller, which will call us again */ @@ -188,7 +193,6 @@ assert(STM_SEGMENT->segment_num == num); assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; - STM_PSEGMENT->start_time = ++sync_ctl.global_time; return true; } @@ -306,6 +310,10 @@ static void enter_safe_point_if_requested(void) { + if (STM_SEGMENT->nursery_end == NURSERY_END) + return; /* fast path: no safe point requested */ + + int previous_state = -1; assert(_seems_to_be_running_transaction()); assert(_has_mutex()); while (1) { @@ -319,15 +327,24 @@ /* If we are requested to enter a safe-point, we cannot proceed now. Wait until the safe-point request is removed for us. 
*/ - +#ifdef STM_TESTS + abort_with_mutex(); +#endif + if (previous_state == -1) { + previous_state = change_timing_state(STM_TIME_SYNC_PAUSE); + } cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; cond_wait(C_REQUEST_REMOVED); STM_PSEGMENT->safe_point = SP_RUNNING; } + + if (previous_state != -1) { + change_timing_state(previous_state); + } } -static void synchronize_all_threads(void) +static void synchronize_all_threads(enum sync_type_e sync_type) { enter_safe_point_if_requested(); @@ -335,7 +352,13 @@ why: if several threads call this function, the first one that goes past this point will set the "request safe point" on all other threads; then none of the other threads will go past the - enter_safe_point_if_requested() above. */ + enter_safe_point_if_requested() above. + */ + if (UNLIKELY(globally_unique_transaction)) { + assert(count_other_threads_sp_running() == 0); + return; + } + signal_everybody_to_pause_running(); /* If some other threads are SP_RUNNING, we cannot proceed now. @@ -352,6 +375,13 @@ } } + if (UNLIKELY(sync_type == STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE)) { + globally_unique_transaction = true; + assert(STM_SEGMENT->nursery_end == NSE_SIGPAUSE); + STM_SEGMENT->nursery_end = NURSERY_END; + return; /* don't remove the requests for safe-points in this case */ + } + /* Remove the requests for safe-points now. In principle we should remove it later, when the caller is done, but this is equivalent as long as we hold the mutex. 
@@ -359,6 +389,15 @@ remove_requests_for_safe_point(); /* => C_REQUEST_REMOVED */ } +static void committed_globally_unique_transaction(void) +{ + assert(globally_unique_transaction); + assert(STM_SEGMENT->nursery_end == NURSERY_END); + STM_SEGMENT->nursery_end = NSE_SIGPAUSE; + globally_unique_transaction = false; + remove_requests_for_safe_point(); +} + void _stm_collectable_safe_point(void) { /* If 'nursery_end' was set to NSE_SIGxxx by another thread, diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -28,7 +28,13 @@ static bool acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); -static void wait_for_end_of_inevitable_transaction(bool can_abort); -static void synchronize_all_threads(void); +static void wait_for_end_of_inevitable_transaction(stm_thread_local_t *); -static bool pause_signalled; +enum sync_type_e { + STOP_OTHERS_UNTIL_MUTEX_UNLOCK, + STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE, +}; +static void synchronize_all_threads(enum sync_type_e sync_type); +static void committed_globally_unique_transaction(void); + +static bool pause_signalled, globally_unique_transaction; diff --git a/c7/stm/timing.c b/c7/stm/timing.c new file mode 100644 --- /dev/null +++ b/c7/stm/timing.c @@ -0,0 +1,79 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static inline void add_timing(stm_thread_local_t *tl, enum stm_time_e category, + double elapsed) +{ + tl->timing[category] += elapsed; + tl->events[category] += 1; +} + +#define TIMING_CHANGE(tl, newstate) \ + double curtime = get_stm_time(); \ + double elasped = curtime - tl->_timing_cur_start; \ + enum stm_time_e oldstate = tl->_timing_cur_state; \ + add_timing(tl, oldstate, elasped); \ + tl->_timing_cur_state = newstate; \ + tl->_timing_cur_start = curtime + +static enum stm_time_e change_timing_state(enum stm_time_e newstate) +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + TIMING_CHANGE(tl, 
newstate); + return oldstate; +} + +static void change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate) +{ + TIMING_CHANGE(tl, newstate); +} + +static void timing_end_transaction(enum stm_time_e attribute_to) +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); + add_timing(tl, attribute_to, tl->timing[STM_TIME_RUN_CURRENT]); + tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; +} + +static const char *timer_names[] = { + "outside transaction", + "run current", + "run committed", + "run aborted write write", + "run aborted write read", + "run aborted inevitable", + "run aborted other", + "wait free segment", + "wait write read", + "wait inevitable", + "wait other", + "bookkeeping", + "minor gc", + "major gc", + "sync pause", + "spin loop", +}; + +void stm_flush_timing(stm_thread_local_t *tl, int verbose) +{ + enum stm_time_e category = tl->_timing_cur_state; + uint64_t oldevents = tl->events[category]; + TIMING_CHANGE(tl, category); + tl->events[category] = oldevents; + + assert((sizeof(timer_names) / sizeof(timer_names[0])) == _STM_TIME_N); + if (verbose > 0) { + int i; + s_mutex_lock(); + fprintf(stderr, "thread %p:\n", tl); + for (i = 0; i < _STM_TIME_N; i++) { + fprintf(stderr, " %-24s %9u %.3f s\n", + timer_names[i], tl->events[i], (double)tl->timing[i]); + } + s_mutex_unlock(); + } +} diff --git a/c7/stm/timing.h b/c7/stm/timing.h new file mode 100644 --- /dev/null +++ b/c7/stm/timing.h @@ -0,0 +1,14 @@ +#include + +static inline double get_stm_time(void) +{ + struct timespec tp; + clock_gettime(CLOCK_MONOTONIC, &tp); + return tp.tv_sec + tp.tv_nsec * 0.000000001; +} + +static enum stm_time_e change_timing_state(enum stm_time_e newstate); +static void change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate); + +static void timing_end_transaction(enum stm_time_e attribute_to); diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ 
-125,7 +125,9 @@ } ssize_t size = 16; - object_t *pointing_to = *WEAKREF_PTR(weakref, size); + stm_char *wr = (stm_char *)WEAKREF_PTR(weakref, size); + char *real_wr = REAL_ADDRESS(pseg->pub.segment_base, wr); + object_t *pointing_to = *(object_t **)real_wr; assert(pointing_to != NULL); if (!mark_visited_test(pointing_to)) { //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -14,6 +14,7 @@ #include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/weakref.h" +#include "stm/timing.h" #include "stm/misc.c" #include "stm/list.c" @@ -25,6 +26,7 @@ #include "stm/smallmalloc.c" #include "stm/nursery.c" #include "stm/sync.c" +#include "stm/forksupport.c" #include "stm/setup.c" #include "stm/hash_id.c" #include "stm/core.c" @@ -32,3 +34,4 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" +#include "stm/timing.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -54,6 +54,26 @@ object_t *ss; }; +enum stm_time_e { + STM_TIME_OUTSIDE_TRANSACTION, + STM_TIME_RUN_CURRENT, + STM_TIME_RUN_COMMITTED, + STM_TIME_RUN_ABORTED_WRITE_WRITE, + STM_TIME_RUN_ABORTED_WRITE_READ, + STM_TIME_RUN_ABORTED_INEVITABLE, + STM_TIME_RUN_ABORTED_OTHER, + STM_TIME_WAIT_FREE_SEGMENT, + STM_TIME_WAIT_WRITE_READ, + STM_TIME_WAIT_INEVITABLE, + STM_TIME_WAIT_OTHER, + STM_TIME_BOOKKEEPING, + STM_TIME_MINOR_GC, + STM_TIME_MAJOR_GC, + STM_TIME_SYNC_PAUSE, + STM_TIME_SPIN_LOOP, + _STM_TIME_N +}; + typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -66,9 +86,15 @@ /* after an abort, some details about the abort are stored there. 
(these fields are not modified on a successful commit) */ long last_abort__bytes_in_nursery; + /* timing information, accumulated */ + uint32_t events[_STM_TIME_N]; + float timing[_STM_TIME_N]; + double _timing_cur_start; + enum stm_time_e _timing_cur_state; /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; + void *creating_pthread[2]; } stm_thread_local_t; /* this should use llvm's coldcc calling convention, @@ -112,7 +138,7 @@ #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 -#define _STM_NSE_SIGNAL_MAX 1 +#define _STM_NSE_SIGNAL_MAX _STM_TIME_N #define _STM_FAST_ALLOC (66*1024) From noreply at buildbot.pypy.org Sat Apr 5 19:38:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 19:38:04 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: in-progress Message-ID: <20140405173804.D1B531C350A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1130:b75f80e3f905 Date: 2014-04-05 19:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/b75f80e3f905/ Log: in-progress diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -64,7 +64,12 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up) { /* only for tests xxx but stm_setup_prebuilt() uses this now too */ - char *p = allocate_outside_nursery_large(size_rounded_up); + char *p; + if (size_rounded_up > GC_LAST_SMALL_SIZE) + p = allocate_outside_nursery_large(size_rounded_up); + else + p = allocate_outside_nursery_small(size_rounded_up); + memset(p, 0, size_rounded_up); object_t *o = (object_t *)(p - stm_object_pages); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -103,7 +103,7 @@ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size = stmcb_size_rounded_up((struct object_s *)realobj); - if (size >= GC_N_SMALL_REQUESTS) { + if (size > GC_LAST_SMALL_SIZE) { /* case 1: object is not small 
enough. Ask gcpage.c for an allocation via largemalloc. */ diff --git a/c7/stm/smallmalloc.c b/c7/stm/smallmalloc.c --- a/c7/stm/smallmalloc.c +++ b/c7/stm/smallmalloc.c @@ -3,10 +3,33 @@ #endif +#define PAGE_SMSIZE_START END_NURSERY_PAGE +#define PAGE_SMSIZE_END NB_PAGES + +typedef struct { + uint8_t sz; +} fpsz_t; + +static fpsz_t full_pages_object_size[PAGE_SMSIZE_END - PAGE_SMSIZE_START]; +/* ^^^ This array contains the size (in number of words) of the objects + in the given page, provided it's a "full page of small objects". It + is 0 if it's not such a page, if it's fully free, or if it's in + small_page_lists. It is not 0 as soon as the page enters the + segment's 'small_malloc_data.loc_free' (even if the page is not + technically full yet, it will be very soon in this case). +*/ + +static fpsz_t *get_fp_sz(char *smallpage) +{ + uintptr_t pagenum = (((char *)smallpage) - stm_object_pages) / 4096; + return &full_pages_object_size[pagenum - PAGE_SMSIZE_START]; +} + + static void teardown_smallmalloc(void) { memset(small_page_lists, 0, sizeof(small_page_lists)); - assert(free_uniform_pages == NULL); + assert(free_uniform_pages == NULL); /* done by the previous line */ first_small_uniform_loc = (uintptr_t) -1; } @@ -69,6 +92,7 @@ /* Succeeded: we have a page in 'smallpage' */ *fl = smallpage->header.next; + get_fp_sz((char *)smallpage)->sz = n; return (char *)smallpage; } @@ -101,6 +125,7 @@ /* The first slot is immediately returned */ *fl = following; + get_fp_sz((char *)smallpage)->sz = n; return (char *)smallpage; } @@ -114,7 +139,7 @@ static inline char *allocate_outside_nursery_small(uint64_t size) { OPT_ASSERT((size & 7) == 0); - OPT_ASSERT(16 <= size && size < 8 * GC_N_SMALL_REQUESTS); + OPT_ASSERT(16 <= size && size <= GC_LAST_SMALL_SIZE); struct small_free_loc_s *TLPREFIX *fl = &STM_PSEGMENT->small_malloc_data.loc_free[size / 8]; @@ -127,3 +152,28 @@ *fl = result->next; return (char *)result; } + +void _stm_smallmalloc_sweep(void) +{ + long i; + for (i 
= 2; i < GC_N_SMALL_REQUESTS; i++) { + struct small_page_list_s *page = small_page_lists[i]; + while (page != NULL) { + /* for every page in small_page_lists: assert that the + corresponding full_pages_object_size[] entry is 0 */ + assert(get_fp_sz((char *)page)->sz == 0); + abort(); // walk + page = page->nextpage; + } + } + + fpsz_t *fpsz_start = get_fp_sz(uninitialized_page_stop); + fpsz_t *fpsz_end = &full_pages_object_size[PAGE_SMSIZE_END - + PAGE_SMSIZE_START]; + fpsz_t *fpsz; + for (fpsz = fpsz_start; fpsz < fpsz_end; fpsz++) { + if (fpsz->sz != 0) { + abort(); // walk + } + } +} diff --git a/c7/stm/smallmalloc.h b/c7/stm/smallmalloc.h --- a/c7/stm/smallmalloc.h +++ b/c7/stm/smallmalloc.h @@ -8,6 +8,7 @@ */ #define GC_N_SMALL_REQUESTS 36 +#define GC_LAST_SMALL_SIZE (8 * (GC_N_SMALL_REQUESTS - 1)) struct small_free_loc_s { @@ -19,8 +20,9 @@ free. */ struct small_free_loc_s header; - /* A chained list of all small pages containing objects of - a given small size, and that have at least one free object. */ + /* A chained list of all small pages containing objects of a given + small size, and that have at least one free object. It points + *inside* the next page, to another struct small_page_list_s. 
*/ struct small_page_list_s *nextpage; /* This structure is only two words, so it always fits inside one @@ -57,7 +59,8 @@ static inline char *allocate_outside_nursery_small(uint64_t size) __attribute__((always_inline)); -static char *_allocate_small_slowpath(uint64_t size); +void _stm_smallmalloc_sweep(void); + static void teardown_smallmalloc(void); static inline bool is_small_uniform(object_t *obj) { diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -125,6 +125,8 @@ void _stm_large_dump(void); bool (*_stm_largemalloc_keep)(char *data); void _stm_largemalloc_sweep(void); +bool (*_stm_smallmalloc_keep)(char *data); +void _stm_smallmalloc_sweep(void); void _stm_start_safe_point(void); void _stm_stop_safe_point(void); void _stm_set_nursery_free_count(uint64_t free_count); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -81,6 +81,7 @@ void *memset(void *s, int c, size_t n); bool (*_stm_largemalloc_keep)(char *data); void _stm_largemalloc_sweep(void); +void _stm_smallmalloc_sweep(void); ssize_t stmcb_size_rounded_up(struct object_s *obj); @@ -290,6 +291,8 @@ assert HDR == 8 GCFLAG_WRITE_BARRIER = lib._STM_GCFLAG_WRITE_BARRIER NB_SEGMENTS = lib.STM_NB_SEGMENTS +GC_N_SMALL_REQUESTS = 36 +GC_LAST_SMALL_SIZE = 8 * (GC_N_SMALL_REQUESTS - 1) class Conflict(Exception): diff --git a/c7/test/test_smallmalloc.py b/c7/test/test_smallmalloc.py new file mode 100644 --- /dev/null +++ b/c7/test/test_smallmalloc.py @@ -0,0 +1,32 @@ +from support import * + + +def pageof(p): + return int(ffi.cast("uintptr_t", p)) >> 12 + + +class TestLargeMalloc(BaseTest): + + def test_simple_uniform(self): + page0 = [stm_allocate_old(16) for i in range(0, 4096, 16)] + assert len(set(map(pageof, page0))) == 1 + # + page1 = [stm_allocate_old(16) for i in range(0, 4096, 16)] + assert len(set(map(pageof, page1))) == 1 + # + assert len(set(map(pageof, page0 + page1))) == 2 + + def test_different_sizes_different_pages(self): 
+ seen = [] + for i in range(2, GC_N_SMALL_REQUESTS): + p = pageof(stm_allocate_old(8 * i)) + assert p not in seen + seen.append(p) + for i in range(2, GC_N_SMALL_REQUESTS): + p = pageof(stm_allocate_old(8 * i)) + assert p == seen[0] + seen.pop(0) + + def test_sweep_freeing(self): + p1 = stm_allocate_old(16) + lib._stm_smallmalloc_sweep() From noreply at buildbot.pypy.org Sat Apr 5 19:55:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 19:55:10 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: in-progress Message-ID: <20140405175510.F3D391C350A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1131:48f7f40cd3b2 Date: 2014-04-05 19:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/48f7f40cd3b2/ Log: in-progress diff --git a/c7/stm/smallmalloc.c b/c7/stm/smallmalloc.c --- a/c7/stm/smallmalloc.c +++ b/c7/stm/smallmalloc.c @@ -153,27 +153,59 @@ return (char *)result; } +void sweep_small_page_full(char *page, long szword) +{ + abort(); +} + +void sweep_small_page_partial(struct small_free_loc_s *free_loc, long szword) +{ + abort(); +} + void _stm_smallmalloc_sweep(void) { - long i; - for (i = 2; i < GC_N_SMALL_REQUESTS; i++) { - struct small_page_list_s *page = small_page_lists[i]; + long i, szword; + for (szword = 2; szword < GC_N_SMALL_REQUESTS; szword++) { + struct small_page_list_s *page = small_page_lists[szword]; + struct small_page_list_s *nextpage; + small_page_lists[szword] = NULL; + + /* process the pages that the various segments are busy filling */ + for (i = 1; i <= NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct small_free_loc_s **fl = + &pseg->small_malloc_data.loc_free[szword]; + if (*fl != NULL) { + /* the entry in full_pages_object_size[] should already be + szword. We reset it to 0. 
*/ + fpsz_t *fpsz = get_fp_sz((char *)*fl); + assert(fpsz->sz == szword); + fpsz->sz = 0; + sweep_small_page_partial(*fl, szword); + *fl = NULL; + } + } + + /* process all the other partially-filled pages */ while (page != NULL) { /* for every page in small_page_lists: assert that the corresponding full_pages_object_size[] entry is 0 */ assert(get_fp_sz((char *)page)->sz == 0); - abort(); // walk - page = page->nextpage; + nextpage = page->nextpage; + sweep_small_page_partial(&page->header, szword); + page = nextpage; } } - fpsz_t *fpsz_start = get_fp_sz(uninitialized_page_stop); + char *pageptr = uninitialized_page_stop; + fpsz_t *fpsz_start = get_fp_sz(pageptr); fpsz_t *fpsz_end = &full_pages_object_size[PAGE_SMSIZE_END - PAGE_SMSIZE_START]; fpsz_t *fpsz; - for (fpsz = fpsz_start; fpsz < fpsz_end; fpsz++) { + for (fpsz = fpsz_start; fpsz < fpsz_end; fpsz++, pageptr += 4096) { if (fpsz->sz != 0) { - abort(); // walk + sweep_small_page_full(pageptr, fpsz->sz); } } } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -81,6 +81,7 @@ void *memset(void *s, int c, size_t n); bool (*_stm_largemalloc_keep)(char *data); void _stm_largemalloc_sweep(void); +bool (*_stm_smallmalloc_keep)(char *data); void _stm_smallmalloc_sweep(void); ssize_t stmcb_size_rounded_up(struct object_s *obj); From noreply at buildbot.pypy.org Sat Apr 5 20:47:37 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 5 Apr 2014 20:47:37 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove unused feature Message-ID: <20140405184737.3C3FD1C3359@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70473:6a6e8602aa36 Date: 2014-04-05 11:47 -0700 http://bitbucket.org/pypy/pypy/changeset/6a6e8602aa36/ Log: Remove unused feature diff --git a/rpython/flowspace/test/test_unroll.py b/rpython/flowspace/test/test_unroll.py --- a/rpython/flowspace/test/test_unroll.py +++ b/rpython/flowspace/test/test_unroll.py @@ -1,23 +1,10 @@ 
import operator + from rpython.flowspace.test.test_objspace import Base -from rpython.rlib.unroll import unrolling_zero, unrolling_iterable +from rpython.rlib.unroll import unrolling_iterable + class TestUnroll(Base): - - def test_unrolling_int(self): - l = range(10) - def f(tot): - i = unrolling_zero - while i < len(l): - tot += l[i] - i = i + 1 - return tot*2 - assert f(0) == sum(l)*2 - - graph = self.codetest(f) - ops = self.all_operations(graph) - assert ops == {'inplace_add': 10, 'mul': 1} - def test_unroller(self): l = unrolling_iterable(range(10)) def f(tot): diff --git a/rpython/rlib/unroll.py b/rpython/rlib/unroll.py --- a/rpython/rlib/unroll.py +++ b/rpython/rlib/unroll.py @@ -6,31 +6,14 @@ class SpecTag(object): __slots__ = () - + def __repr__(self): return '%s(0x%x)' % (self.__class__.__name__, uid(self)) + def _freeze_(self): return True -class unrolling_int(int, SpecTag): - - def __add__(self, other): - return unrolling_int(int.__add__(self, other)) - - __radd__ = __add__ - - def __sub__(self, other): - return unrolling_int(int.__sub__(self, other)) - - def __rsub__(self, other): - return unrolling_int(int.__rsub__(self, other)) - - -unrolling_zero = unrolling_int(0) - -# ____________________________________________________________ - # 'for' iteration over iterables wrapped in an instance # of unrolling_iterable will be unrolled by the flow space, # like in: From noreply at buildbot.pypy.org Sat Apr 5 22:18:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 22:18:13 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: Tests pass so far Message-ID: <20140405201814.058D01C3566@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1132:2943d03c84e8 Date: 2014-04-05 22:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/2943d03c84e8/ Log: Tests pass so far diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -64,12 +64,7 @@ object_t 
*_stm_allocate_old(ssize_t size_rounded_up) { /* only for tests xxx but stm_setup_prebuilt() uses this now too */ - char *p; - if (size_rounded_up > GC_LAST_SMALL_SIZE) - p = allocate_outside_nursery_large(size_rounded_up); - else - p = allocate_outside_nursery_small(size_rounded_up); - + char *p = allocate_outside_nursery_large(size_rounded_up); memset(p, 0, size_rounded_up); object_t *o = (object_t *)(p - stm_object_pages); diff --git a/c7/stm/smallmalloc.c b/c7/stm/smallmalloc.c --- a/c7/stm/smallmalloc.c +++ b/c7/stm/smallmalloc.c @@ -19,18 +19,27 @@ technically full yet, it will be very soon in this case). */ -static fpsz_t *get_fp_sz(char *smallpage) +static fpsz_t *get_fpsz(char *smallpage) { uintptr_t pagenum = (((char *)smallpage) - stm_object_pages) / 4096; + assert(PAGE_SMSIZE_START <= pagenum && pagenum < PAGE_SMSIZE_END); return &full_pages_object_size[pagenum - PAGE_SMSIZE_START]; } +#ifdef STM_TESTS +bool (*_stm_smallmalloc_keep)(char *data); /* a hook for tests */ +#endif + static void teardown_smallmalloc(void) { memset(small_page_lists, 0, sizeof(small_page_lists)); assert(free_uniform_pages == NULL); /* done by the previous line */ first_small_uniform_loc = (uintptr_t) -1; +#ifdef STM_TESTS + _stm_smallmalloc_keep = NULL; +#endif + memset(full_pages_object_size, 0, sizeof(full_pages_object_size)); } static void grab_more_free_pages_for_small_allocations(void) @@ -59,8 +68,8 @@ char *p = uninitialized_page_stop; long i; for (i = 0; i < GCPAGE_NUM_PAGES; i++) { - ((struct small_page_list_s *)p)->nextpage = free_uniform_pages; - free_uniform_pages = (struct small_page_list_s *)p; + ((struct small_free_loc_s *)p)->nextpage = free_uniform_pages; + free_uniform_pages = (struct small_free_loc_s *)p; p += 4096; } } @@ -75,7 +84,7 @@ static char *_allocate_small_slowpath(uint64_t size) { long n = size / 8; - struct small_page_list_s *smallpage; + struct small_free_loc_s *smallpage; struct small_free_loc_s *TLPREFIX *fl = 
&STM_PSEGMENT->small_malloc_data.loc_free[n]; assert(*fl == NULL); @@ -91,8 +100,8 @@ goto retry; /* Succeeded: we have a page in 'smallpage' */ - *fl = smallpage->header.next; - get_fp_sz((char *)smallpage)->sz = n; + *fl = smallpage->next; + get_fpsz((char *)smallpage)->sz = n; return (char *)smallpage; } @@ -110,22 +119,24 @@ initialized so far, apart from the 'nextpage' field read above. Initialize it. */ + struct small_free_loc_s *p, **previous; assert(!(((uintptr_t)smallpage) & 4095)); - struct small_free_loc_s *p, *following = NULL; + previous = (struct small_free_loc_s **) + REAL_ADDRESS(STM_SEGMENT->segment_base, fl); /* Initialize all slots from the second one to the last one to contain a chained list */ uintptr_t i = size; while (i <= 4096 - size) { p = (struct small_free_loc_s *)(((char *)smallpage) + i); - p->next = following; - following = p; + *previous = p; + previous = &p->next; i += size; } + *previous = NULL; /* The first slot is immediately returned */ - *fl = following; - get_fp_sz((char *)smallpage)->sz = n; + get_fpsz((char *)smallpage)->sz = n; return (char *)smallpage; } @@ -153,22 +164,97 @@ return (char *)result; } +object_t *_stm_allocate_old_small(ssize_t size_rounded_up) +{ + char *p = allocate_outside_nursery_small(size_rounded_up); + return (object_t *)(p - stm_object_pages); +} + +/************************************************************/ + +static inline bool _smallmalloc_sweep_keep(char *p) +{ +#ifdef STM_TESTS + if (_stm_smallmalloc_keep != NULL) + return _stm_smallmalloc_keep(p); +#endif + abort(); + //return smallmalloc_keep_object_at(p); +} + +void check_order_inside_small_page(struct small_free_loc_s *page) +{ +#ifndef NDEBUG + /* the free locations are supposed to be in increasing order */ + while (page->next != NULL) { + assert(page->next > page); + page = page->next; + } +#endif +} + void sweep_small_page_full(char *page, long szword) { abort(); } -void sweep_small_page_partial(struct small_free_loc_s *free_loc, long 
szword) +void sweep_small_page_partial(struct small_free_loc_s *page, long szword) { - abort(); + check_order_inside_small_page(page); + + /* for every non-free location, ask if we must free it */ + char *baseptr = (char *)(((uintptr_t)page) & ~4095); + uintptr_t i, size = szword * 8; + bool any_object_remaining = false; + struct small_free_loc_s *fl = page; + struct small_free_loc_s *flprev = NULL; + + /* XXX could optimize for the case where all objects die: we don't + need to painfully rebuild the free list in the whole page, just + to have it ignored in the end because we put the page into + 'free_uniform_pages' */ + + for (i = 0; i <= 4096 - size; i += size) { + char *p = baseptr + i; + if (p == (char *)fl) { + /* location is already free */ + flprev = fl; + fl = fl->next; + } + else if (_smallmalloc_sweep_keep(p)) { + /* the location should be freed now */ + if (flprev == NULL) { + flprev = (struct small_free_loc_s *)p; + flprev->next = fl; + page = flprev; + } + else { + assert(flprev->next == fl); + flprev->next = (struct small_free_loc_s *)p; + flprev->next->next = fl; + } + } + else { + any_object_remaining = true; + } + } + if (any_object_remaining) { + check_order_inside_small_page(page); + page->nextpage = small_page_lists[szword]; + small_page_lists[szword] = page; + } + else { + ((struct small_free_loc_s *)baseptr)->nextpage = free_uniform_pages; + free_uniform_pages = (struct small_free_loc_s *)baseptr; + } } void _stm_smallmalloc_sweep(void) { long i, szword; for (szword = 2; szword < GC_N_SMALL_REQUESTS; szword++) { - struct small_page_list_s *page = small_page_lists[szword]; - struct small_page_list_s *nextpage; + struct small_free_loc_s *page = small_page_lists[szword]; + struct small_free_loc_s *nextpage; small_page_lists[szword] = NULL; /* process the pages that the various segments are busy filling */ @@ -179,7 +265,7 @@ if (*fl != NULL) { /* the entry in full_pages_object_size[] should already be szword. We reset it to 0. 
*/ - fpsz_t *fpsz = get_fp_sz((char *)*fl); + fpsz_t *fpsz = get_fpsz((char *)*fl); assert(fpsz->sz == szword); fpsz->sz = 0; sweep_small_page_partial(*fl, szword); @@ -191,15 +277,17 @@ while (page != NULL) { /* for every page in small_page_lists: assert that the corresponding full_pages_object_size[] entry is 0 */ - assert(get_fp_sz((char *)page)->sz == 0); + assert(get_fpsz((char *)page)->sz == 0); nextpage = page->nextpage; - sweep_small_page_partial(&page->header, szword); + sweep_small_page_partial(page, szword); page = nextpage; } } + /* process the really full pages, which are the ones which still + have a non-zero full_pages_object_size[] entry */ char *pageptr = uninitialized_page_stop; - fpsz_t *fpsz_start = get_fp_sz(pageptr); + fpsz_t *fpsz_start = get_fpsz(pageptr); fpsz_t *fpsz_end = &full_pages_object_size[PAGE_SMSIZE_END - PAGE_SMSIZE_START]; fpsz_t *fpsz; diff --git a/c7/stm/smallmalloc.h b/c7/stm/smallmalloc.h --- a/c7/stm/smallmalloc.h +++ b/c7/stm/smallmalloc.h @@ -12,18 +12,16 @@ struct small_free_loc_s { - struct small_free_loc_s *next; -}; - -struct small_page_list_s { /* A chained list of locations within the same page which are free. */ - struct small_free_loc_s header; + struct small_free_loc_s *next; /* A chained list of all small pages containing objects of a given small size, and that have at least one free object. It points - *inside* the next page, to another struct small_page_list_s. */ - struct small_page_list_s *nextpage; + *inside* the next page, to another struct small_free_loc_s. This + field is only meaningful on the first small_free_loc_s of a given + page! */ + struct small_free_loc_s *nextpage; /* This structure is only two words, so it always fits inside one free slot inside the page. */ @@ -36,7 +34,7 @@ is a chained list of fully-free pages (which can be reused for a different size than the one they originally contained). 
*/ -static struct small_page_list_s *small_page_lists[GC_N_SMALL_REQUESTS]; +static struct small_free_loc_s *small_page_lists[GC_N_SMALL_REQUESTS]; #define free_uniform_pages (small_page_lists[0]) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -125,6 +125,7 @@ void _stm_large_dump(void); bool (*_stm_largemalloc_keep)(char *data); void _stm_largemalloc_sweep(void); +object_t *_stm_allocate_old_small(ssize_t size_rounded_up); bool (*_stm_smallmalloc_keep)(char *data); void _stm_smallmalloc_sweep(void); void _stm_start_safe_point(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -81,6 +81,7 @@ void *memset(void *s, int c, size_t n); bool (*_stm_largemalloc_keep)(char *data); void _stm_largemalloc_sweep(void); +object_t *_stm_allocate_old_small(ssize_t size_rounded_up); bool (*_stm_smallmalloc_keep)(char *data); void _stm_smallmalloc_sweep(void); @@ -317,6 +318,12 @@ lib._set_type_id(o, tid) return o +def stm_allocate_old_small(size): + o = lib._stm_allocate_old_small(size) + tid = 42 + size + lib._set_type_id(o, tid) + return o + def stm_allocate(size): o = lib.stm_allocate(size) tid = 42 + size diff --git a/c7/test/test_smallmalloc.py b/c7/test/test_smallmalloc.py --- a/c7/test/test_smallmalloc.py +++ b/c7/test/test_smallmalloc.py @@ -5,13 +5,22 @@ return int(ffi.cast("uintptr_t", p)) >> 12 -class TestLargeMalloc(BaseTest): +class TestSmallMalloc(BaseTest): + + def setup_method(self, method): + BaseTest.setup_method(self, method) + @ffi.callback("bool(char *)") + def keep(data): + return data in self.keep_me + lib._stm_smallmalloc_keep = keep + self._keepalive_keep_function = keep + self.keep_me = set() def test_simple_uniform(self): - page0 = [stm_allocate_old(16) for i in range(0, 4096, 16)] + page0 = [stm_allocate_old_small(16) for i in range(0, 4096, 16)] assert len(set(map(pageof, page0))) == 1 # - page1 = [stm_allocate_old(16) for i in range(0, 4096, 16)] + page1 = 
[stm_allocate_old_small(16) for i in range(0, 4096, 16)] assert len(set(map(pageof, page1))) == 1 # assert len(set(map(pageof, page0 + page1))) == 2 @@ -19,14 +28,14 @@ def test_different_sizes_different_pages(self): seen = [] for i in range(2, GC_N_SMALL_REQUESTS): - p = pageof(stm_allocate_old(8 * i)) + p = pageof(stm_allocate_old_small(8 * i)) assert p not in seen seen.append(p) for i in range(2, GC_N_SMALL_REQUESTS): - p = pageof(stm_allocate_old(8 * i)) + p = pageof(stm_allocate_old_small(8 * i)) assert p == seen[0] seen.pop(0) def test_sweep_freeing(self): - p1 = stm_allocate_old(16) + p1 = stm_allocate_old_small(16) lib._stm_smallmalloc_sweep() From noreply at buildbot.pypy.org Sat Apr 5 22:37:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 22:37:37 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: Better test, bug fixes Message-ID: <20140405203737.4D8871D23C0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1133:0e454afc4c81 Date: 2014-04-05 22:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/0e454afc4c81/ Log: Better test, bug fixes diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -175,7 +175,10 @@ TS_INEVITABLE, }; -static char *stm_object_pages; +#ifndef STM_TESTS +static +#endif + char *stm_object_pages; static stm_thread_local_t *stm_all_thread_locals = NULL; static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; diff --git a/c7/stm/smallmalloc.c b/c7/stm/smallmalloc.c --- a/c7/stm/smallmalloc.c +++ b/c7/stm/smallmalloc.c @@ -221,7 +221,7 @@ flprev = fl; fl = fl->next; } - else if (_smallmalloc_sweep_keep(p)) { + else if (!_smallmalloc_sweep_keep(p)) { /* the location should be freed now */ if (flprev == NULL) { flprev = (struct small_free_loc_s *)p; @@ -231,7 +231,8 @@ else { assert(flprev->next == fl); flprev->next = (struct small_free_loc_s *)p; - flprev->next->next = fl; + flprev = (struct small_free_loc_s *)p; + flprev->next = fl; } 
} else { diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -138,6 +138,7 @@ uint64_t _stm_total_allocated(void); void _stm_mutex_pages_lock(void); void _stm_mutex_pages_unlock(void); +char *stm_object_pages; #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -29,6 +29,8 @@ ...; } stm_thread_local_t; +char *stm_object_pages; + void stm_read(object_t *obj); /*void stm_write(object_t *obj); use _checked_stm_write() instead */ object_t *stm_allocate(ssize_t size_rounded_up); diff --git a/c7/test/test_smallmalloc.py b/c7/test/test_smallmalloc.py --- a/c7/test/test_smallmalloc.py +++ b/c7/test/test_smallmalloc.py @@ -1,4 +1,5 @@ from support import * +import random def pageof(p): @@ -11,10 +12,13 @@ BaseTest.setup_method(self, method) @ffi.callback("bool(char *)") def keep(data): - return data in self.keep_me + p = ffi.cast("object_t *", data - lib.stm_object_pages) + self.has_been_asked_for.append(p) + return p in self.keep_me lib._stm_smallmalloc_keep = keep self._keepalive_keep_function = keep self.keep_me = set() + self.has_been_asked_for = [] def test_simple_uniform(self): page0 = [stm_allocate_old_small(16) for i in range(0, 4096, 16)] @@ -36,6 +40,29 @@ assert p == seen[0] seen.pop(0) - def test_sweep_freeing(self): + def test_sweep_freeing_simple(self): p1 = stm_allocate_old_small(16) lib._stm_smallmalloc_sweep() + + def test_sweep_freeing_random_subset(self): + for i in range(50): + page0 = [stm_allocate_old_small(16) for i in range(0, 4096-16, 16)] + assert len(set(map(pageof, page0))) == 1 + tid = lib._get_type_id(page0[0]) + while len(page0) > 0: + self.keep_me = set(random.sample(page0, len(page0) // 2)) + self.has_been_asked_for = [] + lib._stm_smallmalloc_sweep() + assert sorted(page0) == self.has_been_asked_for + page0r = [] + for p in page0: + if p in self.keep_me: + assert lib._get_type_id(p) == tid + page0r.append(p) + else: + 
assert lib._get_type_id(p) != tid + page0 = page0r + if len(page0) > 10: + p = stm_allocate_old_small(16) + assert pageof(p) == pageof(page0[0]) + page0.append(p) From noreply at buildbot.pypy.org Sat Apr 5 22:46:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Apr 2014 22:46:48 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: Finish the first version Message-ID: <20140405204648.114771C10C6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1134:26a9bd774594 Date: 2014-04-05 22:46 +0200 http://bitbucket.org/pypy/stmgc/changeset/26a9bd774594/ Log: Finish the first version diff --git a/c7/stm/smallmalloc.c b/c7/stm/smallmalloc.c --- a/c7/stm/smallmalloc.c +++ b/c7/stm/smallmalloc.c @@ -193,20 +193,21 @@ #endif } -void sweep_small_page_full(char *page, long szword) +static char *getbaseptr(struct small_free_loc_s *fl) { - abort(); + return (char *)(((uintptr_t)fl) & ~4095); } -void sweep_small_page_partial(struct small_free_loc_s *page, long szword) +void sweep_small_page(char *baseptr, struct small_free_loc_s *page_free, + long szword) { - check_order_inside_small_page(page); + if (page_free != NULL) + check_order_inside_small_page(page_free); /* for every non-free location, ask if we must free it */ - char *baseptr = (char *)(((uintptr_t)page) & ~4095); uintptr_t i, size = szword * 8; - bool any_object_remaining = false; - struct small_free_loc_s *fl = page; + bool any_object_remaining = false, any_object_dying = false; + struct small_free_loc_s *fl = page_free; struct small_free_loc_s *flprev = NULL; /* XXX could optimize for the case where all objects die: we don't @@ -220,13 +221,14 @@ /* location is already free */ flprev = fl; fl = fl->next; + any_object_dying = true; } else if (!_smallmalloc_sweep_keep(p)) { /* the location should be freed now */ if (flprev == NULL) { flprev = (struct small_free_loc_s *)p; flprev->next = fl; - page = flprev; + page_free = flprev; } else { assert(flprev->next == fl); 
@@ -234,19 +236,23 @@ flprev = (struct small_free_loc_s *)p; flprev->next = fl; } + any_object_dying = true; } else { any_object_remaining = true; } } - if (any_object_remaining) { - check_order_inside_small_page(page); - page->nextpage = small_page_lists[szword]; - small_page_lists[szword] = page; + if (!any_object_remaining) { + ((struct small_free_loc_s *)baseptr)->nextpage = free_uniform_pages; + free_uniform_pages = (struct small_free_loc_s *)baseptr; + } + else if (!any_object_dying) { + get_fpsz(baseptr)->sz = szword; } else { - ((struct small_free_loc_s *)baseptr)->nextpage = free_uniform_pages; - free_uniform_pages = (struct small_free_loc_s *)baseptr; + check_order_inside_small_page(page_free); + page_free->nextpage = small_page_lists[szword]; + small_page_lists[szword] = page_free; } } @@ -269,7 +275,7 @@ fpsz_t *fpsz = get_fpsz((char *)*fl); assert(fpsz->sz == szword); fpsz->sz = 0; - sweep_small_page_partial(*fl, szword); + sweep_small_page(getbaseptr(*fl), *fl, szword); *fl = NULL; } } @@ -280,7 +286,7 @@ corresponding full_pages_object_size[] entry is 0 */ assert(get_fpsz((char *)page)->sz == 0); nextpage = page->nextpage; - sweep_small_page_partial(page, szword); + sweep_small_page(getbaseptr(page), page, szword); page = nextpage; } } @@ -293,8 +299,10 @@ PAGE_SMSIZE_START]; fpsz_t *fpsz; for (fpsz = fpsz_start; fpsz < fpsz_end; fpsz++, pageptr += 4096) { - if (fpsz->sz != 0) { - sweep_small_page_full(pageptr, fpsz->sz); + uint8_t sz = fpsz->sz; + if (sz != 0) { + fpsz->sz = 0; + sweep_small_page(pageptr, NULL, sz); } } } diff --git a/c7/test/test_smallmalloc.py b/c7/test/test_smallmalloc.py --- a/c7/test/test_smallmalloc.py +++ b/c7/test/test_smallmalloc.py @@ -46,7 +46,7 @@ def test_sweep_freeing_random_subset(self): for i in range(50): - page0 = [stm_allocate_old_small(16) for i in range(0, 4096-16, 16)] + page0 = [stm_allocate_old_small(16) for i in range(0, 4096, 16)] assert len(set(map(pageof, page0))) == 1 tid = lib._get_type_id(page0[0]) 
while len(page0) > 0: @@ -66,3 +66,11 @@ p = stm_allocate_old_small(16) assert pageof(p) == pageof(page0[0]) page0.append(p) + + def test_sweep_full_page_remains_full(self): + page0 = [stm_allocate_old_small(16) for i in range(0, 4096, 16)] + tid = lib._get_type_id(page0[0]) + self.keep_me = set(page0) + lib._stm_smallmalloc_sweep() + for p in page0: + assert lib._get_type_id(p) == tid From noreply at buildbot.pypy.org Sun Apr 6 11:19:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Apr 2014 11:19:26 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: Fixes Message-ID: <20140406091926.14DA01C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1135:19703dd56de4 Date: 2014-04-05 23:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/19703dd56de4/ Log: Fixes diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -84,7 +84,6 @@ the common case. Otherwise, we need to compute it based on its location and size. 
*/ if (is_small_uniform(obj)) { - abort(); page_privatize(first_page); } else { @@ -336,17 +335,50 @@ uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + long i, myself = STM_SEGMENT->segment_num; if (is_small_uniform(obj)) { - abort();//XXX WRITE THE FAST CASE + /* First copy the object into the shared page, if needed */ + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + ssize_t obj_size = 0; /* computed lazily, only if needed */ + + if (is_private_page(myself, first_page)) { + obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + memcpy(dst, src, obj_size); + } + else { + assert(memcmp(dst, src, /* already identical */ + stmcb_size_rounded_up((struct object_s *)realobj)) == 0); + } + + for (i = 1; i <= NB_SEGMENTS; i++) { + if (i == myself) + continue; + + src = REAL_ADDRESS(stm_object_pages, start); + dst = REAL_ADDRESS(get_segment_base(i), start); + if (is_private_page(i, first_page)) { + /* The page is a private page. We need to diffuse this + object from the shared page to this private page. 
*/ + if (obj_size == 0) { + obj_size = + stmcb_size_rounded_up((struct object_s *)src); + } + memcpy(dst, src, obj_size); + } + else { + assert(memcmp(dst, src, /* already identical */ + stmcb_size_rounded_up((struct object_s *)src)) == 0); + } + } } else { - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); assert(obj_size >= 16); uintptr_t end = start + obj_size; uintptr_t last_page = (end - 1) / 4096UL; - long i, myself = STM_SEGMENT->segment_num; for (; first_page <= last_page; first_page++) { diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -64,7 +64,6 @@ /* takes a normal pointer to a thread-local pointer to an object */ object_t *obj = *pobj; object_t *nobj; - uintptr_t nobj_sync_now; if (obj == NULL) return; @@ -93,31 +92,28 @@ obj->stm_flags &= ~GCFLAG_HAS_SHADOW; realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size = stmcb_size_rounded_up((struct object_s *)realobj); - goto handle_large_object; } } - /* We need to make a copy of this object. It goes either in - a largemalloc.c-managed area, or if it's small enough, in - one of the small uniform pages from gcpage.c. - */ - realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - size = stmcb_size_rounded_up((struct object_s *)realobj); + else { + /* We need to make a copy of this object. It goes either in + a largemalloc.c-managed area, or if it's small enough, in + one of the small uniform pages from gcpage.c. + */ + realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size = stmcb_size_rounded_up((struct object_s *)realobj); - if (size > GC_LAST_SMALL_SIZE) { + if (size > GC_LAST_SMALL_SIZE) { - /* case 1: object is not small enough. - Ask gcpage.c for an allocation via largemalloc. 
*/ - char *allocated = allocate_outside_nursery_large(size); - nobj = (object_t *)(allocated - stm_object_pages); - - handle_large_object: - nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; - } - else { - /* case "small enough" */ - char *allocated = allocate_outside_nursery_small(size); - nobj = (object_t *)(allocated - stm_object_pages); - nobj_sync_now = (uintptr_t)nobj; + /* case 1: object is not small enough. + Ask gcpage.c for an allocation via largemalloc. */ + char *allocated = allocate_outside_nursery_large(size); + nobj = (object_t *)(allocated - stm_object_pages); + } + else { + /* case "small enough" */ + char *allocated = allocate_outside_nursery_small(size); + nobj = (object_t *)(allocated - stm_object_pages); + } } /* Copy the object */ @@ -140,7 +136,6 @@ /* a young object outside the nursery */ nobj = obj; tree_delete_item(STM_PSEGMENT->young_outside_nursery, (uintptr_t)nobj); - nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; } /* Set the overflow_number if nedeed */ @@ -150,8 +145,8 @@ } /* Must trace the object later */ + uintptr_t nobj_sync_now = (uintptr_t)nobj | !is_small_uniform(nobj); LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); - assert(nobj_sync_now == ((uintptr_t)nobj | is_small_uniform(nobj))); } static void collect_roots_in_nursery(void) From noreply at buildbot.pypy.org Sun Apr 6 11:19:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Apr 2014 11:19:27 +0200 (CEST) Subject: [pypy-commit] stmgc default: Oups! Message-ID: <20140406091927.456A01C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1136:58cdbd64670b Date: 2014-04-06 11:19 +0200 http://bitbucket.org/pypy/stmgc/changeset/58cdbd64670b/ Log: Oups! 
diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -104,8 +104,8 @@ static int compare_chunks(const void *vchunk1, const void *vchunk2) { /* sort by size */ - const mchunk_t *chunk1 = (const mchunk_t *)vchunk1; - const mchunk_t *chunk2 = (const mchunk_t *)vchunk2; + mchunk_t *chunk1 = *(mchunk_t *const *)vchunk1; + mchunk_t *chunk2 = *(mchunk_t *const *)vchunk2; if (chunk1->size < chunk2->size) return -1; if (chunk1->size == chunk2->size) From noreply at buildbot.pypy.org Sun Apr 6 11:35:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Apr 2014 11:35:01 +0200 (CEST) Subject: [pypy-commit] stmgc default: Tweak to largemalloc.c: like glibc's malloc, organize two chained Message-ID: <20140406093501.E58351C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1137:91b7fc7081c8 Date: 2014-04-06 11:34 +0200 http://bitbucket.org/pypy/stmgc/changeset/91b7fc7081c8/ Log: Tweak to largemalloc.c: like glibc's malloc, organize two chained lists instead of only one, to guarantee progress in malloc() even in the face of very large numbers of free slots that all have a sightly-too-small, identical size. diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -17,7 +17,7 @@ H_FILES = ../stmgc.h ../stm/*.h C_FILES = ../stmgc.c ../stm/*.c -COMMON = -I.. -pthread -lrt -g -Wall -Werror +COMMON = -I.. 
-pthread -lrt -g -Wall -Werror -DSTM_LARGEMALLOC_TEST # note that 'build' is partially optimized but still contains all asserts diff --git a/c7/demo/demo_largemalloc.c b/c7/demo/demo_largemalloc.c new file mode 100644 --- /dev/null +++ b/c7/demo/demo_largemalloc.c @@ -0,0 +1,72 @@ +#include +#include +#include +#include + +#include "stmgc.h" +#include "../stm/largemalloc.h" + +static inline double get_stm_time(void) +{ + struct timespec tp; + clock_gettime(CLOCK_MONOTONIC, &tp); + return tp.tv_sec + tp.tv_nsec * 0.000000001; +} + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + abort(); +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + abort(); +} + +/************************************************************/ + +#define ARENA_SIZE (1024*1024*1024) + +static char *arena_data; +extern bool (*_stm_largemalloc_keep)(char *data); /* a hook for tests */ +void _stm_mutex_pages_lock(void); + + +static bool keep_me(char *data) { + static bool last_answer = false; + last_answer = !last_answer; + return last_answer; +} + +void timing(int scale) +{ + long limit = 1L << scale; + _stm_largemalloc_init_arena(arena_data, ARENA_SIZE); + double start = get_stm_time(); + + long i; + for (i = 0; i < limit; i++) { + _stm_large_malloc(16 + 8 * (i % 4)); /* may return NULL */ + } + _stm_largemalloc_keep = keep_me; + _stm_largemalloc_sweep(); + for (i = 0; i < limit; i++) { + _stm_large_malloc(16 + 8 * (i % 4)); /* may return NULL */ + } + + double stop = get_stm_time(); + printf("scale %2d: %.9f\n", scale, stop - start); +} + + + +int main(void) +{ + int i; + arena_data = malloc(ARENA_SIZE); + assert(arena_data != NULL); + _stm_mutex_pages_lock(); + for (i = 0; i < 25; i++) + timing(i); + return 0; +} diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -20,19 +20,25 @@ #define LAST_BIN_INDEX(sz) ((sz) >= (3 << 18)) typedef struct dlist_s { - struct dlist_s *next; /* a doubly-linked list */ + 
struct dlist_s *next; /* a circular doubly-linked list */ struct dlist_s *prev; } dlist_t; +typedef struct ulist_s { + struct ulist_s *up; /* a non-circular doubly-linked list */ + struct ulist_s *down; +} ulist_t; + typedef struct malloc_chunk { size_t prev_size; /* - if the previous chunk is free: size of its data - otherwise, if this chunk is free: 1 - otherwise, 0. */ - size_t size; /* size of the data in this chunk, - plus optionally the FLAG_SORTED */ + size_t size; /* size of the data in this chunk */ - dlist_t d; /* if free: a doubly-linked list */ + dlist_t d; /* if free: a doubly-linked list 'largebins' */ /* if not free: the user data starts here */ + ulist_t u; /* if free, if unsorted: up==UU_UNSORTED + if free, if sorted: a doubly-linked list */ /* The chunk has a total size of 'size'. It is immediately followed in memory by another chunk. This list ends with the last "chunk" @@ -41,7 +47,7 @@ one are considered "not free". */ } mchunk_t; -#define FLAG_SORTED 1 +#define UU_UNSORTED ((ulist_t *) 1) #define THIS_CHUNK_FREE 1 #define BOTH_CHUNKS_USED 0 #define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) @@ -49,21 +55,13 @@ #define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) #define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) +#define updown2chunk(p) chunk_at_offset(p, \ + -(CHUNK_HEADER_SIZE + sizeof(dlist_t))) -static mchunk_t *next_chunk_s(mchunk_t *p) +static mchunk_t *next_chunk(mchunk_t *p) { - assert(p->size & FLAG_SORTED); - return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size - FLAG_SORTED); -} -static mchunk_t *next_chunk_u(mchunk_t *p) -{ - assert(!(p->size & FLAG_SORTED)); return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size); } -static mchunk_t *next_chunk_a(mchunk_t *p) -{ - return chunk_at_offset(p, CHUNK_HEADER_SIZE + (p->size & ~FLAG_SORTED)); -} /* The free chunks are stored in "bins". Each bin is a doubly-linked @@ -76,14 +74,35 @@ neighbors to ensure this. 
In each bin's doubly-linked list, chunks are sorted by their size in - decreasing order (if you start from 'd.next'). At the end of this - list are some unsorted chunks. All unsorted chunks are after all - sorted chunks. The flag 'FLAG_SORTED' distinguishes them. + decreasing order (if you follow 'largebins[n].next', + 'largebins[n].next->next', etc.). At the end of this list are some + unsorted chunks. All unsorted chunks are after all sorted chunks. + Unsorted chunks are distinguished by having 'u.up == UU_UNSORTED'. Note that if the user always calls large_malloc() with a large enough argument, then the few bins corresponding to smaller values will never be sorted at all. They are still populated with the fragments of space between bigger allocations. + + Following the 'd' linked list, we get only one chunk of every size. + The additional chunks of a given size are linked "vertically" in + the secondary 'u' doubly-linked list. + + + +-----+ + | 296 | + +-----+ + ^ | + | v + +-----+ +-----+ + | 296 | | 288 | + +-----+ +-----+ + ^ | ^ | UU_UNSORTED + | v | v | + largebins +-----+ +-----+ +-----+ +-----+ largebins + [4].next <-> | 304 | <-> | 296 | <-> | 288 | <-> | 296 | <-> [4].prev + +-----+ +-----+ +-----+ +-----+ + */ static dlist_t largebins[N_BINS]; @@ -97,8 +116,9 @@ new->d.next = &largebins[index]; new->d.prev = largebins[index].prev; new->d.prev->next = &new->d; + new->u.up = UU_UNSORTED; + new->u.down = NULL; largebins[index].prev = &new->d; - assert(!(new->size & FLAG_SORTED)); } static int compare_chunks(const void *vchunk1, const void *vchunk2) @@ -120,7 +140,7 @@ dlist_t *end = &largebins[index]; dlist_t *scan = unsorted->prev; size_t count = 1; - while (scan != end && !(data2chunk(scan)->size & FLAG_SORTED)) { + while (scan != end && data2chunk(scan)->u.up == UU_UNSORTED) { scan = scan->prev; ++count; } @@ -144,41 +164,90 @@ chunk1 = chunks[--count]; } - chunk1->size |= FLAG_SORTED; size_t search_size = chunk1->size; dlist_t *head = 
largebins[index].next; while (1) { - if (head == end || search_size >= data2chunk(head)->size) { + if (head == end || data2chunk(head)->size < search_size) { /* insert 'chunk1' here, before the current head */ head->prev->next = &chunk1->d; chunk1->d.prev = head->prev; head->prev = &chunk1->d; chunk1->d.next = head; - if (count == 0) - break; /* all done */ - chunk1 = chunks[--count]; - chunk1->size |= FLAG_SORTED; - search_size = chunk1->size; + chunk1->u.up = NULL; + chunk1->u.down = NULL; + head = &chunk1->d; + } + else if (data2chunk(head)->size == search_size) { + /* insert 'chunk1' vertically in the 'u' list */ + ulist_t *uhead = &data2chunk(head)->u; + chunk1->u.up = uhead->up; + chunk1->u.down = uhead; + if (uhead->up != NULL) + uhead->up->down = &chunk1->u; + uhead->up = &chunk1->u; +#ifndef NDEBUG + chunk1->d.next = (dlist_t *)0x42; /* not used */ + chunk1->d.prev = (dlist_t *)0x42; +#endif } else { head = head->next; + continue; } + if (count == 0) + break; /* all done */ + chunk1 = chunks[--count]; + search_size = chunk1->size; } } static void sort_bin(size_t index) { dlist_t *last = largebins[index].prev; - if (last != &largebins[index] && !(data2chunk(last)->size & FLAG_SORTED)) + if (last != &largebins[index] && data2chunk(last)->u.up == UU_UNSORTED) really_sort_bin(index); } +static void unlink_chunk(mchunk_t *mscan) +{ + if (mscan->u.down != NULL) { + /* unlink mscan from the vertical list 'u' */ + ulist_t *up = mscan->u.up; + ulist_t *down = mscan->u.down; + down->up = up; + if (up != NULL) up->down = down; + } + else { + dlist_t *prev = mscan->d.prev; + dlist_t *next = mscan->d.next; + if (mscan->u.up == NULL || mscan->u.up == UU_UNSORTED) { + /* unlink mscan from the doubly-linked list 'd' */ + next->prev = prev; + prev->next = next; + } + else { + /* relink in the 'd' list the item above me */ + mchunk_t *above = updown2chunk(mscan->u.up); + next->prev = &above->d; + prev->next = &above->d; + above->d.next = next; + above->d.prev = prev; + 
above->u.down = NULL; + } + } +} + char *_stm_large_malloc(size_t request_size) { /* 'request_size' should already be a multiple of the word size here */ assert((request_size & (sizeof(char *)-1)) == 0); + /* it can be very small, but we need to ensure a minimal size + (currently 32 bytes) */ + if (request_size < sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) + request_size = sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE; + size_t index = largebin_index(request_size); sort_bin(index); @@ -190,9 +259,11 @@ while (scan != end) { mscan = data2chunk(scan); assert(mscan->prev_size == THIS_CHUNK_FREE); - assert(next_chunk_s(mscan)->prev_size == mscan->size - FLAG_SORTED); + assert(next_chunk(mscan)->prev_size == mscan->size); + assert(IMPLY(mscan->d.prev != end, + data2chunk(mscan->d.prev)->size > mscan->size)); - if (mscan->size > request_size) + if (mscan->size >= request_size) goto found; scan = mscan->d.prev; } @@ -205,7 +276,6 @@ /* non-empty bin. */ sort_bin(index); scan = largebins[index].prev; - end = &largebins[index]; mscan = data2chunk(scan); goto found; } @@ -215,17 +285,26 @@ return NULL; found: - assert(mscan->size & FLAG_SORTED); - assert(mscan->size > request_size); + assert(mscan->size >= request_size); + assert(mscan->u.up != UU_UNSORTED); - /* unlink mscan from the doubly-linked list */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; + if (mscan->u.up != NULL) { + /* fast path: grab the item that is just above, to avoid needing + to rearrange the 'd' list */ + mchunk_t *above = updown2chunk(mscan->u.up); + ulist_t *two_above = above->u.up; + mscan->u.up = two_above; + if (two_above != NULL) two_above->down = &mscan->u; + mscan = above; + } + else { + unlink_chunk(mscan); + } - size_t remaining_size_plus_1 = mscan->size - request_size; - if (remaining_size_plus_1 <= sizeof(struct malloc_chunk)) { - next_chunk_s(mscan)->prev_size = BOTH_CHUNKS_USED; - request_size = mscan->size & ~FLAG_SORTED; + size_t remaining_size = 
mscan->size - request_size; + if (remaining_size < sizeof(struct malloc_chunk)) { + next_chunk(mscan)->prev_size = BOTH_CHUNKS_USED; + request_size = mscan->size; } else { /* only part of the chunk is being used; reduce the size @@ -234,9 +313,9 @@ mchunk_t *new = chunk_at_offset(mscan, CHUNK_HEADER_SIZE + request_size); new->prev_size = THIS_CHUNK_FREE; - size_t remaining_size = remaining_size_plus_1 - 1 - CHUNK_HEADER_SIZE; - new->size = remaining_size; - next_chunk_u(new)->prev_size = remaining_size; + size_t remaining_data_size = remaining_size - CHUNK_HEADER_SIZE; + new->size = remaining_data_size; + next_chunk(new)->prev_size = remaining_data_size; insert_unsorted(new); } mscan->size = request_size; @@ -262,17 +341,15 @@ mchunk_t *mscan = chunk_at_offset(chunk, msize); if (mscan->prev_size == BOTH_CHUNKS_USED) { - assert((mscan->size & ((sizeof(char *) - 1) & ~FLAG_SORTED)) == 0); + assert((mscan->size & (sizeof(char *) - 1)) == 0); mscan->prev_size = chunk->size; } else { - mscan->size &= ~FLAG_SORTED; size_t fsize = mscan->size; mchunk_t *fscan = chunk_at_offset(mscan, fsize + CHUNK_HEADER_SIZE); /* unlink the following chunk */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; + unlink_chunk(mscan); #ifndef NDEBUG mscan->prev_size = (size_t)-258; /* 0xfffffffffffffefe */ mscan->size = (size_t)-515; /* 0xfffffffffffffdfd */ @@ -296,15 +373,14 @@ msize = chunk->prev_size + CHUNK_HEADER_SIZE; mscan = chunk_at_offset(chunk, -msize); assert(mscan->prev_size == THIS_CHUNK_FREE); - assert((mscan->size & ~FLAG_SORTED) == chunk->prev_size); + assert(mscan->size == chunk->prev_size); /* unlink the previous chunk */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; + unlink_chunk(mscan); /* merge the two chunks */ mscan->size = msize + chunk->size; - next_chunk_u(mscan)->prev_size = mscan->size; + next_chunk(mscan)->prev_size = mscan->size; assert(chunk->prev_size = (size_t)-1); assert(chunk->size = (size_t)-1); 
@@ -319,13 +395,15 @@ { char *data = ((char *)first_chunk) + 16; size_t prev_size_if_free = 0; + fprintf(stderr, "\n"); while (1) { - fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); + assert((((uintptr_t)data) & 7) == 0); /* alignment */ + fprintf(stderr, "[ %p: %zu", data - 16, *(size_t*)(data - 16)); if (prev_size_if_free == 0) { assert(*(size_t*)(data - 16) == THIS_CHUNK_FREE || *(size_t*)(data - 16) == BOTH_CHUNKS_USED); if (*(size_t*)(data - 16) == THIS_CHUNK_FREE) - prev_size_if_free = (*(size_t*)(data - 8)) & ~FLAG_SORTED; + prev_size_if_free = (*(size_t*)(data - 8)); } else { assert(*(size_t*)(data - 16) == prev_size_if_free); @@ -333,21 +411,23 @@ } if (*(size_t*)(data - 8) == END_MARKER) break; - fprintf(stderr, " %p: %zu ]", data - 8, *(size_t*)(data - 8)); if (prev_size_if_free) { - fprintf(stderr, " (free %p / %p)\n", - *(void **)data, *(void **)(data + 8)); + fprintf(stderr, " \t(up %p / down %p)", + *(void **)(data + 16), *(void **)(data + 24)); + } + fprintf(stderr, "\n %p: %zu ]", data - 8, *(size_t*)(data - 8)); + if (prev_size_if_free) { + fprintf(stderr, "\t(prev %p <-> next %p)\n", + *(void **)(data + 8), *(void **)data); } else { fprintf(stderr, "\n"); } - if (!prev_size_if_free) - assert(!((*(size_t*)(data - 8)) & FLAG_SORTED)); assert(*(ssize_t*)(data - 8) >= 16); - data += (*(size_t*)(data - 8)) & ~FLAG_SORTED; + data += *(size_t*)(data - 8); data += 16; } - fprintf(stderr, " %p: end. ]\n\n", data - 8); + fprintf(stderr, "\n %p: end. 
]\n\n", data - 8); assert(data - 16 == (char *)last_chunk); } @@ -356,7 +436,7 @@ return (char *)first_chunk; } -#ifdef STM_TESTS +#ifdef STM_LARGEMALLOC_TEST bool (*_stm_largemalloc_keep)(char *data); /* a hook for tests */ #endif @@ -376,11 +456,11 @@ last_chunk = chunk_at_offset(first_chunk, data_size - CHUNK_HEADER_SIZE); last_chunk->prev_size = first_chunk->size; last_chunk->size = END_MARKER; - assert(last_chunk == next_chunk_u(first_chunk)); + assert(last_chunk == next_chunk(first_chunk)); insert_unsorted(first_chunk); -#ifdef STM_TESTS +#ifdef STM_LARGEMALLOC_TEST _stm_largemalloc_keep = NULL; #endif } @@ -410,11 +490,10 @@ return 0; /* unlink the prev_chunk from the doubly-linked list */ - prev_chunk->d.next->prev = prev_chunk->d.prev; - prev_chunk->d.prev->next = prev_chunk->d.next; + unlink_chunk(prev_chunk); /* reduce the prev_chunk */ - assert((prev_chunk->size & ~FLAG_SORTED) == last_chunk->prev_size); + assert(prev_chunk->size == last_chunk->prev_size); prev_chunk->size = ((char*)new_last_chunk) - (char *)prev_chunk - CHUNK_HEADER_SIZE; @@ -422,7 +501,7 @@ new_last_chunk->prev_size = prev_chunk->size; new_last_chunk->size = END_MARKER; last_chunk = new_last_chunk; - assert(last_chunk == next_chunk_u(prev_chunk)); + assert(last_chunk == next_chunk(prev_chunk)); insert_unsorted(prev_chunk); } @@ -433,7 +512,7 @@ new_last_chunk->prev_size = BOTH_CHUNKS_USED; new_last_chunk->size = END_MARKER; last_chunk = new_last_chunk; - assert(last_chunk == next_chunk_u(old_last_chunk)); + assert(last_chunk == next_chunk(old_last_chunk)); /* then free the last_chunk (turn it from "used" to "free) */ _stm_large_free((char *)&old_last_chunk->d); @@ -444,7 +523,7 @@ static inline bool _largemalloc_sweep_keep(mchunk_t *chunk) { -#ifdef STM_TESTS +#ifdef STM_LARGEMALLOC_TEST if (_stm_largemalloc_keep != NULL) return _stm_largemalloc_keep((char *)&chunk->d); #endif @@ -459,7 +538,7 @@ mchunk_t *mnext, *chunk = first_chunk; if (chunk->prev_size == THIS_CHUNK_FREE) - chunk = 
next_chunk_a(chunk); /* go to the first non-free chunk */ + chunk = next_chunk(chunk); /* go to the first non-free chunk */ while (chunk != last_chunk) { @@ -467,9 +546,9 @@ assert(chunk->prev_size != THIS_CHUNK_FREE); /* first figure out the next non-free chunk */ - mnext = next_chunk_u(chunk); + mnext = next_chunk(chunk); if (mnext->prev_size == THIS_CHUNK_FREE) - mnext = next_chunk_a(mnext); + mnext = next_chunk(mnext); /* use the callback to know if 'chunk' contains an object that survives or dies */ diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -80,7 +80,9 @@ mutex_pages_unlock(); return result; } +#endif +#ifdef STM_LARGEMALLOC_TEST void _stm_mutex_pages_lock(void) { mutex_pages_lock(); diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -62,3 +62,5 @@ if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) page_reshare(pagenum); } + +void _stm_mutex_pages_lock(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -274,6 +274,7 @@ ''', sources=source_files, define_macros=[('STM_TESTS', '1'), + ('STM_LARGEMALLOC_TEST', '1'), ('STM_NO_COND_WAIT', '1'), ('STM_DEBUGPRINT', '1'), ('GC_N_SMALL_REQUESTS', str(GC_N_SMALL_REQUESTS)), #check diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -17,7 +17,10 @@ lib._stm_mutex_pages_lock() # for this file def test_simple(self): + # + lib._stm_large_dump() d1 = lib._stm_large_malloc(7000) + lib._stm_large_dump() d2 = lib._stm_large_malloc(8000) print d1 print d2 @@ -70,7 +73,7 @@ lib._stm_large_dump() def test_resize_arena_reduce_2(self): - lib._stm_large_malloc(self.size // 2 - 64) + lib._stm_large_malloc(self.size // 2 - 80) r = lib._stm_largemalloc_resize_arena(self.size // 2) assert r == 1 lib._stm_large_dump() @@ -120,7 +123,7 @@ p.append((d, sz, content1, content2)) 
lib._stm_large_dump() - def test_random_largemalloc_sweep(self): + def test_random_largemalloc_sweep(self, constrained_size_range=False): @ffi.callback("bool(char *)") def keep(data): try: @@ -138,7 +141,11 @@ r = random.Random(1000) for j in range(500): - sizes = [random.choice(range(104, 500, 8)) for i in range(20)] + if constrained_size_range: + max = 120 + else: + max = 500 + sizes = [random.choice(range(104, max, 8)) for i in range(20)] all = [lib._stm_large_malloc(size) for size in sizes] print all @@ -170,3 +177,6 @@ assert all[i][50] == chr(65 + i) else: assert all_orig[i][50] == '\xDE' + + def test_random_largemalloc_sweep_constrained_size_range(self): + self.test_random_largemalloc_sweep(constrained_size_range=True) From noreply at buildbot.pypy.org Sun Apr 6 11:49:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Apr 2014 11:49:01 +0200 (CEST) Subject: [pypy-commit] stmgc default: Don't use an unbounded variable-sized array in the stack. Instead, Message-ID: <20140406094901.8813C1C0110@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1138:5dbd50990e2c Date: 2014-04-06 11:48 +0200 http://bitbucket.org/pypy/stmgc/changeset/5dbd50990e2c/ Log: Don't use an unbounded variable-sized array in the stack. Instead, use a fixed-sized one and allocate in the heap if it's not enough. 
diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -134,6 +134,8 @@ return +1; } +#define MAX_STACK_COUNT 64 + static void really_sort_bin(size_t index) { dlist_t *unsorted = largebins[index].prev; @@ -148,12 +150,20 @@ scan->next = end; mchunk_t *chunk1; - mchunk_t *chunks[count]; /* dynamically-sized */ + mchunk_t *chunk_array[MAX_STACK_COUNT]; + mchunk_t **chunks = chunk_array; + if (count == 1) { chunk1 = data2chunk(unsorted); /* common case */ count = 0; } else { + if (count > MAX_STACK_COUNT) { + chunks = malloc(count * sizeof(mchunk_t *)); + if (chunks == NULL) { + stm_fatalerror("out of memory"); // XXX + } + } size_t i; for (i = 0; i < count; i++) { chunks[i] = data2chunk(unsorted); @@ -200,6 +210,9 @@ chunk1 = chunks[--count]; search_size = chunk1->size; } + + if (chunks != chunk_array) + free(chunks); } static void sort_bin(size_t index) From noreply at buildbot.pypy.org Sun Apr 6 11:51:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Apr 2014 11:51:39 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/5dbd50990e2c Message-ID: <20140406095139.166361C0110@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70474:367f5b30eff7 Date: 2014-04-06 11:50 +0200 http://bitbucket.org/pypy/pypy/changeset/367f5b30eff7/ Log: import stmgc/5dbd50990e2c diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -ba29f5ab1dcd +5dbd50990e2c diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -294,8 +294,7 @@ break; /* done */ pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; endpagenum = NB_PAGES; - if (pagenum == endpagenum) - break; /* no pages in 
the 2nd section, so done too */ + continue; } page_check_and_reshare(pagenum); diff --git a/rpython/translator/stm/src_stm/stm/largemalloc.c b/rpython/translator/stm/src_stm/stm/largemalloc.c --- a/rpython/translator/stm/src_stm/stm/largemalloc.c +++ b/rpython/translator/stm/src_stm/stm/largemalloc.c @@ -21,19 +21,25 @@ #define LAST_BIN_INDEX(sz) ((sz) >= (3 << 18)) typedef struct dlist_s { - struct dlist_s *next; /* a doubly-linked list */ + struct dlist_s *next; /* a circular doubly-linked list */ struct dlist_s *prev; } dlist_t; +typedef struct ulist_s { + struct ulist_s *up; /* a non-circular doubly-linked list */ + struct ulist_s *down; +} ulist_t; + typedef struct malloc_chunk { size_t prev_size; /* - if the previous chunk is free: size of its data - otherwise, if this chunk is free: 1 - otherwise, 0. */ - size_t size; /* size of the data in this chunk, - plus optionally the FLAG_SORTED */ + size_t size; /* size of the data in this chunk */ - dlist_t d; /* if free: a doubly-linked list */ + dlist_t d; /* if free: a doubly-linked list 'largebins' */ /* if not free: the user data starts here */ + ulist_t u; /* if free, if unsorted: up==UU_UNSORTED + if free, if sorted: a doubly-linked list */ /* The chunk has a total size of 'size'. It is immediately followed in memory by another chunk. This list ends with the last "chunk" @@ -42,7 +48,7 @@ one are considered "not free". 
*/ } mchunk_t; -#define FLAG_SORTED 1 +#define UU_UNSORTED ((ulist_t *) 1) #define THIS_CHUNK_FREE 1 #define BOTH_CHUNKS_USED 0 #define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) @@ -50,21 +56,13 @@ #define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) #define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) +#define updown2chunk(p) chunk_at_offset(p, \ + -(CHUNK_HEADER_SIZE + sizeof(dlist_t))) -static mchunk_t *next_chunk_s(mchunk_t *p) +static mchunk_t *next_chunk(mchunk_t *p) { - assert(p->size & FLAG_SORTED); - return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size - FLAG_SORTED); -} -static mchunk_t *next_chunk_u(mchunk_t *p) -{ - assert(!(p->size & FLAG_SORTED)); return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size); } -static mchunk_t *next_chunk_a(mchunk_t *p) -{ - return chunk_at_offset(p, CHUNK_HEADER_SIZE + (p->size & ~FLAG_SORTED)); -} /* The free chunks are stored in "bins". Each bin is a doubly-linked @@ -77,14 +75,35 @@ neighbors to ensure this. In each bin's doubly-linked list, chunks are sorted by their size in - decreasing order (if you start from 'd.next'). At the end of this - list are some unsorted chunks. All unsorted chunks are after all - sorted chunks. The flag 'FLAG_SORTED' distinguishes them. + decreasing order (if you follow 'largebins[n].next', + 'largebins[n].next->next', etc.). At the end of this list are some + unsorted chunks. All unsorted chunks are after all sorted chunks. + Unsorted chunks are distinguished by having 'u.up == UU_UNSORTED'. Note that if the user always calls large_malloc() with a large enough argument, then the few bins corresponding to smaller values will never be sorted at all. They are still populated with the fragments of space between bigger allocations. + + Following the 'd' linked list, we get only one chunk of every size. + The additional chunks of a given size are linked "vertically" in + the secondary 'u' doubly-linked list. 
+ + + +-----+ + | 296 | + +-----+ + ^ | + | v + +-----+ +-----+ + | 296 | | 288 | + +-----+ +-----+ + ^ | ^ | UU_UNSORTED + | v | v | + largebins +-----+ +-----+ +-----+ +-----+ largebins + [4].next <-> | 304 | <-> | 296 | <-> | 288 | <-> | 296 | <-> [4].prev + +-----+ +-----+ +-----+ +-----+ + */ static dlist_t largebins[N_BINS]; @@ -98,15 +117,16 @@ new->d.next = &largebins[index]; new->d.prev = largebins[index].prev; new->d.prev->next = &new->d; + new->u.up = UU_UNSORTED; + new->u.down = NULL; largebins[index].prev = &new->d; - assert(!(new->size & FLAG_SORTED)); } static int compare_chunks(const void *vchunk1, const void *vchunk2) { /* sort by size */ - const mchunk_t *chunk1 = (const mchunk_t *)vchunk1; - const mchunk_t *chunk2 = (const mchunk_t *)vchunk2; + mchunk_t *chunk1 = *(mchunk_t *const *)vchunk1; + mchunk_t *chunk2 = *(mchunk_t *const *)vchunk2; if (chunk1->size < chunk2->size) return -1; if (chunk1->size == chunk2->size) @@ -115,13 +135,15 @@ return +1; } +#define MAX_STACK_COUNT 64 + static void really_sort_bin(size_t index) { dlist_t *unsorted = largebins[index].prev; dlist_t *end = &largebins[index]; dlist_t *scan = unsorted->prev; size_t count = 1; - while (scan != end && !(data2chunk(scan)->size & FLAG_SORTED)) { + while (scan != end && data2chunk(scan)->u.up == UU_UNSORTED) { scan = scan->prev; ++count; } @@ -129,12 +151,20 @@ scan->next = end; mchunk_t *chunk1; - mchunk_t *chunks[count]; /* dynamically-sized */ + mchunk_t *chunk_array[MAX_STACK_COUNT]; + mchunk_t **chunks = chunk_array; + if (count == 1) { chunk1 = data2chunk(unsorted); /* common case */ count = 0; } else { + if (count > MAX_STACK_COUNT) { + chunks = malloc(count * sizeof(mchunk_t *)); + if (chunks == NULL) { + stm_fatalerror("out of memory"); // XXX + } + } size_t i; for (i = 0; i < count; i++) { chunks[i] = data2chunk(unsorted); @@ -145,41 +175,93 @@ chunk1 = chunks[--count]; } - chunk1->size |= FLAG_SORTED; size_t search_size = chunk1->size; dlist_t *head = 
largebins[index].next; while (1) { - if (head == end || search_size >= data2chunk(head)->size) { + if (head == end || data2chunk(head)->size < search_size) { /* insert 'chunk1' here, before the current head */ head->prev->next = &chunk1->d; chunk1->d.prev = head->prev; head->prev = &chunk1->d; chunk1->d.next = head; - if (count == 0) - break; /* all done */ - chunk1 = chunks[--count]; - chunk1->size |= FLAG_SORTED; - search_size = chunk1->size; + chunk1->u.up = NULL; + chunk1->u.down = NULL; + head = &chunk1->d; + } + else if (data2chunk(head)->size == search_size) { + /* insert 'chunk1' vertically in the 'u' list */ + ulist_t *uhead = &data2chunk(head)->u; + chunk1->u.up = uhead->up; + chunk1->u.down = uhead; + if (uhead->up != NULL) + uhead->up->down = &chunk1->u; + uhead->up = &chunk1->u; +#ifndef NDEBUG + chunk1->d.next = (dlist_t *)0x42; /* not used */ + chunk1->d.prev = (dlist_t *)0x42; +#endif } else { head = head->next; + continue; } + if (count == 0) + break; /* all done */ + chunk1 = chunks[--count]; + search_size = chunk1->size; } + + if (chunks != chunk_array) + free(chunks); } static void sort_bin(size_t index) { dlist_t *last = largebins[index].prev; - if (last != &largebins[index] && !(data2chunk(last)->size & FLAG_SORTED)) + if (last != &largebins[index] && data2chunk(last)->u.up == UU_UNSORTED) really_sort_bin(index); } +static void unlink_chunk(mchunk_t *mscan) +{ + if (mscan->u.down != NULL) { + /* unlink mscan from the vertical list 'u' */ + ulist_t *up = mscan->u.up; + ulist_t *down = mscan->u.down; + down->up = up; + if (up != NULL) up->down = down; + } + else { + dlist_t *prev = mscan->d.prev; + dlist_t *next = mscan->d.next; + if (mscan->u.up == NULL || mscan->u.up == UU_UNSORTED) { + /* unlink mscan from the doubly-linked list 'd' */ + next->prev = prev; + prev->next = next; + } + else { + /* relink in the 'd' list the item above me */ + mchunk_t *above = updown2chunk(mscan->u.up); + next->prev = &above->d; + prev->next = &above->d; + 
above->d.next = next; + above->d.prev = prev; + above->u.down = NULL; + } + } +} + char *_stm_large_malloc(size_t request_size) { /* 'request_size' should already be a multiple of the word size here */ assert((request_size & (sizeof(char *)-1)) == 0); + /* it can be very small, but we need to ensure a minimal size + (currently 32 bytes) */ + if (request_size < sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) + request_size = sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE; + size_t index = largebin_index(request_size); sort_bin(index); @@ -191,9 +273,11 @@ while (scan != end) { mscan = data2chunk(scan); assert(mscan->prev_size == THIS_CHUNK_FREE); - assert(next_chunk_s(mscan)->prev_size == mscan->size - FLAG_SORTED); + assert(next_chunk(mscan)->prev_size == mscan->size); + assert(IMPLY(mscan->d.prev != end, + data2chunk(mscan->d.prev)->size > mscan->size)); - if (mscan->size > request_size) + if (mscan->size >= request_size) goto found; scan = mscan->d.prev; } @@ -206,7 +290,6 @@ /* non-empty bin. 
*/ sort_bin(index); scan = largebins[index].prev; - end = &largebins[index]; mscan = data2chunk(scan); goto found; } @@ -216,17 +299,26 @@ return NULL; found: - assert(mscan->size & FLAG_SORTED); - assert(mscan->size > request_size); + assert(mscan->size >= request_size); + assert(mscan->u.up != UU_UNSORTED); - /* unlink mscan from the doubly-linked list */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; + if (mscan->u.up != NULL) { + /* fast path: grab the item that is just above, to avoid needing + to rearrange the 'd' list */ + mchunk_t *above = updown2chunk(mscan->u.up); + ulist_t *two_above = above->u.up; + mscan->u.up = two_above; + if (two_above != NULL) two_above->down = &mscan->u; + mscan = above; + } + else { + unlink_chunk(mscan); + } - size_t remaining_size_plus_1 = mscan->size - request_size; - if (remaining_size_plus_1 <= sizeof(struct malloc_chunk)) { - next_chunk_s(mscan)->prev_size = BOTH_CHUNKS_USED; - request_size = mscan->size & ~FLAG_SORTED; + size_t remaining_size = mscan->size - request_size; + if (remaining_size < sizeof(struct malloc_chunk)) { + next_chunk(mscan)->prev_size = BOTH_CHUNKS_USED; + request_size = mscan->size; } else { /* only part of the chunk is being used; reduce the size @@ -235,9 +327,9 @@ mchunk_t *new = chunk_at_offset(mscan, CHUNK_HEADER_SIZE + request_size); new->prev_size = THIS_CHUNK_FREE; - size_t remaining_size = remaining_size_plus_1 - 1 - CHUNK_HEADER_SIZE; - new->size = remaining_size; - next_chunk_u(new)->prev_size = remaining_size; + size_t remaining_data_size = remaining_size - CHUNK_HEADER_SIZE; + new->size = remaining_data_size; + next_chunk(new)->prev_size = remaining_data_size; insert_unsorted(new); } mscan->size = request_size; @@ -263,17 +355,15 @@ mchunk_t *mscan = chunk_at_offset(chunk, msize); if (mscan->prev_size == BOTH_CHUNKS_USED) { - assert((mscan->size & ((sizeof(char *) - 1) & ~FLAG_SORTED)) == 0); + assert((mscan->size & (sizeof(char *) - 1)) == 0); 
mscan->prev_size = chunk->size; } else { - mscan->size &= ~FLAG_SORTED; size_t fsize = mscan->size; mchunk_t *fscan = chunk_at_offset(mscan, fsize + CHUNK_HEADER_SIZE); /* unlink the following chunk */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; + unlink_chunk(mscan); #ifndef NDEBUG mscan->prev_size = (size_t)-258; /* 0xfffffffffffffefe */ mscan->size = (size_t)-515; /* 0xfffffffffffffdfd */ @@ -297,15 +387,14 @@ msize = chunk->prev_size + CHUNK_HEADER_SIZE; mscan = chunk_at_offset(chunk, -msize); assert(mscan->prev_size == THIS_CHUNK_FREE); - assert((mscan->size & ~FLAG_SORTED) == chunk->prev_size); + assert(mscan->size == chunk->prev_size); /* unlink the previous chunk */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; + unlink_chunk(mscan); /* merge the two chunks */ mscan->size = msize + chunk->size; - next_chunk_u(mscan)->prev_size = mscan->size; + next_chunk(mscan)->prev_size = mscan->size; assert(chunk->prev_size = (size_t)-1); assert(chunk->size = (size_t)-1); @@ -320,13 +409,15 @@ { char *data = ((char *)first_chunk) + 16; size_t prev_size_if_free = 0; + fprintf(stderr, "\n"); while (1) { - fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); + assert((((uintptr_t)data) & 7) == 0); /* alignment */ + fprintf(stderr, "[ %p: %zu", data - 16, *(size_t*)(data - 16)); if (prev_size_if_free == 0) { assert(*(size_t*)(data - 16) == THIS_CHUNK_FREE || *(size_t*)(data - 16) == BOTH_CHUNKS_USED); if (*(size_t*)(data - 16) == THIS_CHUNK_FREE) - prev_size_if_free = (*(size_t*)(data - 8)) & ~FLAG_SORTED; + prev_size_if_free = (*(size_t*)(data - 8)); } else { assert(*(size_t*)(data - 16) == prev_size_if_free); @@ -334,21 +425,23 @@ } if (*(size_t*)(data - 8) == END_MARKER) break; - fprintf(stderr, " %p: %zu ]", data - 8, *(size_t*)(data - 8)); if (prev_size_if_free) { - fprintf(stderr, " (free %p / %p)\n", - *(void **)data, *(void **)(data + 8)); + fprintf(stderr, " \t(up %p / down %p)", + *(void 
**)(data + 16), *(void **)(data + 24)); + } + fprintf(stderr, "\n %p: %zu ]", data - 8, *(size_t*)(data - 8)); + if (prev_size_if_free) { + fprintf(stderr, "\t(prev %p <-> next %p)\n", + *(void **)(data + 8), *(void **)data); } else { fprintf(stderr, "\n"); } - if (!prev_size_if_free) - assert(!((*(size_t*)(data - 8)) & FLAG_SORTED)); assert(*(ssize_t*)(data - 8) >= 16); - data += (*(size_t*)(data - 8)) & ~FLAG_SORTED; + data += *(size_t*)(data - 8); data += 16; } - fprintf(stderr, " %p: end. ]\n\n", data - 8); + fprintf(stderr, "\n %p: end. ]\n\n", data - 8); assert(data - 16 == (char *)last_chunk); } @@ -357,7 +450,7 @@ return (char *)first_chunk; } -#ifdef STM_TESTS +#ifdef STM_LARGEMALLOC_TEST bool (*_stm_largemalloc_keep)(char *data); /* a hook for tests */ #endif @@ -377,11 +470,11 @@ last_chunk = chunk_at_offset(first_chunk, data_size - CHUNK_HEADER_SIZE); last_chunk->prev_size = first_chunk->size; last_chunk->size = END_MARKER; - assert(last_chunk == next_chunk_u(first_chunk)); + assert(last_chunk == next_chunk(first_chunk)); insert_unsorted(first_chunk); -#ifdef STM_TESTS +#ifdef STM_LARGEMALLOC_TEST _stm_largemalloc_keep = NULL; #endif } @@ -411,11 +504,10 @@ return 0; /* unlink the prev_chunk from the doubly-linked list */ - prev_chunk->d.next->prev = prev_chunk->d.prev; - prev_chunk->d.prev->next = prev_chunk->d.next; + unlink_chunk(prev_chunk); /* reduce the prev_chunk */ - assert((prev_chunk->size & ~FLAG_SORTED) == last_chunk->prev_size); + assert(prev_chunk->size == last_chunk->prev_size); prev_chunk->size = ((char*)new_last_chunk) - (char *)prev_chunk - CHUNK_HEADER_SIZE; @@ -423,7 +515,7 @@ new_last_chunk->prev_size = prev_chunk->size; new_last_chunk->size = END_MARKER; last_chunk = new_last_chunk; - assert(last_chunk == next_chunk_u(prev_chunk)); + assert(last_chunk == next_chunk(prev_chunk)); insert_unsorted(prev_chunk); } @@ -434,7 +526,7 @@ new_last_chunk->prev_size = BOTH_CHUNKS_USED; new_last_chunk->size = END_MARKER; last_chunk = 
new_last_chunk; - assert(last_chunk == next_chunk_u(old_last_chunk)); + assert(last_chunk == next_chunk(old_last_chunk)); /* then free the last_chunk (turn it from "used" to "free) */ _stm_large_free((char *)&old_last_chunk->d); @@ -445,7 +537,7 @@ static inline bool _largemalloc_sweep_keep(mchunk_t *chunk) { -#ifdef STM_TESTS +#ifdef STM_LARGEMALLOC_TEST if (_stm_largemalloc_keep != NULL) return _stm_largemalloc_keep((char *)&chunk->d); #endif @@ -460,7 +552,7 @@ mchunk_t *mnext, *chunk = first_chunk; if (chunk->prev_size == THIS_CHUNK_FREE) - chunk = next_chunk_a(chunk); /* go to the first non-free chunk */ + chunk = next_chunk(chunk); /* go to the first non-free chunk */ while (chunk != last_chunk) { @@ -468,9 +560,9 @@ assert(chunk->prev_size != THIS_CHUNK_FREE); /* first figure out the next non-free chunk */ - mnext = next_chunk_u(chunk); + mnext = next_chunk(chunk); if (mnext->prev_size == THIS_CHUNK_FREE) - mnext = next_chunk_a(mnext); + mnext = next_chunk(mnext); /* use the callback to know if 'chunk' contains an object that survives or dies */ diff --git a/rpython/translator/stm/src_stm/stm/misc.c b/rpython/translator/stm/src_stm/stm/misc.c --- a/rpython/translator/stm/src_stm/stm/misc.c +++ b/rpython/translator/stm/src_stm/stm/misc.c @@ -81,7 +81,9 @@ mutex_pages_unlock(); return result; } +#endif +#ifdef STM_LARGEMALLOC_TEST void _stm_mutex_pages_lock(void) { mutex_pages_lock(); diff --git a/rpython/translator/stm/src_stm/stm/pages.h b/rpython/translator/stm/src_stm/stm/pages.h --- a/rpython/translator/stm/src_stm/stm/pages.h +++ b/rpython/translator/stm/src_stm/stm/pages.h @@ -63,3 +63,5 @@ if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) page_reshare(pagenum); } + +void _stm_mutex_pages_lock(void); From noreply at buildbot.pypy.org Sun Apr 6 12:50:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Apr 2014 12:50:05 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: 
<20140406105005.6871A1C0110@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70475:8929b1e5751d Date: 2014-04-06 12:22 +0200 http://bitbucket.org/pypy/pypy/changeset/8929b1e5751d/ Log: fix diff --git a/rpython/translator/c/src/mem.c b/rpython/translator/c/src/mem.c --- a/rpython/translator/c/src/mem.c +++ b/rpython/translator/c/src/mem.c @@ -92,6 +92,7 @@ { long count = 0; struct pypy_debug_alloc_s *p; + spinlock_acquire(pypy_debug_alloc_lock, 'R'); for (p = pypy_debug_alloc_list; p; p = p->next) count++; if (count > 0) @@ -107,6 +108,7 @@ else fprintf(stderr, " (use PYPY_ALLOC=1 to see the list)\n"); } + spinlock_release(pypy_debug_alloc_lock); } #endif /* RPY_ASSERT */ From noreply at buildbot.pypy.org Sun Apr 6 12:50:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Apr 2014 12:50:06 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Better name in case we get a Fatal RPython error Message-ID: <20140406105006.C1B341C0110@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70476:5920843e1b48 Date: 2014-04-06 12:49 +0200 http://bitbucket.org/pypy/pypy/changeset/5920843e1b48/ Log: Better name in case we get a Fatal RPython error diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -9,8 +9,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.tool import rffi_platform -class error(Exception): +class RThreadError(Exception): pass +error = RThreadError translator_c_dir = py.path.local(cdir) From noreply at buildbot.pypy.org Sun Apr 6 15:31:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Apr 2014 15:31:40 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Fix (thanks camara) Message-ID: <20140406133140.35B841C350A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5186:a63ce8e7cfdf Date: 2014-04-06 15:31 +0200 
http://bitbucket.org/pypy/extradoc/changeset/a63ce8e7cfdf/ Log: Fix (thanks camara) diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -354,7 +354,7 @@ false conflicts, but some conflicts may be regarded as "false" anyway: these involve most importantly the built-in dictionary type, for which we would like accesses and writes using independent keys to be truly -independent. Other built-in data structures we a similar issue are +independent. Other built-in data structures have a similar issue, like lists: ideally, writes to different indexes should not cause conflicts; but more generally, we would need a mechanism, possibly under the control of the application, to do things like append an item to a list From noreply at buildbot.pypy.org Sun Apr 6 20:41:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Apr 2014 20:41:47 +0200 (CEST) Subject: [pypy-commit] stmgc default: A gdb script that adds two built-in functions (see doc). The very minimal to debug without getting crazy imo. Message-ID: <20140406184147.1AC4B1C01F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1139:8ae8781fb784 Date: 2014-04-06 20:41 +0200 http://bitbucket.org/pypy/stmgc/changeset/8ae8781fb784/ Log: A gdb script that adds two built-in functions (see doc). The very minimal to debug without getting crazy imo. diff --git a/c7/gdb/gdb_stm.py b/c7/gdb/gdb_stm.py new file mode 100644 --- /dev/null +++ b/c7/gdb/gdb_stm.py @@ -0,0 +1,49 @@ +""" Adds two built-in functions: $rfs(p=0) and $rgs(p=0). + +Returns the number or the address 'p', offset with the value of +the %fs or %gs register in the current thread. 
+ +Usage: you can for example add this line in your ~/.gdbinit: + + python execfile('/path/to/gdb_stm.py') +""" +import gdb + +def gdb_function(func): + class Func(gdb.Function): + __doc__ = func.__doc__ + invoke = staticmethod(func) + Func(func.__name__) + +# ------------------------------------------------------- + +SEG_FS = 0x1003 +SEG_GS = 0x1004 + +def get_segment_register(which): + v = gdb.parse_and_eval('(long*)malloc(8)') + L = gdb.lookup_type('long') + gdb.parse_and_eval('arch_prctl(%d, %d)' % (which, int(v.cast(L)))) + result = int(v.dereference()) + gdb.parse_and_eval('free(%d)' % (int(v.cast(L)),)) + return result + +def rfsrgs(name, which): + seg = get_segment_register(which) + if name is None: + return seg + tp = name.type + if tp.code == gdb.TYPE_CODE_INT: + return name + seg + assert tp.code == gdb.TYPE_CODE_PTR + L = gdb.lookup_type('long') + return (name.cast(L) + seg).cast(tp) + + at gdb_function +def rfs(name=None): + return rfsrgs(name, SEG_FS) + + at gdb_function +def rgs(name=None): + return rfsrgs(name, SEG_GS) + From noreply at buildbot.pypy.org Sun Apr 6 22:03:03 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 6 Apr 2014 22:03:03 +0200 (CEST) Subject: [pypy-commit] extradoc stm-edit: suggest edits to donatetm2 Message-ID: <20140406200303.9F8651C01F7@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: stm-edit Changeset: r5187:012112491eee Date: 2014-04-06 22:54 +0300 http://bitbucket.org/pypy/extradoc/changeset/012112491eee/ Log: suggest edits to donatetm2 diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -49,36 +49,36 @@ they can use the existing ``threading`` module, with its associated GIL and the complexities of real multi-threaded programming (locks, deadlocks, races, etc.), which make this solution less attractive. The -big alternative is for them to rely on one of various multi-process -solutions that are outside the scope of the core language. 
All of them require a -big restructuring of the program and often need extreme care and extra +most attractive alternative for most developers is to rely on one of various multi-process +solutions that are outside the scope of the core Python language. All of them require a +major restructuring of the program and often need extreme care and extra knowledge to use them. -The aim of this series of proposals is to research and implement +We propose implemention of Transactional Memory in PyPy. This is a technique that recently came to the forefront of the multi-core scene. It promises to offer multi-core CPU -usage without requiring to fall back to the multi-process solutions -described above, and also should allow to change the core of the event systems +usage without the explicit multiprocessing or event techniques above, +and also should allow modifying the core of the event systems mentioned above to enable the use of multiple cores without the explicit use of the ``threading`` module by the user. The first proposal was launched near the start of 2012 and has covered -the fundamental research part, up to the point of getting a first +much of the fundamental research, up to the point of getting a first version of PyPy working in a very roughly reasonable state (after collecting about USD$27'000, which is little more than half of the money -that was asked; hence the present second call for donations). +that was sought; hence the present second call for donations). -This second proposal aims at fixing the remaining issues until we get a -really good GIL-free PyPy (described in `goal 1`_ below); and then we -will focus on the various new features needed to actually use multiple +We now propose fixing the remaining issues to obtaining a +really good GIL-free PyPy (described in `goal 1`_ below). 
We +will then focus on the various new features needed to actually use multiple cores without explicitly using multithreading (`goal 2`_ below), up to -and including adapting some existing framework libraries like for +and including adapting some existing framework libraries, for example Twisted, Tornado, Stackless, or gevent (`goal 3`_ below). -In more details -=============== +In more detail +============== This is a call for financial help in implementing a version of PyPy able to use multiple processors in a single process, called PyPy-TM; and @@ -87,14 +87,14 @@ Armin Rigo and Remi Meier and possibly others. We currently estimate the final performance goal to be a slow-down of -25% to 40%, i.e. running a fully serial application would take between +25% to 40% from the current non-TM PyPy; i.e. running a fully serial application would take between 1.25 and 1.40x the time it takes in a regular PyPy. (This goal has been reached already in some cases, but we need to make this result more -broadly applicable.) We feel confident that it can work, in the -following sense: the performance of PyPy-TM running any suitable +broadly applicable.) We feel confident that the performance of PyPy-TM will +running any suitable application should scale linearly or close-to-linearly with the number of processors. This means that starting with two cores, such -applications should perform better than in a regular PyPy. (All numbers +applications should perform better than a non-TM PyPy. (All numbers presented here are comparing different versions of PyPy which all have the JIT enabled.) @@ -149,7 +149,7 @@ with a much smaller Hardware Transactional Memory (HTM) library based on hardware features and running on Haswell-generation processors. This has been attempted by Remi Meier recently. 
However, it seems that we -see scaling problems (as we expected them): the current generation of HTM +see the scaling problems as expected: the current generation of HTM processors is limited to run small-scale transactions. Even the default transaction size used in PyPy-STM is often too much for HTM; and reducing this size increases overhead without completely solving the @@ -162,15 +162,15 @@ generally. A CPU with support for the virtual memory described in this paper would certainly be better for running PyPy-HTM. -Another issue is sub-cache-line false conflicts (conflicts caused by two +Another issue in HTM is sub-cache-line false conflicts (conflicts caused by two independent objects that happens to live in the same cache line, which is usually 64 bytes). This is in contrast with the current PyPy-STM, which doesn't have false conflicts of this kind at all and might thus be -ultimately better for very-long-running transactions. None of the -papers we know of discusses this issue. +ultimately better for very-long-running transactions. We are not aware of +published research discussing issues of very-long-running transactions. Note that right now PyPy-STM has false conflicts within the same object, -e.g. within a list or a dictionary; but we can more easily do something +e.g. within a list or a dictionary; but we can easily do something about it (see `goal 2_`). Also, it might be possible in PyPy-HTM to arrange objects in memory ahead of time so that such conflicts are very rare; but we will never get a rate of exactly 0%, which might be @@ -179,20 +179,20 @@ .. _`Virtualizing Transactional Memory`: http://pages.cs.wisc.edu/~isca2005/papers/08A-02.PDF -Why do it with PyPy instead of CPython? +Why do TM with PyPy instead of CPython? --------------------------------------- While there have been early experiments on Hardware Transactional Memory with CPython (`Riley and Zilles (2006)`__, `Tabba (2010)`__), there has -been no recent one. 
The closest is an attempt using `Haswell on the +been none in the past few years. The closest is an attempt using `Haswell on the Ruby interpreter`__. None of these attempts tries to do the same using Software Transactional Memory. We would nowadays consider it possible to adapt our stmgc-c7 library for CPython, but it would be a lot of -work, starting from changing the reference-counting scheme. PyPy is +work, starting from changing the reference-counting garbage colleciton scheme. PyPy is better designed to be open to this kind of research. -But the best argument from an external point of view is probably that -PyPy has got a JIT to start with. It is thus starting from a better +However, the best argument from an objective point of view is probably that +PyPy has already implemented a JIT. It is thus starting from a better position in terms of performance, particularly for the long-running kind of programs that we target here. @@ -207,7 +207,7 @@ PyPy-TM will be slower than judicious usage of existing alternatives, based on multiple processes that communicate with each other in one way or another. The counter-argument is that TM is not only a cleaner -solution: there are cases in which it is not doable to organize (or +solution: there are cases in which it is not possilbe to organize (or retrofit) an existing program into the particular format needed for the alternatives. In particular, small quickly-written programs don't need the additional baggage of cross-process communication; and large @@ -217,35 +217,35 @@ rest of the program should work without changes. -Other platforms than the x86-64 Linux +Platforms other than the x86-64 Linux ------------------------------------- -The first thing to note is that the current solution depends on having a -huge address space available. 
If it were to be ported to any 32-bit -architecture, the limitation to 2GB or 4GB of address space would become -very restrictive: the way it works right now would further divide this +The current solution depends on having a +huge address space available. Porting to any 32-bit +architecture would quickly run into the limitation of a 2GB or 4GB of address space. +The way TM works right now would further divide this limit by N+1, where N is the number of segments. It might be possible to create partially different memory views for multiple threads that each access the same range of addresses; this would require extensions that are very OS-specific. We didn't investigate so far. -The current version, which thus only works on 64-bit, still relies +The current 64-bit version relies heavily on Linux- and clang-only features. We believe it is a suitable restriction: a lot of multi- and many-core servers commonly available are nowadays x86-64 machines running Linux. Nevertheless, non-Linux solutions appear to be possible as well. OS/X (and likely the various BSDs) seems to handle ``mmap()`` better than Linux does, and can remap individual pages of an existing mapping to various pages without hitting -a limit of 65536 like Linux. Windows might also have a way, although we -didn't measure yet; but the first issue with Windows would be to support -Win64, which the regular PyPy doesn't. +a limit of 65536 like Linux. Windows might also have a solution, although we +didn't measure yet; but first we would need a 64-bit Windows PyPy, which has +not seen much active support. -We will likely explore the OS/X way (as well as the Windows way if Win64 -support grows in PyPy), but this is not included in the scope of this -proposal. +We will likely explore the OS/X path (as well as the Windows path if Win64 +support grows in PyPy), but this is not part of this current +donation proposal. 
It might be possible to adapt the work done on x86-64 to the 64-bit -ARMv8 as well, but we didn't investigate so far. +ARMv8 as well. We have not investigated this so far. More readings From noreply at buildbot.pypy.org Sun Apr 6 23:43:06 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 6 Apr 2014 23:43:06 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: merge default into branch Message-ID: <20140406214306.9F0CD1D2B8E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70477:ad452a6db3f7 Date: 2014-04-05 22:10 +0300 http://bitbucket.org/pypy/pypy/changeset/ad452a6db3f7/ Log: merge default into branch diff too long, truncating to 2000 out of 2232 lines diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -10,6 +10,7 @@ import tempfile import unittest import argparse +import gc from StringIO import StringIO @@ -47,7 +48,10 @@ def tearDown(self): os.chdir(self.old_dir) - shutil.rmtree(self.temp_dir, True) + for root, dirs, files in os.walk(self.temp_dir, topdown=False): + for name in files: + os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -162,6 +162,7 @@ # Remark: Do not perform more than one test per open file, # since that does NOT catch the readline error on Windows. 
data = 'xxx' + self.f.close() for mode in ['w', 'wb', 'a', 'ab']: for attr in ['read', 'readline', 'readlines']: self.f = open(TESTFN, mode) diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,7 +219,6 @@ if restype is None: import ctypes restype = ctypes.c_int - self._argtypes_ = argsl self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -19,5 +19,5 @@ fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) imp.load_module('_ctypes_test', fp, filename, description) except ImportError: - print 'could not find _ctypes_test in',output_dir + print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -4,7 +4,6 @@ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py -.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ .. _`pypy/bin/pyinteractive.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/pyinteractive.py @@ -35,7 +34,6 @@ .. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. 
_`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py .. _`pypy/interpreter/pyparser`: @@ -49,21 +47,21 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. _`pypy/module/cppyy/capi/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/__init__.py +.. _`pypy/module/cppyy/capi/builtin_capi.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/builtin_capi.py +.. _`pypy/module/cppyy/include/capi.h`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/include/capi.h +.. _`pypy/module/test_lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/test_lib_pypy/ .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ -.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ .. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ -.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. _`pypy/objspace/std/bytesobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/bytesobject.py .. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py .. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. 
_`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py -.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. _`pypy/objspace/std/strbufobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/strbufobject.py .. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py -.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py -.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py .. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ -.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`rpython/annotator`: .. _`rpython/annotator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/annotator/ @@ -75,6 +73,11 @@ .. _`rpython/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/config/translationoption.py .. _`rpython/flowspace/`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/ .. _`rpython/flowspace/model.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/model.py +.. _`rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/ +.. _`rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/generation.py +.. _`rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/hybrid.py +.. _`rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/minimarkpage.py +.. 
_`rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/semispace.py .. _`rpython/rlib`: .. _`rpython/rlib/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/ .. _`rpython/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/listsort.py @@ -93,16 +96,12 @@ .. _`rpython/rtyper/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ .. _`rpython/rtyper/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/ .. _`rpython/rtyper/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/lltype.py -.. _`rpython/rtyper/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/ -.. _`rpython/rtyper/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/generation.py -.. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py -.. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py -.. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py .. _`rpython/rtyper/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rtyper.py .. _`rpython/rtyper/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/test/test_llinterp.py +.. _`rpython/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/rpython/tool/algo/ .. _`rpython/translator`: .. _`rpython/translator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/ .. 
_`rpython/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/backendopt/ diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -9,9 +9,3 @@ distribution.rst - dot-net.rst - - - - - diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -742,9 +742,9 @@ Testing modules in ``lib_pypy/`` -------------------------------- -You can go to the `lib_pypy/pypy_test/`_ directory and invoke the testing tool +You can go to the `pypy/module/test_lib_pypy/`_ directory and invoke the testing tool ("py.test" or "python ../../pypy/test_all.py") to run tests against the -lib_pypy hierarchy. Note, that tests in `lib_pypy/pypy_test/`_ are allowed +lib_pypy hierarchy. Note, that tests in `pypy/module/test_lib_pypy/`_ are allowed and encouraged to let their tests run at interpreter level although `lib_pypy/`_ modules eventually live at PyPy's application level. This allows us to quickly test our python-coded reimplementations @@ -835,15 +835,6 @@ web interface. .. _`development tracker`: https://bugs.pypy.org/ - -use your codespeak login or register ------------------------------------- - -If you have an existing codespeak account, you can use it to login within the -tracker. Else, you can `register with the tracker`_ easily. - - -.. _`register with the tracker`: https://bugs.pypy.org/user?@template=register .. _`roundup`: http://roundup.sourceforge.net/ diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst --- a/pypy/doc/config/opt.rst +++ b/pypy/doc/config/opt.rst @@ -46,5 +46,5 @@ The default level is `2`. -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser garbage collector`: http://hboehm.info/gc/ .. 
_`custom garbage collectors`: ../garbage_collection.html diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.txt --- a/pypy/doc/config/translation.backendopt.txt +++ b/pypy/doc/config/translation.backendopt.txt @@ -1,5 +1,5 @@ This group contains options about various backend optimization passes. Most of them are described in the `EU report about optimization`_ -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU report about optimization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst --- a/pypy/doc/cppyy_backend.rst +++ b/pypy/doc/cppyy_backend.rst @@ -51,3 +51,6 @@ to ``PATH``). In case of the former, include files are expected under ``$ROOTSYS/include`` and libraries under ``$ROOTSYS/lib``. + + +.. include:: _ref.txt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -47,7 +47,7 @@ `pypy/tool/`_ various utilities and hacks used from various places -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic +`rpython/tool/algo/`_ general-purpose algorithmic and mathematic tools `pypy/tool/pytest/`_ support code for our `testing methods`_ @@ -129,3 +129,4 @@ .. _Mono: http://www.mono-project.com/ .. _`"standard library"`: rlib.html .. _`graph viewer`: getting-started-dev.html#try-out-the-translator +.. 
include:: _ref.txt diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,5 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - ============================= lib_pypy/distributed features ============================= diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -51,6 +51,8 @@ .. function:: int pypy_execute_source_ptr(char* source, void* ptr); + .. note:: Not available in PyPy <= 2.2.1 + Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. @@ -100,9 +102,29 @@ Worked! +.. note:: If the compilation fails because of missing PyPy.h header file, + you are running PyPy <= 2.2.1, please see the section `Missing PyPy.h`_. + +Missing PyPy.h +-------------- + +.. note:: PyPy.h is in the nightly builds and goes to new PyPy releases (>2.2.1). + +For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): + +.. code-block:: bash + + cd /opt/pypy/include + wget https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h + + More advanced example --------------------- +.. note:: This example depends on pypy_execute_source_ptr which is not available + in PyPy <= 2.2.1. You might want to see the alternative example + below. + Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. 
It's a bit longish, but it captures a gist what can be done with the PyPy @@ -161,6 +183,97 @@ is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` and fill the structure from Python side for the future use. +Alternative example +------------------- + +As ``pypy_execute_source_ptr`` is not available in PyPy 2.2.1, you might want to try +an alternative approach which relies on -export-dynamic flag to the GNU linker. +The downside to this approach is that it is platform dependent. + +.. code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + lib = ffi.verify('int callback(int (*func)(int));')\n\ + lib.callback(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source(source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + + +Make sure to pass -export-dynamic flag when compiling:: + + $ gcc -g -o x x.c -lpypy-c -L. -export-dynamic + $ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +Finding pypy_home +----------------- + +Function pypy_setup_home takes one parameter - the path to libpypy. There's +currently no "clean" way (pkg-config comes to mind) how to find this path. You +can try the following (GNU-specific) hack (don't forget to link against *dl*): + +.. 
code-block:: c + + #if !(_GNU_SOURCE) + #define _GNU_SOURCE + #endif + + #include + #include + #include + + // caller should free returned pointer to avoid memleaks + // returns NULL on error + char* guess_pypyhome() { + // glibc-only (dladdr is why we #define _GNU_SOURCE) + Dl_info info; + void *_rpython_startup_code = dlsym(0,"rpython_startup_code"); + if (_rpython_startup_code == 0) { + return 0; + } + if (dladdr(_rpython_startup_code, &info) != 0) { + const char* lib_path = info.dli_fname; + char* lib_realpath = realpath(lib_path, 0); + return lib_realpath; + } + return 0; + } + + Threading --------- diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -17,7 +17,7 @@ Read more in the `EuroPython 2006 sprint report`_. -.. _`EuroPython 2006 sprint report`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt +.. _`EuroPython 2006 sprint report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt PyPy at XP 2006 and Agile 2006 ================================================================== @@ -41,8 +41,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. _report: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.html -.. 
_announcement: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.txt PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -114,7 +114,7 @@ said they were interested in the outcome and would keep an eye on its progress. Read the `talk slides`_. -.. _`talk slides`: http://codespeak.net/pypy/extradoc/talk/solutions-linux-paris-2006.html +.. _`talk slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/solutions-linux-paris-2006.html PyPy Sprint in Palma De Mallorca 23rd - 29th January 2006 @@ -129,9 +129,9 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/sprint-announcement.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q1/002746.html -.. _`one for the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2006q1/002749.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.txt +.. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html +.. _`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html Preliminary EU reports released =============================== @@ -155,8 +155,8 @@ Michael and Carl have written a `report about the first half`_ and `one about the second half`_ of the sprint. *(12/18/2005)* -.. _`report about the first half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002656.html -.. _`one about the second half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002660.html +.. _`report about the first half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002656.html +.. 
_`one about the second half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002660.html PyPy release 0.8.0 =================== @@ -187,12 +187,12 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: http://codespeak.net/pypy/extradoc/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.txt .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style -.. _`report about day one`: http://codespeak.net/pipermail/pypy-dev/2005q4/002510.html -.. _`one about day two and three`: http://codespeak.net/pipermail/pypy-dev/2005q4/002512.html -.. _`the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2005q4/002514.html +.. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html +.. _`one about day two and three`: https://mail.python.org/pipermail/pypy-dev/2005-October/002512.html +.. _`the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2005-October/002514.html PyPy release 0.7.0 =================== @@ -217,15 +217,13 @@ Its main focus is translation of the whole PyPy interpreter to a low level language and reaching 2.4.1 Python compliance. The goal of the sprint is to release a first self-contained -PyPy-0.7 version. Carl has written a report about `day 1 - 3`_, -there are `some pictures`_ online and a `heidelberg summary report`_ -detailing some of the works that led to the successful release -of `pypy-0.7.0`_! +PyPy-0.7 version. Carl has written a report about `day 1 - 3`_ +and a `heidelberg summary report`_ detailing some of the works +that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.html -.. _`PyPy sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-sprint.html -.. 
_`day 1 - 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002287.html -.. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt +.. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html PyPy Hildesheim2 finished: first self-contained PyPy run! =========================================================== @@ -233,20 +231,16 @@ Up until 31st August we were in a PyPy sprint at `Trillke-Gut`_. Carl has written a `report about day 1`_, Holger about `day 2 and day 3`_ and Carl again about `day 4 and day 5`_, -On `day 6`_ Holger reports the `breakthrough`_: PyPy runs -on its own! Hurray_!. And Carl finally reports about the winding +On `day 6`_ Holger reports the breakthrough: PyPy runs +on its own! Hurray!. And Carl finally reports about the winding down of `day 7`_ which saw us relaxing, discussing and generally -having a good time. You might want to look at the selected -`pictures from the sprint`_. +having a good time. -.. _`report about day 1`: http://codespeak.net/pipermail/pypy-dev/2005q3/002217.html -.. _`day 2 and day 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002220.html -.. _`day 4 and day 5`: http://codespeak.net/pipermail/pypy-dev/2005q3/002234.html -.. _`day 6`: http://codespeak.net/pipermail/pypy-dev/2005q3/002239.html -.. _`day 7`: http://codespeak.net/pipermail/pypy-dev/2005q3/002245.html -.. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg -.. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html -.. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ +.. _`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html +.. 
_`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html +.. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html +.. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html +.. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html .. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished @@ -264,15 +258,15 @@ the LLVM backends and type inference in general. *(07/13/2005)* -.. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html -.. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html -.. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev +.. _`day 1`: https://mail.python.org/pipermail/pypy-dev/2005-June/002169.html +.. _`day 2`: https://mail.python.org/pipermail/pypy-dev/2005-June/002171.html +.. _`day 3`: https://mail.python.org/pipermail/pypy-dev/2005-June/002172.html +.. _`pypy-dev`: https://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-announcement.html -.. _`list of people coming`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-people.html +.. _`sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-announcement.html +.. _`list of people coming`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-people.html Duesseldorf PyPy sprint 2-9 June 2006 ================================================================== @@ -285,8 +279,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. 
_`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.txt +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy at XP 2006 and Agile 2006 diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -79,7 +79,7 @@ .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf -.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 +.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf .. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf @@ -356,8 +356,6 @@ .. _`transparent dynamic optimization`: http://www.hpl.hp.com/techreports/1999/HPL-1999-77.pdf .. _Dynamo: http://www.hpl.hp.com/techreports/1999/HPL-1999-78.pdf .. _testdesign: coding-guide.html#test-design -.. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html -.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ .. _IronPython: http://ironpython.codeplex.com/ .. 
_`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -429,12 +429,27 @@ Could we use LLVM? ------------------ -There is a (static) translation backend using LLVM in the branch -``llvm-translation-backend``. It can translate PyPy with or without the JIT on -Linux. +In theory yes. But we tried to use it 5 or 6 times already, as a +translation backend or as a JIT backend --- and failed each time. -Using LLVM as our JIT backend looks interesting as well -- we made an attempt, -but it failed: LLVM has no way to patch the generated machine code. +In more details: using LLVM as a (static) translation backend is +pointless nowadays because you can generate C code and compile it with +clang. (Note that compiling PyPy with clang gives a result that is not +faster than compiling it with gcc.) We might in theory get extra +benefits from LLVM's GC integration, but this requires more work on the +LLVM side before it would be remotely useful. Anyway, it could be +interfaced via a custom primitive in the C code. (The latest such +experimental backend is in the branch ``llvm-translation-backend``, +which can translate PyPy with or without the JIT on Linux.) + +On the other hand, using LLVM as our JIT backend looks interesting as +well --- but again we made an attempt, and it failed: LLVM has no way to +patch the generated machine code. + +So the position of the core PyPy developers is that if anyone wants to +make an N+1'th attempt with LLVM, they are welcome, and will be happy to +provide help in the IRC channel, but they are left with the burden of proof +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? 
diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -222,7 +222,7 @@ PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use for other projects. +`_ and use for other projects. The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -264,7 +264,7 @@ interpreter. .. _`py.test testing tool`: http://pytest.org -.. _`py.test usage and invocations`: http://pytest.org/usage.html#usage +.. _`py.test usage and invocations`: http://pytest.org/latest/usage.html#usage Special Introspection Features of the Untranslated Python Interpreter --------------------------------------------------------------------- diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - .. _glossary: ******** diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -256,7 +256,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -185,6 +185,6 @@ .. _`standard object space`: objspace.html#the-standard-object-space .. 
[D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy - EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf + EU-Report, 2007, https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf .. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -339,44 +339,35 @@ Object types ------------ -The larger part of the `pypy/objspace/std/`_ package defines and implements the -library of Python's standard built-in object types. Each type (int, float, -list, tuple, str, type, etc.) is typically implemented by two modules: +The larger part of the `pypy/objspace/std/`_ package defines and +implements the library of Python's standard built-in object types. Each +type ``xxx`` (int, float, list, tuple, str, type, etc.) is typically +implemented in the module ``xxxobject.py``. -* the *type specification* module, which for a type ``xxx`` is called ``xxxtype.py``; +The ``W_AbstractXxxObject`` class, when present, is the abstract base +class, which mainly defines what appears on the Python-level type +object. There are then actual implementations as subclasses, which are +called ``W_XxxObject`` or some variant for the cases where we have +several different implementations. For example, +`pypy/objspace/std/bytesobject.py`_ defines ``W_AbstractBytesObject``, +which contains everything needed to build the ``str`` app-level type; +and there are subclasses ``W_BytesObject`` (the usual string) and +``W_StringBufferObject`` (a special implementation tweaked for repeated +additions, in `pypy/objspace/std/strbufobject.py`_). For mutable data +types like lists and dictionaries, we have a single class +``W_ListObject`` or ``W_DictMultiObject`` which has an indirection to +the real data and a strategy; the strategy can change as the content of +the object changes. 
-* the *implementation* module, called ``xxxobject.py``. - -The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods -specific to lists, like ``append()``. - -A particular method implemented by all types is the ``__new__()`` special -method, which in Python's new-style-classes world is responsible for creating -an instance of the type. In PyPy, ``__new__()`` locates and imports the module -implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ -defines ``__new__()`` to import the class ``W_TupleObject`` from -`pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a -"real" implementation of tuples: the way the data is stored in the -``W_TupleObject`` class, how the operations work, etc. - -The goal of the above module layout is to cleanly separate the Python -type object, visible to the user, and the actual implementation of its -instances. It is possible to provide *several* implementations of the -instances of the same Python type, by writing several ``W_XxxObject`` -classes. Every place that instantiates a new object of that Python type -can decide which ``W_XxxObject`` class to instantiate. - -From the user's point of view, the multiple internal ``W_XxxObject`` -classes are not visible: they are still all instances of exactly the -same Python type. PyPy knows that (e.g.) 
the application-level type of -its interpreter-level ``W_StringObject`` instances is str because -there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all -other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringtype.py`_. +From the user's point of view, even when there are several +``W_AbstractXxxObject`` subclasses, this is not visible: at the +app-level, they are still all instances of exactly the same Python type. +PyPy knows that (e.g.) the application-level type of its +interpreter-level ``W_BytesObject`` instances is str because there is a +``typedef`` class attribute in ``W_BytesObject`` which points back to +the string type specification from `pypy/objspace/std/bytesobject.py`_; +all other implementations of strings use the same ``typedef`` from +`pypy/objspace/std/bytesobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -387,6 +378,9 @@ Multimethods ------------ +*Note: multimethods are on the way out. Although they look cool, +they failed to provide enough benefits.* + The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. For a description of the multimethod variant that we implemented and which features it supports, @@ -491,7 +485,7 @@ Introduction ------------ -The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `rpython/flowspace/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. 
diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py +.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/extfunc.py diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,30 +42,30 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): http://codespeak.net/pypy/extradoc/sprintinfo/HildesheimReport.html - .. _Gothenburg (May 2003): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2003-sprintreport.txt - .. _LovainLaNeuve (June 2003): http://codespeak.net/pypy/extradoc/sprintinfo/LouvainLaNeuveReport.txt - .. _Berlin (Sept 2003): http://codespeak.net/pypy/extradoc/sprintinfo/BerlinReport.txt - .. _Amsterdam (Dec 2003): http://codespeak.net/pypy/extradoc/sprintinfo/AmsterdamReport.txt - .. _Vilnius (Nov 2004): http://codespeak.net/pypy/extradoc/sprintinfo/vilnius-2004-sprintreport.txt - .. _Leysin (Jan 2005): http://codespeak.net/pypy/extradoc/sprintinfo/LeysinReport.txt - .. 
_PyCon/Washington (March 2005): http://codespeak.net/pypy/extradoc/sprintinfo/pycon_sprint_report.txt - .. _Europython/Gothenburg (June 2005): http://codespeak.net/pypy/extradoc/sprintinfo/ep2005-sprintreport.txt - .. _Hildesheim (July 2005): http://codespeak.net/pypy/extradoc/sprintinfo/hildesheim2005-sprintreport.txt - .. _Heidelberg (Aug 2005): http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.txt - .. _Paris (Oct 2005): http://codespeak.net/pypy/extradoc/sprintinfo/paris/paris-report.txt - .. _Gothenburg (Dec 2005): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt - .. _Mallorca (Jan 2006): http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/mallorca-sprintreport.txt - .. _LouvainLaNeuve (March 2006): http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.txt - .. _Leysin (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2006-sprintreport.txt - .. _Tokyo (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/sprint-report.txt - .. _Düsseldorf (June 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/report1.txt - .. _Europython/Geneva (July 2006): http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt - .. _Düsseldorf (October 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/report.txt - .. _`Leysin (January 2007)`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/report.txt - .. _Hildesheim (Feb 2007): http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/sprint-report.txt - .. _`EU report writing sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/eu-report-sprint-report.txt - .. _`PyCon/Dallas (Feb 2006)`: http://codespeak.net/pypy/extradoc/sprintinfo/pycon06/sprint-report.txt + .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.txt + .. 
_Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt + .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt + .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt + .. _Amsterdam (Dec 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/AmsterdamReport.txt + .. _Vilnius (Nov 2004): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/vilnius-2004-sprintreport.txt + .. _Leysin (Jan 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LeysinReport.txt + .. _PyCon/Washington (March 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon_sprint_report.txt + .. _Europython/Gothenburg (June 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ep2005-sprintreport.txt + .. _Hildesheim (July 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/hildesheim2005-sprintreport.txt + .. _Heidelberg (Aug 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt + .. _Paris (Oct 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris/paris-report.txt + .. _Gothenburg (Dec 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt + .. _Mallorca (Jan 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/mallorca-sprintreport.txt + .. _LouvainLaNeuve (March 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt + .. _Leysin (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2006-sprintreport.txt + .. _Tokyo (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/tokyo/sprint-report.txt + .. _Düsseldorf (June 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/report1.txt + .. 
_Europython/Geneva (July 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt + .. _Düsseldorf (October 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006b/report.txt + .. _`Leysin (January 2007)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2007/report.txt + .. _Hildesheim (Feb 2007): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/sprint-report.txt + .. _`EU report writing sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/eu-report-sprint-report.txt + .. _`PyCon/Dallas (Feb 2006)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon06/sprint-report.txt .. _`Göteborg (November 2007)`: http://morepypy.blogspot.com/2007_11_01_archive.html .. _`Leysin (January 2008)`: http://morepypy.blogspot.com/2008/01/leysin-winter-sport-sprint-started.html .. _`Berlin (May 2008)`: http://morepypy.blogspot.com/2008_05_01_archive.html diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst --- a/pypy/doc/statistic/index.rst +++ b/pypy/doc/statistic/index.rst @@ -1,3 +1,7 @@ +.. warning:: + + This page is no longer updated, of historical interest only. + ======================= PyPy Project Statistics ======================= diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/stm.rst @@ -0,0 +1,284 @@ +====================== +Transactional Memory +====================== + +.. contents:: + + +This page is about ``pypy-stm``, a special in-development version of +PyPy which can run multiple independent CPU-hungry threads in the same +process in parallel. It is side-stepping what is known in the Python +world as the "global interpreter lock (GIL)" problem. + +"STM" stands for Software Transactional Memory, the technique used +internally. This page describes ``pypy-stm`` from the perspective of a +user, describes work in progress, and finally gives references to more +implementation details. 
+ +This work was done by Remi Meier and Armin Rigo. Thanks to all donors +for crowd-funding the work so far! Please have a look at the 2nd call +for donation (*not ready yet*). + +.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html + + +Introduction +============ + +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats +listed below, it should be in theory within 25%-50% of the speed of a +regular PyPy, comparing the JITting version in both cases. It is called +STM for Software Transactional Memory, which is the internal technique +used (see `Reference to implementation details`_). + +What you get in exchange for this slow-down is that ``pypy-stm`` runs +any multithreaded Python program on multiple CPUs at once. Programs +running two threads or more in parallel should ideally run faster than +in a regular PyPy, either now or soon as issues are fixed. In one way, +that's all there is to it: this is a GIL-less Python, feel free to +`download and try it`__. However, the deeper idea behind the +``pypy-stm`` project is to improve what is so far the state-of-the-art +for using multiple CPUs, which for cases where separate processes don't +work is done by writing explicitly multi-threaded programs. Instead, +``pypy-stm`` is pushing forward an approach to *hide* the threads, as +described below in `atomic sections`_. + + +.. __: + +Current status +============== + +**pypy-stm requires 64-bit Linux for now.** + +Development is done in the branch `stmgc-c7`_. If you are only +interested in trying it out, you can download a Ubuntu 12.04 binary +here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, +but not stripped of debug symbols). The current version supports four +"segments", which means that it will run up to four threads in parallel +(in other words, you get a GIL effect again, but only if trying to +execute more than 4 threads). 
+ +To build a version from sources, you first need to compile a custom +version of clang; we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +for all checkouts). Then apply all the patches in `this directory`__: +they are fixes for the very extensive usage that pypy-stm does of a +clang-only feature (without them, you get crashes of clang). Then get +the branch `stmgc-c7`_ of PyPy and run:: + + rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py + +.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ +.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/misc/ +.. __: http://clang.llvm.org/get_started.html +.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ + + +Caveats: + +* So far, small examples work fine, but there are still a number of + bugs. We're busy fixing them. + +* Currently limited to 1.5 GB of RAM (this is just a parameter in + `core.h`__). Memory overflows are not detected correctly, so may + cause segmentation faults. + +* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, + which is "only" bad). Moreover, you should run it with a command like + ``pypy-stm --jit trace_limit=60000 args...``; the default value of + 6000 for ``trace_limit`` is currently too low (6000 should become + reasonable again as we improve). Also, in order to produce machine + code, the JIT needs to enter a special single-threaded mode for now. + This all means that you *will* get very bad performance results if + your program doesn't run for *many* seconds for now. + +* The GC is new; although clearly inspired by PyPy's regular GC, it + misses a number of optimizations for now. Programs allocating large + numbers of small objects that don't immediately die, as well as + programs that modify large lists or dicts, suffer from these missing + optimizations. 
+ +* The GC has no support for destructors: the ``__del__`` method is never + called (including on file objects, which won't be closed for you). + This is of course temporary. Also, weakrefs might appear to work a + bit strangely for now (staying alive even though ``gc.collect()``, or + even dying but then un-dying for a short time before dying again). + +* The STM system is based on very efficient read/write barriers, which + are mostly done (their placement could be improved a bit in + JIT-generated machine code). But the overall bookkeeping logic could + see more improvements (see Statistics_ below). + +* You can use `atomic sections`_, but the most visible missing thing is + that you don't get reports about the "conflicts" you get. This would + be the first thing that you need in order to start using atomic + sections more extensively. Also, for now: for better results, try to + explicitly force a transaction break just before (and possibly after) + each large atomic section, with ``time.sleep(0)``. + +* Forking the process is slow because the complete memory needs to be + copied manually right now. + +* Very long-running processes should eventually crash on an assertion + error because of a non-implemented overflow of an internal 29-bit + number, but this requires at the very least ten hours --- more + probably, several days or more. + +.. _`report bugs`: https://bugs.pypy.org/ +.. 
__: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h + + + +Statistics +========== + +When a non-main thread finishes, you get statistics printed to stderr, +looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + spin loop 129512 0.094 s + +The first number is a counter; the second number gives the associated +time (the amount of real time that the thread was in this state; the sum +of all the times should be equal to the total time between the thread's +start and the thread's end). The most important points are "run +committed", which gives the amount of useful work, and "outside +transaction", which should give the time spent e.g. in library calls +(right now it seems to be a bit larger than that; to investigate). +Everything else is overhead of various forms. (Short-, medium- and +long-term future work involves reducing this overhead :-) + +These statistics are not printed out for the main thread, for now. + + +Atomic sections +=============== + +While one of the goal of pypy-stm is to give a GIL-free but otherwise +unmodified Python, the other goal is to push for a better way to use +multithreading. For this, you (as the Python programmer) get an API +in the ``__pypy__.thread`` submodule: + +* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in + a ``with __pypy__.thread.atomic:`` statement). 
It runs the whole + block of code without breaking the current transaction --- from + the point of view of a regular CPython/PyPy, this is equivalent to + saying that the GIL will not be released at all between the start and + the end of this block of code. + +The obvious usage is to use atomic blocks in the same way as one would +use locks: to protect changes to some shared data, you do them in a +``with atomic`` block, just like you would otherwise do them in a ``with +mylock`` block after ``mylock = thread.allocate_lock()``. This allows +you not to care about acquiring the correct locks in the correct order; +it is equivalent to having only one global lock. This is how +transactional memory is `generally described`__: as a way to efficiently +execute such atomic blocks, running them in parallel while giving the +illusion that they run in some serial order. + +.. __: http://en.wikipedia.org/wiki/Transactional_memory + +However, the less obvious intended usage of atomic sections is as a +wide-ranging replacement of explicit threads. You can turn a program +that is not multi-threaded at all into a program that uses threads +internally, together with large atomic sections to keep the behavior +unchanged. This capability can be hidden in a library or in the +framework you use; the end user's code does not need to be explicitly +aware of using threads. For a simple example of this, see +`transaction.py`_ in ``lib_pypy``. The idea is that if you have a +program where the function ``f(key, value)`` runs on every item of some +big dictionary, you can replace the loop with:: + + for key, value in bigdict.items(): + transaction.add(f, key, value) + transaction.run() + +This code runs the various calls to ``f(key, value)`` using a thread +pool, but every single call is done in an atomic section. The end +result is that the behavior should be exactly equivalent: you don't get +any extra multithreading issue. 
+ +This approach hides the notion of threads from the end programmer, +including all the hard multithreading-related issues. This is not the +first alternative approach to explicit threads; for example, OpenMP_ is +one. However, it is one of the first ones which does not require the +code to be organized in a particular fashion. Instead, it works on any +Python program which has got latent, imperfect parallelism. Ideally, it +only requires that the end programmer identifies where this parallelism +is likely to be found, and communicates it to the system, using for +example the ``transaction.add()`` scheme. + +.. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +.. _OpenMP: http://en.wikipedia.org/wiki/OpenMP + +================== + +Other APIs in pypy-stm: + +* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" + in this pypy-stm. This is the limit above which more threads will not + be able to execute on more cores. (Right now it is limited to 4 due + to inter-segment overhead, but should be increased in the future. It + should also be settable, and the default value should depend on the + number of actual CPUs.) + +* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but + raises an exception if you attempt to nest it inside another + ``atomic``. + +* ``__pypy__.thread.signals_enabled``: a context manager that runs + its block with signals enabled. By default, signals are only + enabled in the main thread; a non-main thread will not receive + signals (this is like CPython). Enabling signals in non-main threads + is useful for libraries where threads are hidden and the end user is + not expecting his code to run elsewhere than in the main thread. + +Note that all of this API is (or will be) implemented in a regular PyPy +too: for example, ``with atomic`` will simply mean "don't release the +GIL" and ``getsegmentlimit()`` will return 1. 
+ +================== + + +Reference to implementation details +=================================== + +The core of the implementation is in a separate C library called stmgc_, +in the c7_ subdirectory. Please see the `README.txt`_ for more +information. In particular, the notion of segment is discussed there. + +.. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ +.. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ +.. _`README.txt`: https://bitbucket.org/pypy/stmgc/raw/default/c7/README.txt + +PyPy itself adds on top of it the automatic placement of read__ and write__ +barriers and of `"becomes-inevitable-now" barriers`__, the logic to +`start/stop transactions as an RPython transformation`__ and as +`supporting`__ `C code`__, and the support in the JIT (mostly as a +`transformation step on the trace`__ and generation of custom assembler +in `assembler.py`__). + +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/readbarrier.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/memory/gctransform/stmframework.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/inevitable.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/jitdriver.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.h +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py +.. 
__: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -380,7 +380,7 @@ The RPython Typer ================= -https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ The RTyper is the first place where the choice of backend makes a difference; as outlined above we are assuming that ANSI C is the target. @@ -603,7 +603,7 @@ - using the `Boehm-Demers-Weiser conservative garbage collector`_ - using one of our custom `exact GCs implemented in RPython`_ -.. _`Boehm-Demers-Weiser conservative garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser conservative garbage collector`: http://hboehm.info/gc/ .. _`exact GCs implemented in RPython`: garbage_collection.html Almost all application-level Python code allocates objects at a very fast @@ -621,7 +621,7 @@ The C Back-End ============== -https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -127,3 +127,10 @@ .. branch: win32-fixes4 fix more tests for win32 + +.. branch: latest-improve-doc +Fix broken links in documentation + +.. 
branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field \ No newline at end of file diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -86,7 +86,7 @@ option (this is the default at some optimization levels like ``-O1``, but unneeded for high-performance translations like ``-O2``). You may get it at -http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_source/gc-7.1.tar.gz +http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with pypy. Unpack this folder in the base directory. Then open a command diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -690,7 +690,7 @@ def setup_bootstrap_path(executable): """ - Try to to as little as possible and to have the stdlib in sys.path. In + Try to do as little as possible and to have the stdlib in sys.path. In particular, we cannot use any unicode at this point, because lots of unicode operations require to be able to import encodings. 
""" diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -49,13 +49,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) @@ -3011,7 +3017,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def stmt_del_lineno(space, w_self): @@ -3038,7 +3045,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def stmt_del_col_offset(space, w_self): @@ -3074,7 +3082,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def FunctionDef_del_name(space, w_self): @@ -3201,7 +3210,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def 
ClassDef_del_name(space, w_self): @@ -3665,7 +3675,8 @@ w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'nl') + # need to save the original object too + w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state |= 16 def Print_del_nl(space, w_self): @@ -4571,7 +4582,8 @@ w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'module') + # need to save the original object too + w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state |= 4 def ImportFrom_del_module(space, w_self): @@ -4620,7 +4632,8 @@ w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'level') + # need to save the original object too + w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state |= 16 def ImportFrom_del_level(space, w_self): @@ -4938,7 +4951,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def expr_del_lineno(space, w_self): @@ -4965,7 +4979,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def expr_del_col_offset(space, w_self): @@ -6292,7 +6307,8 @@ w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'n') + # need to save the original object too + w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state |= 4 def Num_del_n(space, w_self): @@ -6343,7 +6359,8 @@ w_self.setdictvalue(space, 's', w_new_value) 
w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 's') + # need to save the original object too + w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state |= 4 def Str_del_s(space, w_self): @@ -6423,7 +6440,8 @@ w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'attr') + # need to save the original object too + w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state |= 8 def Attribute_del_attr(space, w_self): @@ -6618,7 +6636,8 @@ w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'id') + # need to save the original object too + w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state |= 4 def Name_del_id(space, w_self): @@ -6853,7 +6872,8 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + # need to save the original object too + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Const_del_value(space, w_self): @@ -7521,7 +7541,8 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + # need to save the original object too + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def excepthandler_del_lineno(space, w_self): @@ -7548,7 +7569,8 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + # need to save the original object too + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def excepthandler_del_col_offset(space, w_self): @@ -7716,7 +7738,8 @@ w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'vararg') + # need to save the original object 
too + w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state |= 2 def arguments_del_vararg(space, w_self): @@ -7746,7 +7769,8 @@ w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'kwarg') + # need to save the original object too + w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state |= 4 def arguments_del_kwarg(space, w_self): @@ -7824,7 +7848,8 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'arg') + # need to save the original object too + w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 def keyword_del_arg(space, w_self): @@ -7905,7 +7930,8 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'name') + # need to save the original object too + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 1 def alias_del_name(space, w_self): @@ -7935,7 +7961,8 @@ w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'asname') + # need to save the original object too + w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state |= 2 def alias_del_asname(space, w_self): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -459,6 +459,7 @@ self.emit("raise OperationError(space.w_TypeError, " "space.w_None)", 3) else: + save_original_object = True level = 2 if field.opt and field.type.value != "int": self.emit("if space.is_w(w_new_value, space.w_None):", 2) @@ -596,13 +597,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + 
try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except OperationError: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -387,3 +387,40 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") + + def test_dict_astNode(self): + import ast + num_node = ast.Num(n=2, lineno=2, col_offset=3) + dict_res = num_node.__dict__ + + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Num_notfullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2) + assert num_node.n == 2 + assert num_node.lineno == 2 + num_node2 = copy.deepcopy(num_node) + + def test_issue1673_Num_fullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2,col_offset=3) + num_node2 = copy.deepcopy(num_node) + assert num_node.n == num_node2.n + assert num_node.lineno == num_node2.lineno + assert num_node.col_offset == num_node2.col_offset + dict_res = num_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Str(self): + import ast + import copy + str_node = ast.Str(n=2,lineno=2) + assert str_node.n == 2 + assert str_node.lineno == 2 + str_node2 = copy.deepcopy(str_node) + dict_res = str_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2} + \ No newline at end of file diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -528,7 +528,7 @@ f = open(fn) 
exc = raises(EnvironmentError, f.truncate, 3) if sys.platform == 'win32': - assert exc.value.winerror == 5 # ERROR_ACCESS_DENIED + assert exc.value.errno == 5 # ERROR_ACCESS_DENIED else: # CPython explicitely checks the file mode # PyPy relies on the libc to raise the error diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -1,10 +1,17 @@ import py from rpython.config.translationoption import get_combined_translation_config from rpython.config.translationoption import set_opt_level -from rpython.config.config import ConflictConfigError +from rpython.config.config import ConflictConfigError, ConfigError +from rpython.translator.platform import platform as compiler def test_no_gcrootfinder_with_boehm(): config = get_combined_translation_config() config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') + +if compiler.name == 'msvc': + def test_no_asmgcrot_on_msvc(): + config = get_combined_translation_config() + py.test.raises(ConfigError, config.translation.setoption, + 'gcrootfinder', 'asmgcc', 'user') diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -1,9 +1,10 @@ import sys import os from rpython.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption, FloatOption -from rpython.config.config import ChoiceOption, StrOption, Config +from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError from rpython.config.support import detect_number_of_processors +from rpython.translator.platform import platform as compiler DEFL_INLINE_THRESHOLD = 32.4 # just enough to inline add__Int_Int() # and just small enough to prevend inlining of some 
rlist functions. @@ -16,8 +17,13 @@ if sys.platform.startswith("linux"): DEFL_ROOTFINDER_WITHJIT = "asmgcc" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] +elif compiler.name == 'msvc': + DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack"] else: DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] IS_64_BITS = sys.maxint > 2147483647 @@ -85,7 +91,7 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ROOTFINDERS, "shadowstack", cmdline="--gcrootfinder", requires={ diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -49,6 +49,11 @@ from rpython.rlib.rfile import create_file return ctx.appcall(create_file, *args_w) + at register_flow_sc(os.fdopen) +def sc_os_fdopen(ctx, *args_w): + from rpython.rlib.rfile import create_fdopen_rfile + return ctx.appcall(create_fdopen_rfile, *args_w) + @register_flow_sc(os.tmpfile) def sc_os_tmpfile(ctx): from rpython.rlib.rfile import create_temp_rfile diff --git a/rpython/flowspace/test/test_unroll.py b/rpython/flowspace/test/test_unroll.py --- a/rpython/flowspace/test/test_unroll.py +++ b/rpython/flowspace/test/test_unroll.py @@ -1,23 +1,10 @@ import operator + from rpython.flowspace.test.test_objspace import Base -from rpython.rlib.unroll import unrolling_zero, unrolling_iterable +from rpython.rlib.unroll import unrolling_iterable + class TestUnroll(Base): - - def test_unrolling_int(self): - l = range(10) - def f(tot): - i = unrolling_zero - while i < len(l): - tot += l[i] - i = i + 1 - return tot*2 - assert f(0) == sum(l)*2 - - graph = self.codetest(f) - ops = self.all_operations(graph) - assert ops == {'inplace_add': 10, 'mul': 1} - def test_unroller(self): l = unrolling_iterable(range(10)) def f(tot): diff --git 
a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py --- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py @@ -1,4 +1,9 @@ +import py from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests +from rpython.translator.platform import platform as compiler + +if compiler.name == 'msvc': + py.test.skip('asmgcc buggy on msvc') class TestAsmGcc(CompileFrameworkTests): gcrootfinder = "asmgcc" diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -567,7 +567,8 @@ # common case: this is not a guard_value, and we are not # already busy tracing. The rest of self.status stores a # valid per-guard index in the jitcounter. - hash = self.status & self.ST_SHIFT_MASK + hash = self.status + assert hash == (self.status & self.ST_SHIFT_MASK) # # do we have the BUSY flag? If so, we're tracing right now, e.g. in an # outer invocation of the same function, so don't trace again for now. 
diff --git a/rpython/jit/metainterp/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py rename from rpython/jit/metainterp/test/test_intbound.py rename to rpython/jit/metainterp/optimizeopt/test/test_intbound.py --- a/rpython/jit/metainterp/test/test_intbound.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py @@ -1,5 +1,7 @@ from rpython.jit.metainterp.optimizeopt.intutils import IntBound, IntUpperBound, \ IntLowerBound, IntUnbounded +from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 + from copy import copy import sys from rpython.rlib.rarithmetic import LONG_BIT @@ -235,8 +237,8 @@ for shift_count_bound in (IntBound(7, LONG_BIT), IntBound(-7, 7)): #assert not b.lshift_bound(shift_count_bound).has_upper assert not b.rshift_bound(shift_count_bound).has_upper - - + + def test_div_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -258,7 +260,6 @@ assert a.contains(0) - def test_sub_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -271,3 +272,14 @@ a=bound(2, 4).sub_bound(bound(1, 2)) assert not a.contains(-1) assert not a.contains(4) + + +def test_next_pow2_m1(): + assert next_pow2_m1(0) == 0 + assert next_pow2_m1(1) == 1 + assert next_pow2_m1(7) == 7 + assert next_pow2_m1(256) == 511 + assert next_pow2_m1(255) == 255 + assert next_pow2_m1(80) == 127 + assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 + assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py deleted file mode 100644 --- a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py +++ /dev/null @@ -1,12 +0,0 @@ -from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 - - -def test_next_pow2_m1(): - assert next_pow2_m1(0) == 0 - assert next_pow2_m1(1) == 1 - assert next_pow2_m1(7) == 7 - assert next_pow2_m1(256) == 511 - assert next_pow2_m1(255) == 255 
- assert next_pow2_m1(80) == 127 - assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 - assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py rename from rpython/jit/metainterp/test/test_virtualstate.py rename to rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -89,6 +89,11 @@ assert isgeneral(OptValue(ConstPtr(fooref)), OptValue(ConstPtr(fooref))) + value1 = OptValue(BoxPtr()) + value1.make_nonnull(None) + value2 = OptValue(ConstPtr(LLtypeMixin.nullptr)) + assert not isgeneral(value1, value2) + def test_field_matching_generalization(self): const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) @@ -202,6 +207,17 @@ """ self.compare(guards, expected, [box]) + def test_known_value(self): + value1 = OptValue(self.nodebox) + value1.make_constant(ConstInt(1)) + box = self.nodebox + guards = value1.make_guards(box) + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.compare(guards, expected, [box]) + def test_equal_inputargs(self): value = OptValue(self.nodebox) classbox = self.cpu.ts.cls_of_box(self.nodebox) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -296,7 +296,7 @@ i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) - self.finilize_short_preamble(start_label) + self.finalize_short_preamble(start_label) def close_loop(self, start_label, jumpop): virtual_state = self.initial_virtual_state @@ -403,9 +403,9 @@ assert isinstance(target_token, TargetToken) 
target_token.targeting_jitcell_token.retraced_count = sys.maxint - self.finilize_short_preamble(start_label) + self.finalize_short_preamble(start_label) - def finilize_short_preamble(self, start_label): + def finalize_short_preamble(self, start_label): short = self.short assert short[-1].getopnum() == rop.JUMP target_token = start_label.getdescr() diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -18,7 +18,15 @@ position = -1 def generalization_of(self, other, renum, bad): - raise NotImplementedError + assert self.position != -1 + if self.position in renum: + result = renum[self.position] == other.position + else: + renum[self.position] = other.position + result = self.generalization_of_renumbering_done(other, renum, bad) + if not result: + bad[self] = bad[other] = None + return result def generate_guards(self, other, box, cpu, extra_guards, renum): if self.generalization_of(other, renum, {}): @@ -67,37 +75,21 @@ def __init__(self, fielddescrs): self.fielddescrs = fielddescrs - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False assert isinstance(other, AbstractVirtualStructStateInfo) assert len(self.fielddescrs) == len(self.fieldstate) assert len(other.fielddescrs) == len(other.fieldstate) if len(self.fielddescrs) != len(other.fielddescrs): - bad[self] = None - bad[other] = None return False for i in range(len(self.fielddescrs)): if other.fielddescrs[i] is not self.fielddescrs[i]: - bad[self] = None - bad[other] = None return 
False if not self.fieldstate[i].generalization_of(other.fieldstate[i], renum, bad): - bad[self] = None - bad[other] = None return False return True @@ -130,11 +122,8 @@ self.known_class = known_class def _generalization_of(self, other): - if not isinstance(other, VirtualStateInfo): - return False - if not self.known_class.same_constant(other.known_class): - return False - return True + return (isinstance(other, VirtualStateInfo) and + self.known_class.same_constant(other.known_class)) def debug_header(self, indent): debug_print(indent + 'VirtualStateInfo(%d):' % self.position) @@ -146,11 +135,8 @@ self.typedescr = typedescr def _generalization_of(self, other): - if not isinstance(other, VStructStateInfo): - return False - if self.typedescr is not other.typedescr: - return False - return True + return (isinstance(other, VStructStateInfo) and + self.typedescr is other.typedescr) def debug_header(self, indent): debug_print(indent + 'VStructStateInfo(%d):' % self.position) @@ -165,28 +151,14 @@ return (isinstance(other, VArrayStateInfo) and self.arraydescr is other.arraydescr) - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False if len(self.fieldstate) != len(other.fieldstate): - bad[self] = None - bad[other] = None return False for i in range(len(self.fieldstate)): if not self.fieldstate[i].generalization_of(other.fieldstate[i], renum, bad): - bad[self] = None - bad[other] = None return False return True @@ -216,41 +188,23 @@ self.arraydescr = arraydescr self.fielddescrs = fielddescrs - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if 
renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False assert isinstance(other, VArrayStructStateInfo) if len(self.fielddescrs) != len(other.fielddescrs): - bad[self] = None - bad[other] = None return False p = 0 for i in range(len(self.fielddescrs)): if len(self.fielddescrs[i]) != len(other.fielddescrs[i]): - bad[self] = None - bad[other] = None return False for j in range(len(self.fielddescrs[i])): if self.fielddescrs[i][j] is not other.fielddescrs[i][j]: - bad[self] = None - bad[other] = None return False if not self.fieldstate[p].generalization_of(other.fieldstate[p], renum, bad): - bad[self] = None - bad[other] = None return False p += 1 return True @@ -302,49 +256,31 @@ self.position_in_notvirtuals = -1 self.lenbound = value.lenbound - def generalization_of(self, other, renum, bad): + def generalization_of_renumbering_done(self, other, renum, bad): # XXX This will always retrace instead of forcing anything which # might be what we want sometimes? 
- assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position if not isinstance(other, NotVirtualStateInfo): - bad[self] = None - bad[other] = None return False if other.level < self.level: - bad[self] = None - bad[other] = None return False if self.level == LEVEL_CONSTANT: if not self.constbox.same_constant(other.constbox): - bad[self] = None - bad[other] = None return False elif self.level == LEVEL_KNOWNCLASS: if not self.known_class.same_constant(other.known_class): - bad[self] = None - bad[other] = None return False + elif self.level == LEVEL_NONNULL: + if other.constbox and not other.constbox.nonnull(): + return False + if not self.intbound.contains_bound(other.intbound): - bad[self] = None - bad[other] = None return False if self.lenbound and other.lenbound: if self.lenbound.mode != other.lenbound.mode or \ self.lenbound.descr != other.lenbound.descr or \ not self.lenbound.bound.contains_bound(other.lenbound.bound): - bad[self] = None - bad[other] = None return False elif self.lenbound: - bad[self] = None - bad[other] = None From noreply at buildbot.pypy.org Sun Apr 6 23:43:07 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 6 Apr 2014 23:43:07 +0200 (CEST) Subject: [pypy-commit] pypy default: find time() in msvcrt on windows Message-ID: <20140406214307.E94441D2B8E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70478:8d015b1d68df Date: 2014-04-06 18:52 +0300 http://bitbucket.org/pypy/pypy/changeset/8d015b1d68df/ Log: find time() in msvcrt on windows diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -1172,6 +1172,8 @@ # XXX magic: on Windows try to load the function from 'kernel32' too if cfunc is None and hasattr(ctypes, 'windll'): cfunc = 
get_on_lib(ctypes.windll.kernel32, funcname) + if cfunc is None and hasattr(ctypes, 'windll'): + cfunc = get_on_lib(ctypes.cdll.msvcrt, funcname) if cfunc is None: # function name not found in any of the libraries From noreply at buildbot.pypy.org Sun Apr 6 23:43:09 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 6 Apr 2014 23:43:09 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: pass a test Message-ID: <20140406214309.354E51D2B8E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70479:f9af4c723ac6 Date: 2014-04-07 00:41 +0300 http://bitbucket.org/pypy/pypy/changeset/f9af4c723ac6/ Log: pass a test diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -4,7 +4,7 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.strides import (calculate_broadcast_strides, - shape_agreement_multiple) + shape_agreement, shape_agreement_multiple) from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator from pypy.module.micronumpy.concrete import SliceArray from pypy.module.micronumpy import ufuncs @@ -134,6 +134,9 @@ op_flag.get_it_item = (get_readonly_item, get_readonly_slice) elif op_flag.rw == 'rw': op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + elif op_flag.rw == 'w': + # XXX Extra logic needed to make sure writeonly + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) return op_flag def parse_func_flags(space, nditer, w_flags): @@ -154,8 +157,7 @@ if item == 'external_loop': nditer.external_loop = True elif item == 'buffered': - raise OperationError(space.w_NotImplementedError, space.wrap( - 'nditer buffered not implemented yet')) + # For numpy compatability nditer.buffered = True elif item == 'c_index': nditer.tracked_index = 'C' @@ -293,24 +295,35 @@ raise 
OperationError(space.w_NotImplementedError, space.wrap( 'nditer op_dtypes kwarg not implemented yet')) self.iters=[] - self.shape = iter_shape = shape_agreement_multiple(space, self.seq) - outarg = [i for i in range(len(self.seq)) if self.seq[i] is None] - if len(outarg) > 0: - # Make None operands writeonly and flagged for - # allocation, and everything else defaults to readonly. To write - # to a provided operand, you must specify the write flag manually. + outargs = [i for i in range(len(self.seq)) \ + if self.seq[i] is None or self.op_flags[i].rw == 'w'] + if len(outargs) > 0: + out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) + else: + out_shape = None + self.shape = iter_shape = shape_agreement_multiple(space, self.seq, + shape=out_shape) + if len(outargs) > 0: + # Make None operands writeonly and flagged for allocation out_dtype = None - for elem in self.seq: - if elem is None: + for i in range(len(self.seq)): + if self.seq[i] is None: + self.op_flags[i].get_it_item = (get_readwrite_item, + get_readwrite_slice) + self.op_flags[i].allocate = True + continue + if self.op_flags[i] == 'w': continue out_dtype = ufuncs.find_binop_result_dtype(space, - elem.get_dtype(), out_dtype) - for i in outarg: - self.op_flags[i].get_it_item = (get_readwrite_item, - get_readwrite_slice) - self.op_flags[i].allocate = True - # XXX can we postpone allocation to later? - self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) + self.seq[i].get_dtype(), out_dtype) + for i in outargs: + if self.seq[i] is None: + # XXX can we postpone allocation to later? 
+ self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) + else: + if not self.op_flags[i].broadcast: + # Raises if ooutput cannot be broadcast + shape_agreement(space, iter_shape, self.seq[i], False) if self.tracked_index != "": if self.order == "K": self.order = self.seq[0].implementation.order @@ -430,8 +443,10 @@ 'not implemented yet')) def descr_get_operands(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + l_w = [] + for op in self.seq: + l_w.append(op.descr_view(space)) + return space.newlist(l_w) def descr_get_dtypes(self, space): res = [None] * len(self.seq) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,14 +282,16 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list): +def shape_agreement_multiple(space, array_list, shape=None): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - shape = array_list[0].get_shape() - for arr in array_list[1:]: + for arr in array_list: if not space.is_none(arr): - shape = shape_agreement(space, shape, arr) + if shape is None: + shape = arr.get_shape() + else: + shape = shape_agreement(space, shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -211,7 +211,7 @@ assert (c == [1., 4., 9.]).all() assert (b == c).all() exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) - assert str(exc.value).startswith('non-broadcastable output') + assert str(exc.value).find('cannot be broadcasted') > 0 def test_outer_product(self): from numpy import nditer, arange From noreply at buildbot.pypy.org Mon Apr 7 13:52:26 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 13:52:26 +0200 
(CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged default Message-ID: <20140407115226.8C7771C350E@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r772:c50f22904bc5 Date: 2014-04-03 13:25 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/c50f22904bc5/ Log: Merged default diff --git a/spyvm/version.py b/spyvm/version.py --- a/spyvm/version.py +++ b/spyvm/version.py @@ -8,6 +8,8 @@ return func(self, *args) def meth(self, *args): return elidable_func(self, self.version, *args) + elidable_func.func_name = "elidable_" + func.func_name + meth.func_name = "elidable_meth_" + func.func_name return meth # In addition to marking the decorated function as "pure", both the receiver From noreply at buildbot.pypy.org Mon Apr 7 13:52:32 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 13:52:32 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Renamed _likely_methodname to lookup_selector and w_compiledin to lookup_class. Message-ID: <20140407115232.AA04B1C350E@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r777:a35c94b4657f Date: 2014-04-04 19:13 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a35c94b4657f/ Log: Renamed _likely_methodname to lookup_selector and w_compiledin to lookup_class. Added other field to CompiledMethod (compiledin_class) to explicitely handle the two sources of information [ a) the class where the CompiledMethod was looked up in and b) the last literal containing the compiledin-class, which is not available in older images ]. Reason for these refactorings was to get rid of a JIT warning saying that an elidable function might have random effects (the previous implementation of compiled_in()). 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -345,9 +345,9 @@ receiver, receiver.class_shadow(self.space)) def _sendSuperSelector(self, w_selector, argcount, interp): - w_compiledin = self.w_method().compiled_in() - assert isinstance(w_compiledin, model.W_PointersObject) - s_compiledin = w_compiledin.as_class_get_shadow(self.space) + compiledin_class = self.w_method().compiled_in() + assert isinstance(compiledin_class, model.W_PointersObject) + s_compiledin = compiledin_class.as_class_get_shadow(self.space) return self._sendSelector(w_selector, argcount, interp, self.w_receiver(), s_compiledin.s_superclass()) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1176,9 +1176,10 @@ # Main method content "bytes", "literals", # Additional info about the method - "_likely_methodname", "w_compiledin" ] + "lookup_selector", "compiledin_class", "lookup_class" ] - _likely_methodname = "" + lookup_selector = "" + lookup_class = None import_from_mixin(version.VersionMixin) def __init__(self, space, bytecount=0, header=0): @@ -1202,19 +1203,24 @@ self._tempsize = tempsize self._primitive = _primitive self.islarge = islarge - self.w_compiledin = None + self.compiledin_class = None self.changed() def setliteral(self, index, w_lit): self.literals[index] = w_lit if index == len(self.literals): - self.w_compiledin = None + self.compiledin_class = None self.changed() def setliterals(self, literals): """NOT RPYTHON""" # Only for testing, not safe. 
self.literals = literals - self.w_compiledin = None + self.compiledin_class = None + self.changed() + + def set_lookup_class_and_name(self, w_class, selector): + self.lookup_class = w_class + self.lookup_selector = selector self.changed() def setbytes(self, bytes): @@ -1226,11 +1232,6 @@ self.bytes[index0] = character self.changed() - def set_compiled_in(self, w_compiledin): - if not self.w_compiledin: - self.w_compiledin = w_compiledin - self.changed() - # === Getters === def getclass(self, space): @@ -1278,36 +1279,32 @@ assert pc >= 0 and pc < len(self.bytes) return self.bytes[pc] + def compiled_in(self): + # This method cannot be constant/elidable. Looking up the compiledin-class from + # the literals must be done lazily because we cannot analyze the literals + # properly during the fillin-phase. + + # Prefer the information stored in the CompiledMethod literal... + result = self.constant_lookup_class() + if not result: + # ...but fall back to our own information if nothing else available. + result = self.constant_compiledin_class() + if not result: + self.update_compiledin_class_from_literals() + result = self.constant_compiledin_class() + assert result is None or isinstance(result, W_PointersObject) + return result + @constant_for_version - def compiled_in(self): - w_compiledin = self.w_compiledin - if not w_compiledin: - # If the method has not been looked up from a methoddict yet, try to get the - # containing class from it's literals. This should be rare in practice. - w_compiledin = self.compiled_in_from_literals() - self.w_compiledin = w_compiledin - assert w_compiledin is None or isinstance(w_compiledin, W_PointersObject) - return w_compiledin + def constant_compiledin_class(self): + return self.compiledin_class - @jit.dont_look_inside # Tracing into this function is useless. 
- def compiled_in_from_literals(self): - w_compiledin = None - literals = self.literals - if literals and len(literals) > 0: - # (Blue book, p 607) Last of the literals is either the containing class - # or an association with compiledin as a class - w_candidate = literals[-1] - if isinstance(w_candidate, W_PointersObject) and w_candidate.has_space(): - space = w_candidate.space() # Not pretty to steal the space from another object. - if w_candidate.is_class(space): - w_compiledin = w_candidate - elif w_candidate.size() >= 2: - from spyvm import wrapper - association = wrapper.AssociationWrapper(space, w_candidate) - w_candidate = association.value() - if w_candidate.is_class(space): - w_compiledin = w_candidate - return w_compiledin + @constant_for_version + def constant_lookup_class(self): + return self.lookup_class + + def safe_compiled_in(self): + return self.constant_compiledin_class() or self.constant_lookup_class() # === Object Access === @@ -1353,6 +1350,27 @@ # === Misc === + def update_compiledin_class_from_literals(self): + # (Blue book, p 607) Last of the literals is either the containing class + # or an association with compiledin as a class + literals = self.literals + if literals and len(literals) > 0: + w_literal = literals[-1] + if isinstance(w_literal, W_PointersObject) and w_literal.has_space(): + space = w_literal.space() # Not pretty to steal the space from another object. 
+ compiledin_class = None + if w_literal.is_class(space): + compiledin_class = w_literal + elif w_literal.size() >= 2: + from spyvm import wrapper + association = wrapper.AssociationWrapper(space, w_literal) + w_literal = association.value() + if w_literal.is_class(space): + compiledin_class = w_literal + if compiledin_class: + self.compiledin_class = w_literal + self.changed() + def _become(self, w_other): assert isinstance(w_other, W_CompiledMethod) self.argsize, w_other.argsize = w_other.argsize, self.argsize @@ -1363,8 +1381,8 @@ self.header, w_other.header = w_other.header, self.header self.literalsize, w_other.literalsize = w_other.literalsize, self.literalsize self.islarge, w_other.islarge = w_other.islarge, self.islarge - self._likely_methodname, w_other._likely_methodname = w_other._likely_methodname, self._likely_methodname - self.w_compiledin, w_other.w_compiledin = w_other.w_compiledin, self.w_compiledin + self.lookup_selector, w_other.lookup_selector = w_other.lookup_selector, self.lookup_selector + self.compiledin_class, w_other.compiledin_class = w_other.compiledin_class, self.compiledin_class W_AbstractObjectWithIdentityHash._become(self, w_other) self.changed() w_other.changed() @@ -1373,6 +1391,8 @@ copy = W_CompiledMethod(space, 0, self.getheader()) copy.bytes = list(self.bytes) copy.literals = list(self.literals) + copy.compiledin_class = self.compiledin_class + copy.lookup_selector = self.lookup_selector copy.changed() return copy @@ -1424,20 +1444,20 @@ return "? (no compiledin-info)" def get_identifier_string(self): - return "%s >> #%s" % (self.guess_containing_classname(), self._likely_methodname) + return "%s >> #%s" % (self.guess_containing_classname(), self.lookup_selector) def safe_identifier_string(self): if not we_are_translated(): return self.get_identifier_string() # This has the same functionality as get_identifier_string, but without calling any # methods in order to avoid side effects that prevent translation. 
- w_class = self.w_compiledin + w_class = self.safe_compiled_in() if isinstance(w_class, W_PointersObject): from spyvm.shadow import ClassShadow s_class = w_class.shadow if isinstance(s_class, ClassShadow): - return "%s >> #%s" % (s_class.getname(), self._likely_methodname) - return "#%s" % self._likely_methodname + return "%s >> #%s" % (s_class.getname(), self.lookup_selector) + return "#%s" % self.lookup_selector class DetachingShadowError(Exception): def __init__(self, old_shadow, new_shadow_class): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -372,7 +372,7 @@ @expose_primitive(FAIL) def func(interp, s_frame, argcount): from spyvm.error import Exit - if s_frame.w_method()._likely_methodname == 'doesNotUnderstand:': + if s_frame.w_method().lookup_selector == 'doesNotUnderstand:': print '' print s_frame.print_stack() w_message = s_frame.peek(0) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -382,9 +382,13 @@ if methoddict is s_new_methoddict: return if methoddict: methoddict.s_class = None - self._s_methoddict = s_new_methoddict - self._s_methoddict.s_class = self - + self.store_s_methoddict(s_new_methoddict) + + def store_s_methoddict(self, s_methoddict): + s_methoddict.s_class = self + s_methoddict.sync_method_cache() + self._s_methoddict = s_methoddict + def attach_s_class(self, s_other): self.subclass_s[s_other] = None @@ -480,7 +484,6 @@ while look_in_shadow is not None: w_method = look_in_shadow.s_methoddict().find_selector(w_selector) if w_method is not None: - w_method.set_compiled_in(look_in_shadow.w_self()) return w_method look_in_shadow = look_in_shadow._s_superclass raise MethodNotFound(self, w_selector) @@ -513,8 +516,7 @@ if self._s_methoddict is None: w_methoddict = model.W_PointersObject(self.space, None, 2) w_methoddict.store(self.space, 1, model.W_PointersObject(self.space, None, 0)) - self._s_methoddict = 
w_methoddict.as_methoddict_get_shadow(self.space) - self.s_methoddict().sync_method_cache() + self.store_s_methoddict(w_methoddict.as_methoddict_get_shadow(self.space)) self.s_methoddict().invalid = False def installmethod(self, w_selector, w_method): @@ -523,7 +525,7 @@ self.initialize_methoddict() self.s_methoddict().methoddict[w_selector] = w_method if isinstance(w_method, model.W_CompiledMethod): - w_method.w_compiledin = self.w_self() + w_method.compiledin_class = self.w_self() class MethodDictionaryShadow(ListStorageShadow): @@ -537,9 +539,6 @@ self.methoddict = {} ListStorageShadow.__init__(self, space, w_self, 0) - def attach_shadow(self): - self.sync_method_cache() - def update(self): self.sync_method_cache() @@ -558,12 +557,6 @@ ListStorageShadow.store(self, n0, w_value) self.invalid = True - def _as_md_entry(self, w_selector): - if isinstance(w_selector, model.W_BytesObject): - return w_selector.as_string() - else: - return "%r" % w_selector # use the pointer for this - def sync_method_cache(self): if self.size() == 0: return @@ -576,7 +569,10 @@ for i in range(size): w_selector = self.w_self().fetch(self.space, constants.METHODDICT_NAMES_INDEX+i) if not w_selector.is_nil(self.space): - if not isinstance(w_selector, model.W_BytesObject): + if isinstance(w_selector, model.W_BytesObject): + selector = w_selector.as_string() + else: + selector = "? (non-byteobject selector)" pass # TODO: Check if there's more assumptions about this. # Putting any key in the methodDict and running with @@ -588,9 +584,8 @@ "CompiledMethods only, for now. 
" "If the value observed is nil, our " "invalidating mechanism may be broken.") - selector = self._as_md_entry(w_selector) self.methoddict[w_selector] = w_compiledmethod - w_compiledmethod._likely_methodname = selector + w_compiledmethod.set_lookup_class_and_name(self.s_class.w_self(), selector) if self.s_class: self.s_class.changed() self.invalid = False diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -93,8 +93,6 @@ class mockmethod(object): def __init__(self, val): self.val = val - def set_compiled_in(self, w): - pass w_class = bootstrap_class(0) shadow = w_class.as_class_get_shadow(space) shadow.installmethod(w_foo, mockmethod(1)) @@ -112,7 +110,7 @@ assert subshadow.lookup(w_bar).val == 2 py.test.raises(MethodNotFound, subshadow.lookup, "zork") -def test_w_compiledin(): +def test_compiledin_class(): w_super = bootstrap_class(0) w_class = bootstrap_class(0, w_superclass=w_super) supershadow = w_super.as_class_get_shadow(space) @@ -124,7 +122,7 @@ def new_object(size=0): return model.W_PointersObject(space, None, size) -def test_w_compiledin_assoc(): +def test_compiledin_class_assoc(): val = bootstrap_class(0) assoc = new_object(2) assoc.store(space, 0, new_object()) @@ -133,9 +131,9 @@ meth.setliterals([new_object(), new_object(), assoc ]) assert meth.compiled_in() == val -def test_w_compiledin_missing(): +def test_compiledin_class_missing(): meth = model.W_CompiledMethod(space, 0) - meth.w_compiledin = None + meth.compiledin_class = None meth.setliterals([new_object(), new_object() ]) assert meth.compiled_in() == None From noreply at buildbot.pypy.org Mon Apr 7 13:52:27 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 13:52:27 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Extracted util-function for tests. 
Message-ID: <20140407115227.BD98D1C350E@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r773:a9d2b6743c2c Date: 2014-04-03 13:38 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a9d2b6743c2c/ Log: Extracted util-function for tests. diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -13,37 +13,16 @@ from rpython.jit.metainterp.test.test_ajit import LLJitMixin -from .util import bootstrap_class +from .util import import_bytecodes from spyvm import model, interpreter, primitives, shadow from spyvm import objspace, squeakimage from spyvm.tool.analyseimage import create_squeakimage, create_testimage from rpython.rlib.streamio import open_file_as_stream +import_bytecodes(__name__) + space = objspace.ObjSpace() -# expose the bytecode's values as global constants. -# Bytecodes that have a whole range are exposed as global functions: -# call them with an argument 'n' to get the bytecode number 'base + n'. -# XXX hackish -def setup(): - def make_getter(entry): - def get_opcode_chr(n): - opcode = entry[0] + n - assert entry[0] <= opcode <= entry[1] - return chr(opcode) - return get_opcode_chr - for entry in interpreter.BYTECODE_RANGES: - name = entry[-1] - if len(entry) == 2: # no range - globals()[name] = chr(entry[0]) - else: - globals()[name] = make_getter(entry) -setup() - -# -# Tests -# - sys.setrecursionlimit(5000) class TestLLtype(LLJitMixin): diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1,9 +1,11 @@ import py, operator, sys from spyvm import model, interpreter, primitives, shadow, objspace, wrapper, constants -from .util import create_space_interp, copy_to_module, cleanup_module +from .util import create_space_interp, copy_to_module, cleanup_module, import_bytecodes from spyvm.wrapper import PointWrapper from spyvm.conftest import option +import_bytecodes(__name__) + def 
setup_module(): space, interp = create_space_interp(bootstrap = True) w = space.w @@ -36,25 +38,6 @@ exp = w(exp) assert list[i].is_same_object(exp) -# expose the bytecode's values as global constants. -# Bytecodes that have a whole range are exposed as global functions: -# call them with an argument 'n' to get the bytecode number 'base + n'. -# XXX hackish -def setup(): - def make_getter(entry): - def get_opcode_chr(n): - opcode = entry[0] + n - assert entry[0] <= opcode <= entry[1] - return chr(opcode) - return get_opcode_chr - for entry in interpreter.BYTECODE_RANGES: - name = entry[-1] - if len(entry) == 2: # no range - globals()[name] = chr(entry[0]) - else: - globals()[name] = make_getter(entry) -setup() - def run_with_faked_primitive_methods(methods, func, active_context=None): # Install faked compiled methods that just invoke the primitive: diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -54,6 +54,24 @@ del mod._copied_objects_ import gc; gc.collect() +def import_bytecodes(module_name): + # expose the bytecode's values as global constants. + # Bytecodes that have a whole range are exposed as global functions: + # call them with an argument 'n' to get the bytecode number 'base + n'. + mod = sys.modules[module_name] + def make_getter(entry): + def get_opcode_chr(n): + opcode = entry[0] + n + assert entry[0] <= opcode <= entry[1] + return chr(opcode) + setattr(mod, name, get_opcode_chr) + for entry in interpreter.BYTECODE_RANGES: + name = entry[-1] + if len(entry) == 2: # no range + setattr(mod, name, chr(entry[0])) + else: + make_getter(entry) + class BootstrappedObjSpace(objspace.ObjSpace): def bootstrap(self): From noreply at buildbot.pypy.org Mon Apr 7 13:52:33 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 13:52:33 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed jit.py, added explicit arguments to full-vm interpreter. 
Message-ID: <20140407115233.BFF7B1C350E@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r778:0940a29d920f Date: 2014-04-07 13:52 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0940a29d920f/ Log: Fixed jit.py, added explicit arguments to full-vm interpreter. diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -25,6 +25,7 @@ # Pass a function inside here to meta-interpret it and show all encountered loops. def meta_interp(func): res = jit.meta_interp(func, [], listcomp=True, listops=True, backendopt=True, inline=True) + print "Finished" if res: print res.__repr__() @@ -34,7 +35,7 @@ def perform(receiver, selector, *args): _, interp, _, _ = read_image(imagename) def interp_miniloop(): - interp.perform(receiver, selector, *args) + return interp.perform(receiver, selector, *args) return interp_miniloop # This will build a jit executing a synthetic method composed of the given bytecodes and literals, @@ -49,22 +50,32 @@ s_frame = shadow.MethodContextShadow(space, None, w_method, w_receiver, []) w_frame = s_frame.w_self() def interp_execute_bytes_with_stack(): - interp.loop(w_frame) + return interp.loop(w_frame) return interp_execute_bytes_with_stack -# This will build a JIT for the entire VM. -def full_vm(): +# This will build a JIT for the entire VM. Arguments to the VM entry-point must be provided. 
+def full_vm(args): import targetimageloadingsmalltalk - argv = sys.argv def interp_full_vm(): - targetimageloadingsmalltalk.entry_point(argv) + return targetimageloadingsmalltalk.entry_point(args) return interp_full_vm +def run_vm_code(imagename, code): + return full_vm(['images/' + imagename, '-r', code]) + +def execute_vm_method(imagename, selector, receiver_num = 0, string_arg=None): + args = ['images/' + imagename, '-m', selector, '-n', receiver_num] + if string_arg: + args.extend(['-a', string_arg]) + return full_vm(args) + def main(): # func = perform(model.W_SmallInteger(1000), 'loopTest2') - # func = perform(model.W_SmallInteger(777), 'name') - func = execute_frame([returnReceiver], [], [model.W_SmallInteger(42)]) + func = perform(model.W_SmallInteger(777), 'name') + # func = execute_frame([returnReceiver], [], [model.W_SmallInteger(42)]) # func = full_vm() + + # func() meta_interp(func) # This is for execution using pytest.py. This way you can get a pdb on assertion-errors etc. From noreply at buildbot.pypy.org Mon Apr 7 13:52:28 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 13:52:28 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed up jit.py a little bit. Message-ID: <20140407115228.E69921C350E@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r774:41968a0bc9f3 Date: 2014-04-03 21:43 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/41968a0bc9f3/ Log: Fixed up jit.py a little bit. diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -4,7 +4,7 @@ # view jit. 
# -import sys, os +import sys from rpython import conftest class o: view = False @@ -12,31 +12,65 @@ conftest.option = o from rpython.jit.metainterp.test.test_ajit import LLJitMixin +from spyvm.test.util import import_bytecodes, read_image +from spyvm import model, shadow -from .util import import_bytecodes -from spyvm import model, interpreter, primitives, shadow -from spyvm import objspace, squeakimage -from spyvm.tool.analyseimage import create_squeakimage, create_testimage -from rpython.rlib.streamio import open_file_as_stream - -import_bytecodes(__name__) - -space = objspace.ObjSpace() +imagename = "mini.image" +# imagename = "minitest.image" sys.setrecursionlimit(5000) +import_bytecodes(__name__) +jit = LLJitMixin() -class TestLLtype(LLJitMixin): +# Pass a function inside here to meta-interpret it and show all encountered loops. +def meta_interp(func): + res = jit.meta_interp(func, [], listcomp=True, listops=True, backendopt=True, inline=True) + if res: + print res.__repr__() - def test_miniloop(self): +# ==== The following are factories for functions to be passed into meta_interp() ==== - from spyvm import objspace - space = objspace.ObjSpace() +# This will build a small jit just for the specified message-send +def perform(receiver, selector, *args): + _, interp, _, _ = read_image(imagename) + def interp_miniloop(): + interp.perform(receiver, selector, *args) + return interp_miniloop - image = create_testimage(space) - interp = interpreter.Interpreter(space, image) - w_selector = interp.perform(space.wrap_string('loopTest2'), "asSymbol") - assert isinstance(w_selector, model.W_BytesObject) - def interp_w(): - interp.perform(model.W_SmallInteger(1000), w_selector) +# This will build a jit executing a synthetic method composed of the given bytecodes and literals, +# and operating on the given stack. The receiver of the 'message' must be at the bottom of the stack. 
+# The bytecodes can be composed from constants created in this module in above import_bytecodes() call. +def execute_frame(bytes, literals, stack): + space, interp, _, _ = read_image(imagename) + w_method = model.W_CompiledMethod(space, header=512) + w_method.literals = literals + w_method.setbytes(bytes) + w_receiver = stack[0] + s_frame = shadow.MethodContextShadow(space, None, w_method, w_receiver, []) + w_frame = s_frame.w_self() + def interp_execute_bytes_with_stack(): + interp.loop(w_frame) + return interp_execute_bytes_with_stack - self.meta_interp(interp_w, [], listcomp=True, listops=True, backendopt=True, inline=True) +# This will build a JIT for the entire VM. +def full_vm(): + import targetimageloadingsmalltalk + argv = sys.argv + def interp_full_vm(): + targetimageloadingsmalltalk.entry_point(argv) + return interp_full_vm + +def main(): + # func = perform(model.W_SmallInteger(1000), 'loopTest2') + # func = perform(model.W_SmallInteger(777), 'name') + func = execute_frame([returnReceiver], [], [model.W_SmallInteger(42)]) + # func = full_vm() + meta_interp(func) + +# This is for execution using pytest.py. This way you can get a pdb on assertion-errors etc. +# Execute e.g. $ pypy ../pypy/pytest.py spyvm/test/jit.py -s --pdb -k test_main +def test_main(): + main() + +if __name__ == "__main__": + main() From noreply at buildbot.pypy.org Mon Apr 7 13:52:30 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 13:52:30 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Small consistency fixed in W_CompiledMethod and ClassShadow, regarding versioning. Message-ID: <20140407115230.58F871C350E@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r775:493897022dac Date: 2014-04-04 16:59 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/493897022dac/ Log: Small consistency fixed in W_CompiledMethod and ClassShadow, regarding versioning. 
Made sure that each time a variable guarded by @constant_for_version is changed, self.changed() is called. Using jit.elide_promoted() for @constant_for_version. Added descriptive funtion name. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -16,7 +16,7 @@ """ import sys, weakref from spyvm import constants, error, version, storage_statistics -from spyvm.version import elidable_for_version, constant_for_version +from spyvm.version import elidable_for_version, constant_for_version, constant_for_version_arg from rpython.rlib import rrandom, objectmodel, jit, signature from rpython.rlib.rarithmetic import intmask, r_uint, r_int @@ -1164,6 +1164,8 @@ header (4 bytes) literals (4 bytes each) bytecodes (variable) + + An optional method trailer can be part of the bytecodes part. """ repr_classname = "W_CompiledMethod" @@ -1176,22 +1178,12 @@ # Additional info about the method "_likely_methodname", "w_compiledin" ] -### Extension from Squeak 3.9 doc, which we do not implement: -### trailer (variable) -### The trailer has two variant formats. In the first variant, the last -### byte is at least 252 and the last four bytes represent a source pointer -### into one of the sources files (see #sourcePointer). In the second -### variant, the last byte is less than 252, and the last several bytes -### are a compressed version of the names of the method's temporary -### variables. The number of bytes used for this purpose is the value of -### the last byte in the method. 
- _likely_methodname = "" import_from_mixin(version.VersionMixin) def __init__(self, space, bytecount=0, header=0): + self.bytes = ["\x00"] * bytecount self.setheader(space, header) - self.bytes = ["\x00"] * bytecount def fillin(self, space, g_self): # Implicitely sets the header, including self.literalsize @@ -1200,7 +1192,7 @@ self.setbytes(g_self.get_bytes()[self.bytecodeoffset():]) # === Setters === - + def setheader(self, space, header): _primitive, literalsize, islarge, tempsize, argsize = constants.decode_compiled_method_header(header) self.literalsize = literalsize @@ -1215,15 +1207,15 @@ def setliteral(self, index, w_lit): self.literals[index] = w_lit - self.changed() if index == len(self.literals): self.w_compiledin = None + self.changed() def setliterals(self, literals): """NOT RPYTHON""" # Only for testing, not safe. self.literals = literals + self.w_compiledin = None self.changed() - self.w_compiledin = None def setbytes(self, bytes): self.bytes = bytes @@ -1262,7 +1254,7 @@ def getheader(self): return self.header - @constant_for_version + @constant_for_version_arg def getliteral(self, index): return self.literals[index] @@ -1276,7 +1268,7 @@ # mc for methods with islarge flag turned on 32 return 16 + self.islarge * 40 + self.argsize - @constant_for_version + @constant_for_version_arg def getbytecode(self, pc): assert pc >= 0 and pc < len(self.bytes) return self.bytes[pc] @@ -1368,6 +1360,7 @@ copy = W_CompiledMethod(space, 0, self.getheader()) copy.bytes = list(self.bytes) copy.literals = list(self.literals) + copy.changed() return copy def invariant(self): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1,6 +1,6 @@ import sys, weakref from spyvm import model, constants, error, wrapper, version -from spyvm.version import elidable_for_version, constant_for_version +from spyvm.version import elidable_for_version, constant_for_version, constant_for_version_arg from rpython.tool.pairtype import extendabletype 
from rpython.rlib import rarithmetic, objectmodel, jit, longlong2float from rpython.rlib.objectmodel import import_from_mixin @@ -305,14 +305,18 @@ # In Slang the value is read directly as a boxed integer, so that # the code gets a "pointer" whose bits are set as above, but # shifted one bit to the left and with the lowest bit set to 1. - - # compute the instance size (really the size, not the number of bytes) + + # Compute the instance size (really the size, not the number of bytes) instsize_lo = (classformat >> 1) & 0x3F instsize_hi = (classformat >> (9 + 1)) & 0xC0 self._instance_size = (instsize_lo | instsize_hi) - 1 # subtract hdr # decode the instSpec format = (classformat >> 7) & 15 self.instance_varsized = format >= 2 + + # In case of raised exception below. + self.changed() + if format < 4: self.instance_kind = POINTERS elif format == 4: @@ -470,14 +474,16 @@ # _______________________________________________________________ # Other Methods - @constant_for_version + @constant_for_version_arg def lookup(self, w_selector): look_in_shadow = self while look_in_shadow is not None: w_method = look_in_shadow.s_methoddict().find_selector(w_selector) if w_method is not None: # Old images don't store compiledin-info in literals. - w_method.w_compiledin = look_in_shadow.w_self() + if not w_method.w_compiledin: + w_method.w_compiledin = look_in_shadow.w_self() + w_method.changed() return w_method look_in_shadow = look_in_shadow._s_superclass raise MethodNotFound(self, w_selector) diff --git a/spyvm/version.py b/spyvm/version.py --- a/spyvm/version.py +++ b/spyvm/version.py @@ -17,13 +17,24 @@ # be used in situations where the receiver is very unlikely to change in the same # context of the interpreted program (like classes or compiled methods). 
def constant_for_version(func): - @jit.elidable - def elidable_func(self, version, *args): - return func(self, *args) - def meth(self, *args): - self = jit.promote(self) - version = jit.promote(self.version) - return elidable_func(self, version, *args) + def versioned_func(self, version): + return func(self) + versioned_func.func_name = "constant_" + func.func_name + elidable_func = jit.elidable_promote()(versioned_func) + def meth(self): + return elidable_func(self, self.version) + meth.func_name = "constant_meth_" + func.func_name + return meth + +# Same as constant_for_version, but allows for one additional argument. +def constant_for_version_arg(func): + def versioned_func(self, version, arg): + return func(self, arg) + versioned_func.func_name = "constant_" + func.func_name + elidable_func = jit.elidable_promote()(versioned_func) + def meth(self, arg): + return elidable_func(self, self.version, arg) + meth.func_name = "constant_meth_" + func.func_name return meth class Version(object): From noreply at buildbot.pypy.org Mon Apr 7 13:52:31 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 13:52:31 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: To determine the containing class of a CompiledMethod, use the class it has been looked up in. Message-ID: <20140407115231.872F21C350E@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r776:fc5bb4c06b5f Date: 2014-04-04 17:24 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/fc5bb4c06b5f/ Log: To determine the containing class of a CompiledMethod, use the class it has been looked up in. This reduces dependence on literals being stored in a certain way we expect them to be. 
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1226,6 +1226,11 @@ self.bytes[index0] = character self.changed() + def set_compiled_in(self, w_compiledin): + if not self.w_compiledin: + self.w_compiledin = w_compiledin + self.changed() + # === Getters === def getclass(self, space): @@ -1277,23 +1282,31 @@ def compiled_in(self): w_compiledin = self.w_compiledin if not w_compiledin: - literals = self.literals - if literals and len(literals) > 0: - # (Blue book, p 607) Last of the literals is either the containing class - # or an association with compiledin as a class - w_candidate = literals[-1] - if isinstance(w_candidate, W_PointersObject) and w_candidate.has_space(): - space = w_candidate.space() # Not pretty to steal the space from another object. + # If the method has not been looked up from a methoddict yet, try to get the + # containing class from it's literals. This should be rare in practice. + w_compiledin = self.compiled_in_from_literals() + self.w_compiledin = w_compiledin + assert w_compiledin is None or isinstance(w_compiledin, W_PointersObject) + return w_compiledin + + @jit.dont_look_inside # Tracing into this function is useless. + def compiled_in_from_literals(self): + w_compiledin = None + literals = self.literals + if literals and len(literals) > 0: + # (Blue book, p 607) Last of the literals is either the containing class + # or an association with compiledin as a class + w_candidate = literals[-1] + if isinstance(w_candidate, W_PointersObject) and w_candidate.has_space(): + space = w_candidate.space() # Not pretty to steal the space from another object. 
+ if w_candidate.is_class(space): + w_compiledin = w_candidate + elif w_candidate.size() >= 2: + from spyvm import wrapper + association = wrapper.AssociationWrapper(space, w_candidate) + w_candidate = association.value() if w_candidate.is_class(space): w_compiledin = w_candidate - elif w_candidate.size() >= 2: - from spyvm import wrapper - association = wrapper.AssociationWrapper(space, w_candidate) - w_candidate = association.value() - if w_candidate.is_class(space): - w_compiledin = w_candidate - self.w_compiledin = w_compiledin - assert w_compiledin is None or isinstance(w_compiledin, W_PointersObject) return w_compiledin # === Object Access === diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -480,10 +480,7 @@ while look_in_shadow is not None: w_method = look_in_shadow.s_methoddict().find_selector(w_selector) if w_method is not None: - # Old images don't store compiledin-info in literals. - if not w_method.w_compiledin: - w_method.w_compiledin = look_in_shadow.w_self() - w_method.changed() + w_method.set_compiled_in(look_in_shadow.w_self()) return w_method look_in_shadow = look_in_shadow._s_superclass raise MethodNotFound(self, w_selector) diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -93,6 +93,8 @@ class mockmethod(object): def __init__(self, val): self.val = val + def set_compiled_in(self, w): + pass w_class = bootstrap_class(0) shadow = w_class.as_class_get_shadow(space) shadow.installmethod(w_foo, mockmethod(1)) From noreply at buildbot.pypy.org Mon Apr 7 14:49:25 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 7 Apr 2014 14:49:25 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix total_allocated accounting (caused a flood of major collections after some Message-ID: <20140407124925.4A3B71C01F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1140:cfd37feb0f23 Date: 2014-04-07 14:50 +0200 
http://bitbucket.org/pypy/stmgc/changeset/cfd37feb0f23/ Log: fix total_allocated accounting (caused a flood of major collections after some time) diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -80,7 +80,6 @@ /* thread-safe: use the lock of pages.c to prevent any remapping from occurring under our feet */ mutex_pages_lock(); - increment_total_allocated(size + LARGE_MALLOC_OVERHEAD); /* Allocate the object with largemalloc.c from the lower addresses. */ char *addr = _stm_large_malloc(size); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -52,6 +52,7 @@ #define BOTH_CHUNKS_USED 0 #define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) #define END_MARKER 0xDEADBEEF +#define MIN_ALLOC_SIZE (sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) #define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) #define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) @@ -88,7 +89,7 @@ The additional chunks of a given size are linked "vertically" in the secondary 'u' doubly-linked list. 
- + +-----+ | 296 | +-----+ @@ -258,8 +259,8 @@ /* it can be very small, but we need to ensure a minimal size (currently 32 bytes) */ - if (request_size < sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) - request_size = sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE; + if (request_size < MIN_ALLOC_SIZE) + request_size = MIN_ALLOC_SIZE; size_t index = largebin_index(request_size); sort_bin(index); @@ -333,6 +334,7 @@ } mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; + increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); return (char *)&mscan->d; } @@ -343,6 +345,9 @@ assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); + /* 'size' is at least MIN_ALLOC_SIZE */ + increment_total_allocated(-(chunk->size + LARGE_MALLOC_OVERHEAD)); + #ifndef NDEBUG assert(chunk->size >= sizeof(dlist_t)); assert(chunk->size <= (((char *)last_chunk) - (char *)data)); @@ -554,7 +559,6 @@ chunk = next_chunk(chunk); /* go to the first non-free chunk */ while (chunk != last_chunk) { - /* here, the chunk we're pointing to is not free */ assert(chunk->prev_size != THIS_CHUNK_FREE); @@ -566,8 +570,6 @@ /* use the callback to know if 'chunk' contains an object that survives or dies */ if (!_largemalloc_sweep_keep(chunk)) { - size_t size = chunk->size; - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free((char *)&chunk->d); /* dies */ } chunk = mnext; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -243,7 +243,6 @@ } char *realobj = REAL_ADDRESS(pseg->pub.segment_base, item->addr); ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; From noreply at buildbot.pypy.org Mon Apr 7 14:51:42 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 7 Apr 2014 14:51:42 +0200 (CEST) Subject: [pypy-commit] pypy 
stmgc-c7: import stmgc Message-ID: <20140407125142.739DB1C01F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70480:13693bb0b800 Date: 2014-04-07 14:51 +0200 http://bitbucket.org/pypy/pypy/changeset/13693bb0b800/ Log: import stmgc diff --git a/rpython/translator/stm/import_stmgc.py b/rpython/translator/stm/import_stmgc.py --- a/rpython/translator/stm/import_stmgc.py +++ b/rpython/translator/stm/import_stmgc.py @@ -26,6 +26,8 @@ for p in sorted(plist): if not (p.basename.endswith('.c') or p.basename.endswith('.h')): continue + if p.basename.startswith('.'): + continue if p.basename.startswith('demo'): continue path = stmgc_dest.join(p.relto(stmgc_dir)) diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -5dbd50990e2c +cfd37feb0f23+ diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -81,7 +81,6 @@ /* thread-safe: use the lock of pages.c to prevent any remapping from occurring under our feet */ mutex_pages_lock(); - increment_total_allocated(size + LARGE_MALLOC_OVERHEAD); /* Allocate the object with largemalloc.c from the lower addresses. 
*/ char *addr = _stm_large_malloc(size); diff --git a/rpython/translator/stm/src_stm/stm/largemalloc.c b/rpython/translator/stm/src_stm/stm/largemalloc.c --- a/rpython/translator/stm/src_stm/stm/largemalloc.c +++ b/rpython/translator/stm/src_stm/stm/largemalloc.c @@ -53,6 +53,7 @@ #define BOTH_CHUNKS_USED 0 #define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) #define END_MARKER 0xDEADBEEF +#define MIN_ALLOC_SIZE (sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) #define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) #define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) @@ -89,7 +90,7 @@ The additional chunks of a given size are linked "vertically" in the secondary 'u' doubly-linked list. - + +-----+ | 296 | +-----+ @@ -259,8 +260,8 @@ /* it can be very small, but we need to ensure a minimal size (currently 32 bytes) */ - if (request_size < sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) - request_size = sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE; + if (request_size < MIN_ALLOC_SIZE) + request_size = MIN_ALLOC_SIZE; size_t index = largebin_index(request_size); sort_bin(index); @@ -334,6 +335,7 @@ } mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; + increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); return (char *)&mscan->d; } @@ -344,6 +346,9 @@ assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); + /* 'size' is at least MIN_ALLOC_SIZE */ + increment_total_allocated(-(chunk->size + LARGE_MALLOC_OVERHEAD)); + #ifndef NDEBUG assert(chunk->size >= sizeof(dlist_t)); assert(chunk->size <= (((char *)last_chunk) - (char *)data)); @@ -555,7 +560,6 @@ chunk = next_chunk(chunk); /* go to the first non-free chunk */ while (chunk != last_chunk) { - /* here, the chunk we're pointing to is not free */ assert(chunk->prev_size != THIS_CHUNK_FREE); @@ -567,8 +571,6 @@ /* use the callback to know if 'chunk' contains an object that survives or dies */ if 
(!_largemalloc_sweep_keep(chunk)) { - size_t size = chunk->size; - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free((char *)&chunk->d); /* dies */ } chunk = mnext; diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -244,7 +244,6 @@ } char *realobj = REAL_ADDRESS(pseg->pub.segment_base, item->addr); ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; From noreply at buildbot.pypy.org Mon Apr 7 17:22:20 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 7 Apr 2014 17:22:20 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: rename confusing _generalization_of and remove useless copies Message-ID: <20140407152220.582911C029E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70481:9a05f922f9ee Date: 2014-04-07 17:21 +0200 http://bitbucket.org/pypy/pypy/changeset/9a05f922f9ee/ Log: rename confusing _generalization_of and remove useless copies diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -76,7 +76,7 @@ self.fielddescrs = fielddescrs def generalization_of_renumbering_done(self, other, renum, bad): - if not self._generalization_of(other): + if not self._generalization_of_structpart(other): return False assert isinstance(other, AbstractVirtualStructStateInfo) @@ -94,7 +94,7 @@ return True - def _generalization_of(self, other): + def _generalization_of_structpart(self, other): raise NotImplementedError def enum_forced_boxes(self, boxes, value, optimizer): @@ -121,7 +121,7 @@ 
AbstractVirtualStructStateInfo.__init__(self, fielddescrs) self.known_class = known_class - def _generalization_of(self, other): + def _generalization_of_structpart(self, other): return (isinstance(other, VirtualStateInfo) and self.known_class.same_constant(other.known_class)) @@ -134,7 +134,7 @@ AbstractVirtualStructStateInfo.__init__(self, fielddescrs) self.typedescr = typedescr - def _generalization_of(self, other): + def _generalization_of_structpart(self, other): return (isinstance(other, VStructStateInfo) and self.typedescr is other.typedescr) @@ -147,12 +147,10 @@ def __init__(self, arraydescr): self.arraydescr = arraydescr - def _generalization_of(self, other): - return (isinstance(other, VArrayStateInfo) and - self.arraydescr is other.arraydescr) - def generalization_of_renumbering_done(self, other, renum, bad): - if not self._generalization_of(other): + if not isinstance(other, VArrayStateInfo): + return False + if self.arraydescr is not other.arraydescr: return False if len(self.fieldstate) != len(other.fieldstate): return False @@ -189,10 +187,11 @@ self.fielddescrs = fielddescrs def generalization_of_renumbering_done(self, other, renum, bad): - if not self._generalization_of(other): + if not isinstance(other, VArrayStructStateInfo): + return False + if not self.arraydescr is not other.arraydescr: return False - assert isinstance(other, VArrayStructStateInfo) if len(self.fielddescrs) != len(other.fielddescrs): return False @@ -209,10 +208,6 @@ p += 1 return True - def _generalization_of(self, other): - return (isinstance(other, VArrayStructStateInfo) and - self.arraydescr is other.arraydescr) - def _enum(self, virtual_state): for s in self.fieldstate: s.enum(virtual_state) From noreply at buildbot.pypy.org Mon Apr 7 17:33:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 7 Apr 2014 17:33:42 +0200 (CEST) Subject: [pypy-commit] pypy default: skip if no asmgcc Message-ID: <20140407153342.4F45D1C3566@cobra.cs.uni-duesseldorf.de> Author: Matti 
Picus Branch: Changeset: r70482:415af8580183 Date: 2014-04-07 17:45 +0300 http://bitbucket.org/pypy/pypy/changeset/415af8580183/ Log: skip if no asmgcc diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -1,4 +1,4 @@ -import os, sys +import os, sys, py from rpython.tool.udir import udir from rpython.rlib.jit import JitDriver, unroll_parameters, set_param from rpython.rlib.jit import PARAMETERS, dont_look_inside @@ -7,7 +7,7 @@ from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.test.support import CCompiledMixin from rpython.jit.codewriter.policy import StopAtXPolicy - +from rpython.config.config import ConfigError class TranslationTest(CCompiledMixin): CPUClass = getcpuclass() @@ -252,6 +252,9 @@ try: res = self.meta_interp(main, [400]) assert res == main(400) + except ConfigError,e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: del os.environ['PYPYLOG'] From noreply at buildbot.pypy.org Mon Apr 7 17:33:43 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 7 Apr 2014 17:33:43 +0200 (CEST) Subject: [pypy-commit] pypy default: skip if no asmgcc Message-ID: <20140407153343.87CD81C3566@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70483:2d8eaa5f5079 Date: 2014-04-07 18:28 +0300 http://bitbucket.org/pypy/pypy/changeset/2d8eaa5f5079/ Log: skip if no asmgcc diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -5,7 +5,7 @@ """ import weakref -import os +import os, py from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype from rpython.rlib.jit import JitDriver, 
dont_look_inside @@ -13,6 +13,7 @@ from rpython.jit.backend.llsupport.gc import GcLLDescr_framework from rpython.tool.udir import udir from rpython.config.translationoption import DEFL_GC +from rpython.config.config import ConfigError class X(object): @@ -166,6 +167,9 @@ cls.cbuilder = compile(get_entry(allfuncs), cls.gc, gcrootfinder=cls.gcrootfinder, jit=True, thread=True) + except ConfigError, e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: GcLLDescr_framework.DEBUG = OLD_DEBUG From noreply at buildbot.pypy.org Mon Apr 7 19:34:42 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 19:34:42 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed an assert that was always True before, due to the surrounding parantheses (evaluating to a tuple). Message-ID: <20140407173442.D98681C01F7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r779:26fa4b669836 Date: 2014-04-07 15:17 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/26fa4b669836/ Log: Fixed an assert that was always True before, due to the surrounding parantheses (evaluating to a tuple). 
diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -110,8 +110,7 @@ s_frame.push(w_result) else: len_unwrap_spec = len(unwrap_spec) - assert (len_unwrap_spec == len(inspect.getargspec(func)[0]) + 1, - "wrong number of arguments") + assert (len_unwrap_spec + 2 == len(inspect.getargspec(func)[0])), "wrong number of arguments" unrolling_unwrap_spec = unrolling_iterable(enumerate(unwrap_spec)) def wrapped(interp, s_frame, argument_count_m1, w_method=None): argument_count = argument_count_m1 + 1 # to account for the rcvr From noreply at buildbot.pypy.org Mon Apr 7 19:34:44 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 19:34:44 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed up the jit.py some more, now able to run images, code etc. Message-ID: <20140407173444.006A41C01F7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r780:0d0d36f53266 Date: 2014-04-07 16:09 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0d0d36f53266/ Log: Fixed up the jit.py some more, now able to run images, code etc. diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -22,12 +22,14 @@ import_bytecodes(__name__) jit = LLJitMixin() +def print_result(res): + if res is not None: + print "Result: %r" % res + # Pass a function inside here to meta-interpret it and show all encountered loops. def meta_interp(func): res = jit.meta_interp(func, [], listcomp=True, listops=True, backendopt=True, inline=True) - print "Finished" - if res: - print res.__repr__() + print_result(res) # ==== The following are factories for functions to be passed into meta_interp() ==== @@ -56,26 +58,38 @@ # This will build a JIT for the entire VM. Arguments to the VM entry-point must be provided. 
def full_vm(args): import targetimageloadingsmalltalk + full_args = [sys.argv[0]] + full_args.extend([ str(a) for a in args ]) def interp_full_vm(): - return targetimageloadingsmalltalk.entry_point(args) + return targetimageloadingsmalltalk.entry_point(full_args) return interp_full_vm +def open_image(imagename, additional_args = []): + args = ["images/" + imagename] + args.extend(additional_args) + return full_vm(args) + def run_vm_code(imagename, code): - return full_vm(['images/' + imagename, '-r', code]) + return open_image(['-r', code]) -def execute_vm_method(imagename, selector, receiver_num = 0, string_arg=None): - args = ['images/' + imagename, '-m', selector, '-n', receiver_num] +def execute_vm_method(imagename, selector, receiver_num = None, string_arg=None): + args = ['-m', selector, '-n', receiver_num] if string_arg: args.extend(['-a', string_arg]) - return full_vm(args) + if receiver_num: + args.extend(['-n', receiver_num]) + return open_image(args) def main(): # func = perform(model.W_SmallInteger(1000), 'loopTest2') - func = perform(model.W_SmallInteger(777), 'name') + # func = perform(model.W_SmallInteger(777), 'name') # func = execute_frame([returnReceiver], [], [model.W_SmallInteger(42)]) - # func = full_vm() + # func = run_vm_code("mini.image", "^5+6") + # func = execute_vm_method("mini.image", "name", 33) + func = open_image("Squeak4.5-noBitBlt.image") - # func() + # import pdb; pdb.set_trace() + # print_result(func()) meta_interp(func) # This is for execution using pytest.py. This way you can get a pdb on assertion-errors etc. 
From noreply at buildbot.pypy.org Mon Apr 7 19:34:45 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 19:34:45 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Better output for debug_merge_point Message-ID: <20140407173445.21C111C01F7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r781:8c5600acd5a3 Date: 2014-04-07 16:09 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/8c5600acd5a3/ Log: Better output for debug_merge_point diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -19,7 +19,7 @@ def get_printable_location(pc, self, method): bc = ord(method.bytes[pc]) name = method.safe_identifier_string() - return '%d: [%s]%s (%s)' % (pc, hex(bc), BYTECODE_NAMES[bc], name) + return '(%s) [%d]: <%s>%s' % (name, pc, hex(bc), BYTECODE_NAMES[bc]) class Interpreter(object): @@ -825,11 +825,10 @@ result = [None] * 256 for entry in BYTECODE_RANGES: if len(entry) == 2: - positions = [entry[0]] + result[entry[0]] = entry[1] else: - positions = range(entry[0], entry[1]+1) - for pos in positions: - result[pos] = entry[-1] + for arg, pos in enumerate(range(entry[0], entry[1]+1)): + result[pos] = "%s(%s)" % (entry[2], arg) assert None not in result return result From noreply at buildbot.pypy.org Mon Apr 7 19:34:46 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 19:34:46 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed another assert that was always True. Message-ID: <20140407173446.3E8011C01F7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r782:672696846ecf Date: 2014-04-07 17:03 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/672696846ecf/ Log: Fixed another assert that was always True. 
diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -45,8 +45,7 @@ MAJOR = major def decorator(func): len_unwrap_spec = len(unwrap_spec) - assert (len_unwrap_spec == len(inspect.getargspec(func)[0]) + 1, - "wrong number of arguments") + assert len_unwrap_spec == len(inspect.getargspec(func)[0]), "wrong number of arguments" unrolling_unwrap_spec = unrolling_iterable(enumerate(unwrap_spec)) def wrapped(*c_arguments): assert len_unwrap_spec == len(c_arguments) From noreply at buildbot.pypy.org Mon Apr 7 19:34:47 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 19:34:47 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: More fixes to jit.py. Message-ID: <20140407173447.5690C1C01F7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r783:a04e692d3fb1 Date: 2014-04-07 19:07 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a04e692d3fb1/ Log: More fixes to jit.py. diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -15,9 +15,6 @@ from spyvm.test.util import import_bytecodes, read_image from spyvm import model, shadow -imagename = "mini.image" -# imagename = "minitest.image" - sys.setrecursionlimit(5000) import_bytecodes(__name__) jit = LLJitMixin() @@ -28,13 +25,12 @@ # Pass a function inside here to meta-interpret it and show all encountered loops. 
def meta_interp(func): - res = jit.meta_interp(func, [], listcomp=True, listops=True, backendopt=True, inline=True) - print_result(res) + return jit.meta_interp(func, [], listcomp=True, listops=True, backendopt=True, inline=True) # ==== The following are factories for functions to be passed into meta_interp() ==== # This will build a small jit just for the specified message-send -def perform(receiver, selector, *args): +def perform(imagename, receiver, selector, *args): _, interp, _, _ = read_image(imagename) def interp_miniloop(): return interp.perform(receiver, selector, *args) @@ -43,7 +39,7 @@ # This will build a jit executing a synthetic method composed of the given bytecodes and literals, # and operating on the given stack. The receiver of the 'message' must be at the bottom of the stack. # The bytecodes can be composed from constants created in this module in above import_bytecodes() call. -def execute_frame(bytes, literals, stack): +def execute_frame(imagename, bytes, literals, stack): space, interp, _, _ = read_image(imagename) w_method = model.W_CompiledMethod(space, header=512) w_method.literals = literals @@ -51,15 +47,17 @@ w_receiver = stack[0] s_frame = shadow.MethodContextShadow(space, None, w_method, w_receiver, []) w_frame = s_frame.w_self() - def interp_execute_bytes_with_stack(): + def interp_execute_frame(): return interp.loop(w_frame) - return interp_execute_bytes_with_stack + return interp_execute_frame # This will build a JIT for the entire VM. Arguments to the VM entry-point must be provided. 
def full_vm(args): import targetimageloadingsmalltalk - full_args = [sys.argv[0]] + module_file = targetimageloadingsmalltalk.__file__[:-1] + full_args = [ module_file ] full_args.extend([ str(a) for a in args ]) + print ">> Entry Point arguments: %r" % full_args def interp_full_vm(): return targetimageloadingsmalltalk.entry_point(full_args) return interp_full_vm @@ -70,27 +68,40 @@ return full_vm(args) def run_vm_code(imagename, code): - return open_image(['-r', code]) - -def execute_vm_method(imagename, selector, receiver_num = None, string_arg=None): - args = ['-m', selector, '-n', receiver_num] + return open_image(imagename, ['-r', code]) + +def execute_vm_method(imagename, selector, receiver_num=None, string_arg=None): + args = ['-m', selector] if string_arg: args.extend(['-a', string_arg]) if receiver_num: args.extend(['-n', receiver_num]) - return open_image(args) + return open_image(imagename, args) def main(): - # func = perform(model.W_SmallInteger(1000), 'loopTest2') - # func = perform(model.W_SmallInteger(777), 'name') - # func = execute_frame([returnReceiver], [], [model.W_SmallInteger(42)]) - # func = run_vm_code("mini.image", "^5+6") + # ===== First define which image we are going to use. + imagename = "mini.image" + # imagename = "minitest.image" + # imagename = "Squeak4.5-noBitBlt.image" + + # ===== These entry-points pre-load the image and execute just a single frame. + # func = perform(imagename, model.W_SmallInteger(1000), 'loopTest2') + func = perform(imagename, model.W_SmallInteger(777), 'name') + # func = execute_frame(imagename, [returnReceiver], [], [model.W_SmallInteger(42)]) + + # ===== These execute the complete interpreter + # XXX These do not work because loading the image file while meta-interpreting always leads to + # a 'Bad file descriptor' error. 
# func = execute_vm_method("mini.image", "name", 33) - func = open_image("Squeak4.5-noBitBlt.image") + # func = run_vm_code(imagename, "^5+6") + # func = execute_vm_method(imagename, "name", 33) + # func = open_image(imagename) + # ===== Now we can either simply execute the entry-point, or meta-interpret it (showing all encountered loops). # import pdb; pdb.set_trace() - # print_result(func()) - meta_interp(func) + # res = func() + res = meta_interp(func) + print_result(res) # This is for execution using pytest.py. This way you can get a pdb on assertion-errors etc. # Execute e.g. $ pypy ../pypy/pytest.py spyvm/test/jit.py -s --pdb -k test_main From noreply at buildbot.pypy.org Mon Apr 7 19:34:48 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 19:34:48 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Moved image-loading code to ensure proper error handling. Message-ID: <20140407173448.673FB1C01F7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r784:ff6b7ae58916 Date: 2014-04-07 19:14 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ff6b7ae58916/ Log: Moved image-loading code to ensure proper error handling. 
diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -202,13 +202,13 @@ path = rpath.rabspath(path) try: f = open_file_as_stream(path, mode="rb", buffering=0) + try: + imagedata = f.readall() + finally: + f.close() except OSError as e: os.write(2, "%s -- %s (LoadError)\n" % (os.strerror(e.errno), path)) return 1 - try: - imagedata = f.readall() - finally: - f.close() image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) From noreply at buildbot.pypy.org Mon Apr 7 19:34:49 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 19:34:49 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Not using the global space-variable in order to make the functions in the main-module reusable. Message-ID: <20140407173449.79E341C01F7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r785:640f9e128a12 Date: 2014-04-07 19:33 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/640f9e128a12/ Log: Not using the global space-variable in order to make the functions in the main-module reusable. The space is always available through the interpreter object. 
diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -18,14 +18,15 @@ from spyvm.plugins.vmdebugging import stop_ui_process stop_ui_process() - scheduler = wrapper.scheduler(interp.space) + space = interp.space + scheduler = wrapper.scheduler(space) w_hpp = scheduler.active_process() if space.unwrap_int(scheduler.active_process().fetch(space, 2)) > space.unwrap_int(w_hpp.fetch(space, 2)): w_hpp = scheduler.active_process() assert isinstance(w_hpp, model.W_PointersObject) w_benchmark_proc = model.W_PointersObject( - interp.space, - w_hpp.getclass(interp.space), + space, + w_hpp.getclass(space), w_hpp.size() ) @@ -43,7 +44,7 @@ w_benchmark_proc.store(space, 2, space.wrap_int(priority)) # make process eligible for scheduling - wrapper.ProcessWrapper(interp.space, w_benchmark_proc).put_to_sleep() + wrapper.ProcessWrapper(space, w_benchmark_proc).put_to_sleep() t1 = time.time() w_result = _run_image(interp) @@ -55,6 +56,7 @@ return -1 def _run_image(interp): + space = interp.space ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) w_ctx = ap.suspended_context() assert isinstance(w_ctx, model.W_PointersObject) @@ -67,9 +69,10 @@ def _run_code(interp, code, as_benchmark=False): import time selector = "codeTest%d" % int(time.time()) + space = interp.space try: w_result = interp.perform( - interp.space.w_SmallInteger, + space.w_SmallInteger, "compile:classified:notifying:", space.wrap_string("%s\r\n%s" % (selector, code)), space.wrap_string("spy-run-code"), @@ -97,21 +100,19 @@ else: return _run_benchmark(interp, 0, selector, "") - -space = objspace.ObjSpace() - def context_for(interp, number, benchmark, stringarg): # XXX: Copied from interpreter >> perform + space = interp.space argcount = 0 if stringarg == "" else 1 - w_receiver = interp.space.wrap_int(number) - w_selector = interp.perform(interp.space.wrap_string(benchmark), "asSymbol") + 
w_receiver = space.wrap_int(number) + w_selector = interp.perform(space.wrap_string(benchmark), "asSymbol") w_method = model.W_CompiledMethod(space, header=512) - w_method.literalatput0(interp.space, 1, w_selector) + w_method.literalatput0(space, 1, w_selector) w_method.setbytes([chr(131), chr(argcount << 5), chr(124)]) #returnTopFromMethod - s_frame = shadow.MethodContextShadow(interp.space, None, w_method, w_receiver, []) + s_frame = shadow.MethodContextShadow(space, None, w_method, w_receiver, []) s_frame.push(w_receiver) if not stringarg == "": - s_frame.push(interp.space.wrap_string(stringarg)) + s_frame.push(space.wrap_string(stringarg)) return s_frame def _usage(argv): @@ -135,6 +136,7 @@ if len(argv) == idx + 1: raise RuntimeError("Error: missing argument after %s" % arg) +prebuilt_space = objspace.ObjSpace() def entry_point(argv): idx = 1 @@ -209,7 +211,8 @@ except OSError as e: os.write(2, "%s -- %s (LoadError)\n" % (os.strerror(e.errno), path)) return 1 - + + space = prebuilt_space image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=evented) From noreply at buildbot.pypy.org Mon Apr 7 19:34:50 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 19:34:50 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added comments to jit.py. Message-ID: <20140407173450.8F5D51C01F7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r786:426988d32a41 Date: 2014-04-07 19:34 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/426988d32a41/ Log: Added comments to jit.py. Using the methods from the main module to execute things inside a pre-loaded image. 
diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -27,11 +27,14 @@ def meta_interp(func): return jit.meta_interp(func, [], listcomp=True, listops=True, backendopt=True, inline=True) +def load(imagename): + _, interp, _, _ = read_image(imagename) + return interp + # ==== The following are factories for functions to be passed into meta_interp() ==== -# This will build a small jit just for the specified message-send -def perform(imagename, receiver, selector, *args): - _, interp, _, _ = read_image(imagename) +def preload_perform(imagename, receiver, selector, *args): + interp = load(imagename) def interp_miniloop(): return interp.perform(receiver, selector, *args) return interp_miniloop @@ -39,8 +42,9 @@ # This will build a jit executing a synthetic method composed of the given bytecodes and literals, # and operating on the given stack. The receiver of the 'message' must be at the bottom of the stack. # The bytecodes can be composed from constants created in this module in above import_bytecodes() call. -def execute_frame(imagename, bytes, literals, stack): - space, interp, _, _ = read_image(imagename) +def preload_execute_frame(imagename, bytes, literals, stack): + interp = load(imagename) + space = interp.space w_method = model.W_CompiledMethod(space, header=512) w_method.literals = literals w_method.setbytes(bytes) @@ -51,7 +55,31 @@ return interp.loop(w_frame) return interp_execute_frame -# This will build a JIT for the entire VM. Arguments to the VM entry-point must be provided. 
+# ==== The following will pre-load images and build a jit based on methods from the entry-point module + +def run_benchmark(imagename, benchmark, number=0, arg=""): + import targetimageloadingsmalltalk + interp = load(imagename) + def interp_run_benchmark(): + return targetimageloadingsmalltalk._run_benchmark(interp, number, benchmark, arg) + return interp_run_benchmarks + +def run_code(imagename, code, as_benchmark=False): + import targetimageloadingsmalltalk + interp = load(imagename) + def interp_run_code(): + return targetimageloadingsmalltalk._run_code(interp, code, as_benchmark) + return interp_run_code + +def run_image(imagename): + import targetimageloadingsmalltalk + interp = load(imagename) + def interp_run_image(): + return targetimageloadingsmalltalk._run_image(imagename) + return interp_run_image + +# ==== The following will build a JIT for the real entry-point. + def full_vm(args): import targetimageloadingsmalltalk module_file = targetimageloadingsmalltalk.__file__[:-1] @@ -62,21 +90,23 @@ return targetimageloadingsmalltalk.entry_point(full_args) return interp_full_vm -def open_image(imagename, additional_args = []): +def full_vm_image(imagename, additional_args = []): args = ["images/" + imagename] args.extend(additional_args) return full_vm(args) -def run_vm_code(imagename, code): - return open_image(imagename, ['-r', code]) +def full_vm_code(imagename, code): + return full_vm_image(imagename, ['-r', code]) -def execute_vm_method(imagename, selector, receiver_num=None, string_arg=None): +def full_vm_method(imagename, selector, receiver_num=None, string_arg=None): args = ['-m', selector] if string_arg: args.extend(['-a', string_arg]) if receiver_num: args.extend(['-n', receiver_num]) - return open_image(imagename, args) + return full_vm_image(imagename, args) + +# ==== The Main coordinates above methods def main(): # ===== First define which image we are going to use. 
@@ -84,21 +114,25 @@ # imagename = "minitest.image" # imagename = "Squeak4.5-noBitBlt.image" - # ===== These entry-points pre-load the image and execute just a single frame. - # func = perform(imagename, model.W_SmallInteger(1000), 'loopTest2') - func = perform(imagename, model.W_SmallInteger(777), 'name') - # func = execute_frame(imagename, [returnReceiver], [], [model.W_SmallInteger(42)]) + # ===== These entry-points pre-load the image and directly execute a single frame. + # func = preload_perform(imagename, model.W_SmallInteger(1000), 'loopTest2') + # func = preload_perform(imagename, model.W_SmallInteger(777), 'name') + # func = preload_execute_frame(imagename, [returnReceiver], [], [model.W_SmallInteger(42)]) # ===== These execute the complete interpreter - # XXX These do not work because loading the image file while meta-interpreting always leads to - # a 'Bad file descriptor' error. - # func = execute_vm_method("mini.image", "name", 33) - # func = run_vm_code(imagename, "^5+6") - # func = execute_vm_method(imagename, "name", 33) - # func = open_image(imagename) + # ===== XXX These do not work because loading the image file while meta-interpreting always leads to + # ===== a 'Bad file descriptor' error. + # func = full_vm_code(imagename, "^5+6") + # func = full_vm_method(imagename, "name", 33) + # func = full_vm_image(imagename) + + # ==== These entry-points pre-load the image and then use methods from the entry-point module. + # ==== This is very close to what actually happens in the VM, but with a pre-loaded image. + # func = run_benchmark(imagename, "loopTest2", 10000) + func = run_code(imagename, "^6+7") + # func = run_image(imagename) # ===== Now we can either simply execute the entry-point, or meta-interpret it (showing all encountered loops). 
- # import pdb; pdb.set_trace() # res = func() res = meta_interp(func) print_result(res) From noreply at buildbot.pypy.org Mon Apr 7 19:42:49 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Apr 2014 19:42:49 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed run_image Message-ID: <20140407174249.AAAE91C01F7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r787:97a7e0670161 Date: 2014-04-07 19:42 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/97a7e0670161/ Log: Fixed run_image diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -75,7 +75,7 @@ import targetimageloadingsmalltalk interp = load(imagename) def interp_run_image(): - return targetimageloadingsmalltalk._run_image(imagename) + return targetimageloadingsmalltalk._run_image(interp) return interp_run_image # ==== The following will build a JIT for the real entry-point. @@ -129,8 +129,8 @@ # ==== These entry-points pre-load the image and then use methods from the entry-point module. # ==== This is very close to what actually happens in the VM, but with a pre-loaded image. # func = run_benchmark(imagename, "loopTest2", 10000) - func = run_code(imagename, "^6+7") - # func = run_image(imagename) + # func = run_code(imagename, "^6+7", as_benchmark=True) + func = run_image(imagename) # ===== Now we can either simply execute the entry-point, or meta-interpret it (showing all encountered loops). 
# res = func() From noreply at buildbot.pypy.org Mon Apr 7 20:54:01 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 7 Apr 2014 20:54:01 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140407185401.9BC391C0110@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70484:dbedd0a64d5d Date: 2014-04-07 11:53 -0700 http://bitbucket.org/pypy/pypy/changeset/dbedd0a64d5d/ Log: merge default diff too long, truncating to 2000 out of 3817 lines diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,7 +48,6 @@ def tearDown(self): os.chdir(self.old_dir) - gc.collect() for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -162,6 +162,7 @@ # Remark: Do not perform more than one test per open file, # since that does NOT catch the readline error on Windows. data = 'xxx' + self.f.close() for mode in ['w', 'wb', 'a', 'ab']: for attr in ['read', 'readline', 'readlines']: self.f = open(TESTFN, mode) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -19,5 +19,5 @@ fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) imp.load_module('_ctypes_test', fp, filename, description) except ImportError: - print 'could not find _ctypes_test in',output_dir + print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -4,7 +4,6 @@ .. 
_`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py -.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ .. _`pypy/bin/pyinteractive.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/pyinteractive.py @@ -35,7 +34,6 @@ .. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py .. _`pypy/interpreter/pyparser`: @@ -49,21 +47,21 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. _`pypy/module/cppyy/capi/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/__init__.py +.. _`pypy/module/cppyy/capi/builtin_capi.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/builtin_capi.py +.. _`pypy/module/cppyy/include/capi.h`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/include/capi.h +.. 
_`pypy/module/test_lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/test_lib_pypy/ .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ -.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ .. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ -.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. _`pypy/objspace/std/bytesobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/bytesobject.py .. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py .. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py -.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. _`pypy/objspace/std/strbufobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/strbufobject.py .. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py -.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py -.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py .. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ -.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`rpython/annotator`: .. 
_`rpython/annotator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/annotator/ @@ -75,6 +73,11 @@ .. _`rpython/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/config/translationoption.py .. _`rpython/flowspace/`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/ .. _`rpython/flowspace/model.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/model.py +.. _`rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/ +.. _`rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/generation.py +.. _`rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/hybrid.py +.. _`rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/minimarkpage.py +.. _`rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/semispace.py .. _`rpython/rlib`: .. _`rpython/rlib/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/ .. _`rpython/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/listsort.py @@ -93,16 +96,12 @@ .. _`rpython/rtyper/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ .. _`rpython/rtyper/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/ .. _`rpython/rtyper/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/lltype.py -.. _`rpython/rtyper/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/ -.. _`rpython/rtyper/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/generation.py -.. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py -.. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py -.. 
_`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py .. _`rpython/rtyper/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rtyper.py .. _`rpython/rtyper/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/test/test_llinterp.py +.. _`rpython/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/rpython/tool/algo/ .. _`rpython/translator`: .. _`rpython/translator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/ .. _`rpython/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/backendopt/ diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -9,9 +9,3 @@ distribution.rst - dot-net.rst - - - - - diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -742,9 +742,9 @@ Testing modules in ``lib_pypy/`` -------------------------------- -You can go to the `lib_pypy/pypy_test/`_ directory and invoke the testing tool +You can go to the `pypy/module/test_lib_pypy/`_ directory and invoke the testing tool ("py.test" or "python ../../pypy/test_all.py") to run tests against the -lib_pypy hierarchy. Note, that tests in `lib_pypy/pypy_test/`_ are allowed +lib_pypy hierarchy. Note, that tests in `pypy/module/test_lib_pypy/`_ are allowed and encouraged to let their tests run at interpreter level although `lib_pypy/`_ modules eventually live at PyPy's application level. This allows us to quickly test our python-coded reimplementations @@ -835,15 +835,6 @@ web interface. .. 
_`development tracker`: https://bugs.pypy.org/ - -use your codespeak login or register ------------------------------------- - -If you have an existing codespeak account, you can use it to login within the -tracker. Else, you can `register with the tracker`_ easily. - - -.. _`register with the tracker`: https://bugs.pypy.org/user?@template=register .. _`roundup`: http://roundup.sourceforge.net/ diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst --- a/pypy/doc/config/opt.rst +++ b/pypy/doc/config/opt.rst @@ -46,5 +46,5 @@ The default level is `2`. -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser garbage collector`: http://hboehm.info/gc/ .. _`custom garbage collectors`: ../garbage_collection.html diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.txt --- a/pypy/doc/config/translation.backendopt.txt +++ b/pypy/doc/config/translation.backendopt.txt @@ -1,5 +1,5 @@ This group contains options about various backend optimization passes. Most of them are described in the `EU report about optimization`_ -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU report about optimization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst --- a/pypy/doc/cppyy_backend.rst +++ b/pypy/doc/cppyy_backend.rst @@ -51,3 +51,6 @@ to ``PATH``). In case of the former, include files are expected under ``$ROOTSYS/include`` and libraries under ``$ROOTSYS/lib``. + + +.. 
include:: _ref.txt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -47,7 +47,7 @@ `pypy/tool/`_ various utilities and hacks used from various places -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic +`rpython/tool/algo/`_ general-purpose algorithmic and mathematic tools `pypy/tool/pytest/`_ support code for our `testing methods`_ @@ -129,3 +129,4 @@ .. _Mono: http://www.mono-project.com/ .. _`"standard library"`: rlib.html .. _`graph viewer`: getting-started-dev.html#try-out-the-translator +.. include:: _ref.txt diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,5 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - ============================= lib_pypy/distributed features ============================= diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -17,7 +17,7 @@ Read more in the `EuroPython 2006 sprint report`_. -.. _`EuroPython 2006 sprint report`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt +.. _`EuroPython 2006 sprint report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt PyPy at XP 2006 and Agile 2006 ================================================================== @@ -41,8 +41,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. 
_`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. _report: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.html -.. _announcement: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.txt PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -114,7 +114,7 @@ said they were interested in the outcome and would keep an eye on its progress. Read the `talk slides`_. -.. _`talk slides`: http://codespeak.net/pypy/extradoc/talk/solutions-linux-paris-2006.html +.. _`talk slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/solutions-linux-paris-2006.html PyPy Sprint in Palma De Mallorca 23rd - 29th January 2006 @@ -129,9 +129,9 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/sprint-announcement.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q1/002746.html -.. _`one for the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2006q1/002749.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.txt +.. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html +.. 
_`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html Preliminary EU reports released =============================== @@ -155,8 +155,8 @@ Michael and Carl have written a `report about the first half`_ and `one about the second half`_ of the sprint. *(12/18/2005)* -.. _`report about the first half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002656.html -.. _`one about the second half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002660.html +.. _`report about the first half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002656.html +.. _`one about the second half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002660.html PyPy release 0.8.0 =================== @@ -187,12 +187,12 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: http://codespeak.net/pypy/extradoc/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.txt .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style -.. _`report about day one`: http://codespeak.net/pipermail/pypy-dev/2005q4/002510.html -.. _`one about day two and three`: http://codespeak.net/pipermail/pypy-dev/2005q4/002512.html -.. _`the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2005q4/002514.html +.. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html +.. _`one about day two and three`: https://mail.python.org/pipermail/pypy-dev/2005-October/002512.html +.. _`the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2005-October/002514.html PyPy release 0.7.0 =================== @@ -217,15 +217,13 @@ Its main focus is translation of the whole PyPy interpreter to a low level language and reaching 2.4.1 Python compliance. The goal of the sprint is to release a first self-contained -PyPy-0.7 version. 
Carl has written a report about `day 1 - 3`_, -there are `some pictures`_ online and a `heidelberg summary report`_ -detailing some of the works that led to the successful release -of `pypy-0.7.0`_! +PyPy-0.7 version. Carl has written a report about `day 1 - 3`_ +and a `heidelberg summary report`_ detailing some of the works +that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.html -.. _`PyPy sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-sprint.html -.. _`day 1 - 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002287.html -.. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt +.. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html PyPy Hildesheim2 finished: first self-contained PyPy run! =========================================================== @@ -233,20 +231,16 @@ Up until 31st August we were in a PyPy sprint at `Trillke-Gut`_. Carl has written a `report about day 1`_, Holger about `day 2 and day 3`_ and Carl again about `day 4 and day 5`_, -On `day 6`_ Holger reports the `breakthrough`_: PyPy runs -on its own! Hurray_!. And Carl finally reports about the winding +On `day 6`_ Holger reports the breakthrough: PyPy runs +on its own! Hurray!. And Carl finally reports about the winding down of `day 7`_ which saw us relaxing, discussing and generally -having a good time. You might want to look at the selected -`pictures from the sprint`_. +having a good time. -.. _`report about day 1`: http://codespeak.net/pipermail/pypy-dev/2005q3/002217.html -.. _`day 2 and day 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002220.html -.. 
_`day 4 and day 5`: http://codespeak.net/pipermail/pypy-dev/2005q3/002234.html -.. _`day 6`: http://codespeak.net/pipermail/pypy-dev/2005q3/002239.html -.. _`day 7`: http://codespeak.net/pipermail/pypy-dev/2005q3/002245.html -.. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg -.. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html -.. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ +.. _`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html +.. _`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html +.. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html +.. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html +.. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html .. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished @@ -264,15 +258,15 @@ the LLVM backends and type inference in general. *(07/13/2005)* -.. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html -.. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html -.. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev +.. _`day 1`: https://mail.python.org/pipermail/pypy-dev/2005-June/002169.html +.. _`day 2`: https://mail.python.org/pipermail/pypy-dev/2005-June/002171.html +.. _`day 3`: https://mail.python.org/pipermail/pypy-dev/2005-June/002172.html +.. _`pypy-dev`: https://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-announcement.html -.. _`list of people coming`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-people.html +.. 
_`sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-announcement.html +.. _`list of people coming`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-people.html Duesseldorf PyPy sprint 2-9 June 2006 ================================================================== @@ -285,8 +279,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.txt +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy at XP 2006 and Agile 2006 diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -79,7 +79,7 @@ .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf -.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 +.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf .. 
_`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf @@ -356,8 +356,6 @@ .. _`transparent dynamic optimization`: http://www.hpl.hp.com/techreports/1999/HPL-1999-77.pdf .. _Dynamo: http://www.hpl.hp.com/techreports/1999/HPL-1999-78.pdf .. _testdesign: coding-guide.html#test-design -.. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html -.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ .. _IronPython: http://ironpython.codeplex.com/ .. _`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -222,7 +222,7 @@ PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use for other projects. +`_ and use for other projects. The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -264,7 +264,7 @@ interpreter. .. _`py.test testing tool`: http://pytest.org -.. _`py.test usage and invocations`: http://pytest.org/usage.html#usage +.. _`py.test usage and invocations`: http://pytest.org/latest/usage.html#usage Special Introspection Features of the Untranslated Python Interpreter --------------------------------------------------------------------- diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - .. _glossary: ******** diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -256,7 +256,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. 
_`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -185,6 +185,6 @@ .. _`standard object space`: objspace.html#the-standard-object-space .. [D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy - EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf + EU-Report, 2007, https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf .. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -339,44 +339,35 @@ Object types ------------ -The larger part of the `pypy/objspace/std/`_ package defines and implements the -library of Python's standard built-in object types. Each type (int, float, -list, tuple, str, type, etc.) is typically implemented by two modules: +The larger part of the `pypy/objspace/std/`_ package defines and +implements the library of Python's standard built-in object types. Each +type ``xxx`` (int, float, list, tuple, str, type, etc.) is typically +implemented in the module ``xxxobject.py``. -* the *type specification* module, which for a type ``xxx`` is called ``xxxtype.py``; +The ``W_AbstractXxxObject`` class, when present, is the abstract base +class, which mainly defines what appears on the Python-level type +object. There are then actual implementations as subclasses, which are +called ``W_XxxObject`` or some variant for the cases where we have +several different implementations. 
For example, +`pypy/objspace/std/bytesobject.py`_ defines ``W_AbstractBytesObject``, +which contains everything needed to build the ``str`` app-level type; +and there are subclasses ``W_BytesObject`` (the usual string) and +``W_StringBufferObject`` (a special implementation tweaked for repeated +additions, in `pypy/objspace/std/strbufobject.py`_). For mutable data +types like lists and dictionaries, we have a single class +``W_ListObject`` or ``W_DictMultiObject`` which has an indirection to +the real data and a strategy; the strategy can change as the content of +the object changes. -* the *implementation* module, called ``xxxobject.py``. - -The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods -specific to lists, like ``append()``. - -A particular method implemented by all types is the ``__new__()`` special -method, which in Python's new-style-classes world is responsible for creating -an instance of the type. In PyPy, ``__new__()`` locates and imports the module -implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ -defines ``__new__()`` to import the class ``W_TupleObject`` from -`pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a -"real" implementation of tuples: the way the data is stored in the -``W_TupleObject`` class, how the operations work, etc. - -The goal of the above module layout is to cleanly separate the Python -type object, visible to the user, and the actual implementation of its -instances. It is possible to provide *several* implementations of the -instances of the same Python type, by writing several ``W_XxxObject`` -classes. 
Every place that instantiates a new object of that Python type -can decide which ``W_XxxObject`` class to instantiate. - -From the user's point of view, the multiple internal ``W_XxxObject`` -classes are not visible: they are still all instances of exactly the -same Python type. PyPy knows that (e.g.) the application-level type of -its interpreter-level ``W_StringObject`` instances is str because -there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all -other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringtype.py`_. +From the user's point of view, even when there are several +``W_AbstractXxxObject`` subclasses, this is not visible: at the +app-level, they are still all instances of exactly the same Python type. +PyPy knows that (e.g.) the application-level type of its +interpreter-level ``W_BytesObject`` instances is str because there is a +``typedef`` class attribute in ``W_BytesObject`` which points back to +the string type specification from `pypy/objspace/std/bytesobject.py`_; +all other implementations of strings use the same ``typedef`` from +`pypy/objspace/std/bytesobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -387,6 +378,9 @@ Multimethods ------------ +*Note: multimethods are on the way out. Although they look cool, +they failed to provide enough benefits.* + The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. 
For a description of the multimethod variant that we implemented and which features it supports, @@ -491,7 +485,7 @@ Introduction ------------ -The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `rpython/flowspace/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py +.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/extfunc.py diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,30 +42,30 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): http://codespeak.net/pypy/extradoc/sprintinfo/HildesheimReport.html - .. 
_Gothenburg (May 2003): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2003-sprintreport.txt - .. _LovainLaNeuve (June 2003): http://codespeak.net/pypy/extradoc/sprintinfo/LouvainLaNeuveReport.txt - .. _Berlin (Sept 2003): http://codespeak.net/pypy/extradoc/sprintinfo/BerlinReport.txt - .. _Amsterdam (Dec 2003): http://codespeak.net/pypy/extradoc/sprintinfo/AmsterdamReport.txt - .. _Vilnius (Nov 2004): http://codespeak.net/pypy/extradoc/sprintinfo/vilnius-2004-sprintreport.txt - .. _Leysin (Jan 2005): http://codespeak.net/pypy/extradoc/sprintinfo/LeysinReport.txt - .. _PyCon/Washington (March 2005): http://codespeak.net/pypy/extradoc/sprintinfo/pycon_sprint_report.txt - .. _Europython/Gothenburg (June 2005): http://codespeak.net/pypy/extradoc/sprintinfo/ep2005-sprintreport.txt - .. _Hildesheim (July 2005): http://codespeak.net/pypy/extradoc/sprintinfo/hildesheim2005-sprintreport.txt - .. _Heidelberg (Aug 2005): http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.txt - .. _Paris (Oct 2005): http://codespeak.net/pypy/extradoc/sprintinfo/paris/paris-report.txt - .. _Gothenburg (Dec 2005): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt - .. _Mallorca (Jan 2006): http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/mallorca-sprintreport.txt - .. _LouvainLaNeuve (March 2006): http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.txt - .. _Leysin (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2006-sprintreport.txt - .. _Tokyo (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/sprint-report.txt - .. _Düsseldorf (June 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/report1.txt - .. _Europython/Geneva (July 2006): http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt - .. _Düsseldorf (October 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/report.txt - .. 
_`Leysin (January 2007)`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/report.txt - .. _Hildesheim (Feb 2007): http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/sprint-report.txt - .. _`EU report writing sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/eu-report-sprint-report.txt - .. _`PyCon/Dallas (Feb 2006)`: http://codespeak.net/pypy/extradoc/sprintinfo/pycon06/sprint-report.txt + .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.txt + .. _Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt + .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt + .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt + .. _Amsterdam (Dec 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/AmsterdamReport.txt + .. _Vilnius (Nov 2004): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/vilnius-2004-sprintreport.txt + .. _Leysin (Jan 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LeysinReport.txt + .. _PyCon/Washington (March 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon_sprint_report.txt + .. _Europython/Gothenburg (June 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ep2005-sprintreport.txt + .. _Hildesheim (July 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/hildesheim2005-sprintreport.txt + .. _Heidelberg (Aug 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt + .. _Paris (Oct 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris/paris-report.txt + .. _Gothenburg (Dec 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt + .. _Mallorca (Jan 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/mallorca-sprintreport.txt + .. 
_LouvainLaNeuve (March 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt + .. _Leysin (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2006-sprintreport.txt + .. _Tokyo (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/tokyo/sprint-report.txt + .. _Düsseldorf (June 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/report1.txt + .. _Europython/Geneva (July 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt + .. _Düsseldorf (October 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006b/report.txt + .. _`Leysin (January 2007)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2007/report.txt + .. _Hildesheim (Feb 2007): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/sprint-report.txt + .. _`EU report writing sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/eu-report-sprint-report.txt + .. _`PyCon/Dallas (Feb 2006)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon06/sprint-report.txt .. _`Göteborg (November 2007)`: http://morepypy.blogspot.com/2007_11_01_archive.html .. _`Leysin (January 2008)`: http://morepypy.blogspot.com/2008/01/leysin-winter-sport-sprint-started.html .. _`Berlin (May 2008)`: http://morepypy.blogspot.com/2008_05_01_archive.html diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst --- a/pypy/doc/statistic/index.rst +++ b/pypy/doc/statistic/index.rst @@ -1,3 +1,7 @@ +.. warning:: + + This page is no longer updated, of historical interest only. + ======================= PyPy Project Statistics ======================= diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -15,25 +15,49 @@ user, describes work in progress, and finally gives references to more implementation details. -This work was done by Remi Meier and Armin Rigo. 
+This work was done by Remi Meier and Armin Rigo. Thanks to all donors +for crowd-funding the work so far! Please have a look at the 2nd call +for donation (*not ready yet*) +.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html -Introduction and current status -=============================== + +Introduction +============ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% of the speed of -PyPy, comparing the JITting version in both cases. It is called STM for -Software Transactional Memory, which is the internal technique used (see -`Reference to implementation details`_). +listed below, it should be in theory within 25%-50% of the speed of a +regular PyPy, comparing the JITting version in both cases. It is called +STM for Software Transactional Memory, which is the internal technique +used (see `Reference to implementation details`_). + +What you get in exchange for this slow-down is that ``pypy-stm`` runs +any multithreaded Python program on multiple CPUs at once. Programs +running two threads or more in parallel should ideally run faster than +in a regular PyPy, either now or soon as issues are fixed. In one way, +that's all there is to it: this is a GIL-less Python, feel free to +`download and try it`__. However, the deeper idea behind the +``pypy-stm`` project is to improve what is so far the state-of-the-art +for using multiple CPUs, which for cases where separate processes don't +work is done by writing explicitly multi-threaded programs. Instead, +``pypy-stm`` is flushing forward an approach to *hide* the threads, as +described below in `atomic sections`_. + + +.. __: + +Current status +============== **pypy-stm requires 64-bit Linux for now.** Development is done in the branch `stmgc-c7`_. If you are only interested in trying it out, you can download a Ubuntu 12.04 binary -here__. 
The current version supports four "segments", which means that -it will run up to four threads in parallel (in other words, you get a -GIL effect again, but only if trying to execute more than 4 threads). +here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, +but not stripped of debug symbols). The current version supports four +"segments", which means that it will run up to four threads in parallel +(in other words, you get a GIL effect again, but only if trying to +execute more than 4 threads). To build a version from sources, you first need to compile a custom version of clang; we recommend downloading `llvm and clang like @@ -46,17 +70,19 @@ rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py .. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ -.. __: http://buildbot.pypy.org/nightly/stmgc-c7/ +.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/misc/ .. __: http://clang.llvm.org/get_started.html .. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ Caveats: -* It should generally work. Please do `report bugs`_ that manifest as a - crash or wrong behavior (markedly different from the behavior of a - regular PyPy). Performance bugs are likely to be known issues; we're - working on them. +* So far, small examples work fine, but there are still a number of + bugs. We're busy fixing them. + +* Currently limited to 1.5 GB of RAM (this is just a parameter in + `core.h`__). Memory overflows are not detected correctly, so may + cause segmentation faults. * The JIT warm-up time is abysmal (as opposed to the regular PyPy's, which is "only" bad). Moreover, you should run it with a command like @@ -73,9 +99,11 @@ programs that modify large lists or dicts, suffer from these missing optimizations. -* The GC has no support for destructors: the ``__del__`` method is - never called (including on file objects, which won't be closed for - you). This is of course temporary. 
+* The GC has no support for destructors: the ``__del__`` method is never + called (including on file objects, which won't be closed for you). + This is of course temporary. Also, weakrefs might appear to work a + bit strangely for now (staying alive even though ``gc.collect()``, or + even dying but then un-dying for a short time before dying again). * The STM system is based on very efficient read/write barriers, which are mostly done (their placement could be improved a bit in @@ -98,6 +126,7 @@ probably, several days or more. .. _`report bugs`: https://bugs.pypy.org/ +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h @@ -172,9 +201,9 @@ unchanged. This capability can be hidden in a library or in the framework you use; the end user's code does not need to be explicitly aware of using threads. For a simple example of this, see -`lib_pypy/transaction.py`_. The idea is that if you have a program -where the function ``f(key, value)`` runs on every item of some big -dictionary, you can replace the loop with:: +`transaction.py`_ in ``lib_pypy``. The idea is that if you have a +program where the function ``f(key, value)`` runs on every item of some +big dictionary, you can replace the loop with:: for key, value in bigdict.items(): transaction.add(f, key, value) @@ -185,7 +214,18 @@ result is that the behavior should be exactly equivalent: you don't get any extra multithreading issue. -.. _`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +This approach hides the notion of threads from the end programmer, +including all the hard multithreading-related issues. This is not the +first alternative approach to explicit threads; for example, OpenMP_ is +one. However, it is one of the first ones which does not require the +code to be organized in a particular fashion. Instead, it works on any +Python program which has got latent, imperfect parallelism. 
Ideally, it +only requires that the end programmer identifies where this parallelism +is likely to be found, and communicates it to the system, using for +example the ``transaction.add()`` scheme. + +.. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +.. _OpenMP: http://en.wikipedia.org/wiki/OpenMP ================== @@ -221,7 +261,7 @@ The core of the implementation is in a separate C library called stmgc_, in the c7_ subdirectory. Please see the `README.txt`_ for more -information. +information. In particular, the notion of segment is discussed there. .. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ .. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -380,7 +380,7 @@ The RPython Typer ================= -https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ The RTyper is the first place where the choice of backend makes a difference; as outlined above we are assuming that ANSI C is the target. @@ -603,7 +603,7 @@ - using the `Boehm-Demers-Weiser conservative garbage collector`_ - using one of our custom `exact GCs implemented in RPython`_ -.. _`Boehm-Demers-Weiser conservative garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser conservative garbage collector`: http://hboehm.info/gc/ .. 
_`exact GCs implemented in RPython`: garbage_collection.html Almost all application-level Python code allocates objects at a very fast @@ -621,7 +621,7 @@ The C Back-End ============== -https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -86,7 +86,7 @@ option (this is the default at some optimization levels like ``-O1``, but unneeded for high-performance translations like ``-O2``). You may get it at -http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_source/gc-7.1.tar.gz +http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with pypy. Unpack this folder in the base directory. Then open a command diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -1,10 +1,17 @@ import py from rpython.config.translationoption import get_combined_translation_config from rpython.config.translationoption import set_opt_level -from rpython.config.config import ConflictConfigError +from rpython.config.config import ConflictConfigError, ConfigError +from rpython.translator.platform import platform as compiler def test_no_gcrootfinder_with_boehm(): config = get_combined_translation_config() config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') + +if compiler.name == 'msvc': + def test_no_asmgcrot_on_msvc(): + config = get_combined_translation_config() + py.test.raises(ConfigError, config.translation.setoption, + 'gcrootfinder', 'asmgcc', 'user') diff --git 
a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -1,9 +1,10 @@ import sys import os from rpython.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption, FloatOption -from rpython.config.config import ChoiceOption, StrOption, Config +from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError from rpython.config.support import detect_number_of_processors +from rpython.translator.platform import platform as compiler DEFL_INLINE_THRESHOLD = 32.4 # just enough to inline add__Int_Int() # and just small enough to prevend inlining of some rlist functions. @@ -16,8 +17,13 @@ if sys.platform.startswith("linux"): DEFL_ROOTFINDER_WITHJIT = "asmgcc" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] +elif compiler.name == 'msvc': + DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack"] else: DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] IS_64_BITS = sys.maxint > 2147483647 @@ -85,7 +91,7 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ROOTFINDERS, "shadowstack", cmdline="--gcrootfinder", requires={ diff --git a/rpython/flowspace/test/test_unroll.py b/rpython/flowspace/test/test_unroll.py --- a/rpython/flowspace/test/test_unroll.py +++ b/rpython/flowspace/test/test_unroll.py @@ -1,23 +1,10 @@ import operator + from rpython.flowspace.test.test_objspace import Base -from rpython.rlib.unroll import unrolling_zero, unrolling_iterable +from rpython.rlib.unroll import unrolling_iterable + class TestUnroll(Base): - - def test_unrolling_int(self): - l = range(10) - def f(tot): - i = unrolling_zero - while i < len(l): - tot += l[i] - i = i + 1 - return tot*2 - assert f(0) == sum(l)*2 - - 
graph = self.codetest(f) - ops = self.all_operations(graph) - assert ops == {'inplace_add': 10, 'mul': 1} - def test_unroller(self): l = unrolling_iterable(range(10)) def f(tot): diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -5,7 +5,7 @@ """ import weakref -import os +import os, py from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype from rpython.rlib.jit import JitDriver, dont_look_inside @@ -13,6 +13,7 @@ from rpython.jit.backend.llsupport.gc import GcLLDescr_framework from rpython.tool.udir import udir from rpython.config.translationoption import DEFL_GC +from rpython.config.config import ConfigError class X(object): @@ -166,6 +167,9 @@ cls.cbuilder = compile(get_entry(allfuncs), cls.gc, gcrootfinder=cls.gcrootfinder, jit=True, thread=True) + except ConfigError, e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: GcLLDescr_framework.DEBUG = OLD_DEBUG diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -1,4 +1,4 @@ -import os, sys +import os, sys, py from rpython.tool.udir import udir from rpython.rlib.jit import JitDriver, unroll_parameters, set_param from rpython.rlib.jit import PARAMETERS, dont_look_inside @@ -7,7 +7,7 @@ from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.test.support import CCompiledMixin from rpython.jit.codewriter.policy import StopAtXPolicy - +from rpython.config.config import ConfigError class TranslationTest(CCompiledMixin): CPUClass = getcpuclass() @@ -252,6 +252,9 @@ try: res = self.meta_interp(main, [400]) assert res == main(400) + except 
ConfigError,e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: del os.environ['PYPYLOG'] diff --git a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py --- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py @@ -1,4 +1,9 @@ +import py from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests +from rpython.translator.platform import platform as compiler + +if compiler.name == 'msvc': + py.test.skip('asmgcc buggy on msvc') class TestAsmGcc(CompileFrameworkTests): gcrootfinder = "asmgcc" diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -567,7 +567,8 @@ # common case: this is not a guard_value, and we are not # already busy tracing. The rest of self.status stores a # valid per-guard index in the jitcounter. - hash = self.status & self.ST_SHIFT_MASK + hash = self.status + assert hash == (self.status & self.ST_SHIFT_MASK) # # do we have the BUSY flag? If so, we're tracing right now, e.g. in an # outer invocation of the same function, so don't trace again for now. 
diff --git a/rpython/jit/metainterp/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py rename from rpython/jit/metainterp/test/test_intbound.py rename to rpython/jit/metainterp/optimizeopt/test/test_intbound.py --- a/rpython/jit/metainterp/test/test_intbound.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py @@ -1,5 +1,7 @@ from rpython.jit.metainterp.optimizeopt.intutils import IntBound, IntUpperBound, \ IntLowerBound, IntUnbounded +from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 + from copy import copy import sys from rpython.rlib.rarithmetic import LONG_BIT @@ -235,8 +237,8 @@ for shift_count_bound in (IntBound(7, LONG_BIT), IntBound(-7, 7)): #assert not b.lshift_bound(shift_count_bound).has_upper assert not b.rshift_bound(shift_count_bound).has_upper - - + + def test_div_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -258,7 +260,6 @@ assert a.contains(0) - def test_sub_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -271,3 +272,14 @@ a=bound(2, 4).sub_bound(bound(1, 2)) assert not a.contains(-1) assert not a.contains(4) + + +def test_next_pow2_m1(): + assert next_pow2_m1(0) == 0 + assert next_pow2_m1(1) == 1 + assert next_pow2_m1(7) == 7 + assert next_pow2_m1(256) == 511 + assert next_pow2_m1(255) == 255 + assert next_pow2_m1(80) == 127 + assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 + assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py deleted file mode 100644 --- a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py +++ /dev/null @@ -1,12 +0,0 @@ -from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 - - -def test_next_pow2_m1(): - assert next_pow2_m1(0) == 0 - assert next_pow2_m1(1) == 1 - assert next_pow2_m1(7) == 7 - assert next_pow2_m1(256) == 511 - assert next_pow2_m1(255) == 255 
- assert next_pow2_m1(80) == 127 - assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 - assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -0,0 +1,1181 @@ +from __future__ import with_statement +import py +from rpython.jit.metainterp.optimize import InvalidLoop +from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \ + VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes +from rpython.jit.metainterp.optimizeopt.optimizer import OptValue +from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ + equaloplists, FakeDescrWithSnapshot +from rpython.jit.metainterp.optimizeopt.intutils import IntBound +from rpython.jit.metainterp.history import TreeLoop, JitCellToken +from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData +from rpython.jit.metainterp.resoperation import ResOperation, rop + +class TestBasic: + someptr1 = LLtypeMixin.myptr + someptr2 = LLtypeMixin.myptr2 + + def test_position_generalization(self): + def postest(info1, info2): + info1.position = 0 + assert info1.generalization_of(info1, {}, {}) + info2.position = 0 + assert info1.generalization_of(info2, {}, {}) + info2.position = 1 + renum = {} + assert info1.generalization_of(info2, renum, {}) + assert renum == {0:1} + assert info1.generalization_of(info2, {0:1}, {}) + assert info1.generalization_of(info2, {1:1}, {}) + bad = {} + assert not info1.generalization_of(info2, {0:0}, bad) + assert info1 in bad and info2 in bad + + for BoxType in (BoxInt, BoxFloat, BoxPtr): + info1 = 
NotVirtualStateInfo(OptValue(BoxType())) + info2 = NotVirtualStateInfo(OptValue(BoxType())) + postest(info1, info2) + + info1, info2 = VArrayStateInfo(42), VArrayStateInfo(42) + info1.fieldstate = info2.fieldstate = [] + postest(info1, info2) + + info1, info2 = VStructStateInfo(42, []), VStructStateInfo(42, []) + info1.fieldstate = info2.fieldstate = [] + postest(info1, info2) + + info1, info2 = VirtualStateInfo(ConstInt(42), []), VirtualStateInfo(ConstInt(42), []) + info1.fieldstate = info2.fieldstate = [] + postest(info1, info2) + + def test_NotVirtualStateInfo_generalization(self): + def isgeneral(value1, value2): + info1 = NotVirtualStateInfo(value1) + info1.position = 0 + info2 = NotVirtualStateInfo(value2) + info2.position = 0 + return info1.generalization_of(info2, {}, {}) + + assert isgeneral(OptValue(BoxInt()), OptValue(ConstInt(7))) + assert not isgeneral(OptValue(ConstInt(7)), OptValue(BoxInt())) + + ptr = OptValue(BoxPtr()) + nonnull = OptValue(BoxPtr()) + nonnull.make_nonnull(0) + knownclass = OptValue(BoxPtr()) + knownclass.make_constant_class(ConstPtr(self.someptr1), 0) + const = OptValue(BoxPtr) + const.make_constant_class(ConstPtr(self.someptr1), 0) + const.make_constant(ConstPtr(self.someptr1)) + inorder = [ptr, nonnull, knownclass, const] + for i in range(len(inorder)): + for j in range(i, len(inorder)): + assert isgeneral(inorder[i], inorder[j]) + if i != j: + assert not isgeneral(inorder[j], inorder[i]) + + value1 = OptValue(BoxInt()) + value2 = OptValue(BoxInt()) + value2.intbound.make_lt(IntBound(10, 10)) + assert isgeneral(value1, value2) + assert not isgeneral(value2, value1) + + assert isgeneral(OptValue(ConstInt(7)), OptValue(ConstInt(7))) + S = lltype.GcStruct('S') + foo = lltype.malloc(S) + fooref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) + assert isgeneral(OptValue(ConstPtr(fooref)), + OptValue(ConstPtr(fooref))) + + value1 = OptValue(BoxPtr()) + value1.make_nonnull(None) + value2 = OptValue(ConstPtr(LLtypeMixin.nullptr)) + assert 
not isgeneral(value1, value2) + + def test_field_matching_generalization(self): + const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) + const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) + const1.position = const2.position = 1 + assert not const1.generalization_of(const2, {}, {}) + assert not const2.generalization_of(const1, {}, {}) + + def fldtst(info1, info2): + info1.position = info2.position = 0 + info1.fieldstate = [const1] + info2.fieldstate = [const2] + assert not info1.generalization_of(info2, {}, {}) + assert not info2.generalization_of(info1, {}, {}) + assert info1.generalization_of(info1, {}, {}) + assert info2.generalization_of(info2, {}, {}) + fldtst(VArrayStateInfo(42), VArrayStateInfo(42)) + fldtst(VStructStateInfo(42, [7]), VStructStateInfo(42, [7])) + fldtst(VirtualStateInfo(ConstInt(42), [7]), VirtualStateInfo(ConstInt(42), [7])) + + def test_known_class_generalization(self): + knownclass1 = OptValue(BoxPtr()) + knownclass1.make_constant_class(ConstPtr(self.someptr1), 0) + info1 = NotVirtualStateInfo(knownclass1) + info1.position = 0 + knownclass2 = OptValue(BoxPtr()) + knownclass2.make_constant_class(ConstPtr(self.someptr1), 0) + info2 = NotVirtualStateInfo(knownclass2) + info2.position = 0 + assert info1.generalization_of(info2, {}, {}) + assert info2.generalization_of(info1, {}, {}) + + knownclass3 = OptValue(BoxPtr()) + knownclass3.make_constant_class(ConstPtr(self.someptr2), 0) + info3 = NotVirtualStateInfo(knownclass3) + info3.position = 0 + assert not info1.generalization_of(info3, {}, {}) + assert not info2.generalization_of(info3, {}, {}) + assert not info3.generalization_of(info2, {}, {}) + assert not info3.generalization_of(info1, {}, {}) + + + def test_circular_generalization(self): + for info in (VArrayStateInfo(42), VStructStateInfo(42, [7]), + VirtualStateInfo(ConstInt(42), [7])): + info.position = 0 + info.fieldstate = [info] + assert info.generalization_of(info, {}, {}) + + +class BaseTestGenerateGuards(BaseTest): + def 
guards(self, info1, info2, box, expected): + info1.position = info2.position = 0 + guards = [] + info1.generate_guards(info2, box, self.cpu, guards, {}) + self.compare(guards, expected, [box]) + + def compare(self, guards, expected, inputargs): + loop = self.parse(expected) + boxmap = {} + assert len(loop.inputargs) == len(inputargs) + for a, b in zip(loop.inputargs, inputargs): + boxmap[a] = b + for op in loop.operations: + if op.is_guard(): + op.setdescr(None) + assert equaloplists(guards, loop.operations, False, + boxmap) + def test_intbounds(self): + value1 = OptValue(BoxInt()) + value1.intbound.make_ge(IntBound(0, 10)) + value1.intbound.make_le(IntBound(20, 30)) + info1 = NotVirtualStateInfo(value1) + info2 = NotVirtualStateInfo(OptValue(BoxInt())) + expected = """ + [i0] + i1 = int_ge(i0, 0) + guard_true(i1) [] + i2 = int_le(i0, 30) + guard_true(i2) [] + """ + self.guards(info1, info2, BoxInt(15), expected) + py.test.raises(InvalidLoop, self.guards, + info1, info2, BoxInt(50), expected) + + + def test_known_class(self): + value1 = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value1.make_constant_class(classbox, -1) + info1 = NotVirtualStateInfo(value1) + info2 = NotVirtualStateInfo(OptValue(self.nodebox)) + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, self.nodebox, expected) + py.test.raises(InvalidLoop, self.guards, + info1, info2, BoxPtr(), expected) + + def test_known_class_value(self): + value1 = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value1.make_constant_class(classbox, -1) + box = self.nodebox + guards = value1.make_guards(box) + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.compare(guards, expected, [box]) + + def test_known_value(self): + value1 = OptValue(self.nodebox) + value1.make_constant(ConstInt(1)) + box = self.nodebox + guards = 
value1.make_guards(box) + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.compare(guards, expected, [box]) + + def test_equal_inputargs(self): + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + vstate1 = VirtualState([knownclass_info, knownclass_info]) + assert vstate1.generalization_of(vstate1) + + unknown_info1 = NotVirtualStateInfo(OptValue(self.nodebox)) + vstate2 = VirtualState([unknown_info1, unknown_info1]) + assert vstate2.generalization_of(vstate2) + assert not vstate1.generalization_of(vstate2) + assert vstate2.generalization_of(vstate1) + + unknown_info1 = NotVirtualStateInfo(OptValue(self.nodebox)) + unknown_info2 = NotVirtualStateInfo(OptValue(self.nodebox)) + vstate3 = VirtualState([unknown_info1, unknown_info2]) + assert vstate3.generalization_of(vstate2) + assert vstate3.generalization_of(vstate1) + assert not vstate2.generalization_of(vstate3) + assert not vstate1.generalization_of(vstate3) + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + guards = [] + vstate1.generate_guards(vstate2, [self.nodebox, self.nodebox], self.cpu, guards) + self.compare(guards, expected, [self.nodebox]) + + with py.test.raises(InvalidLoop): + guards = [] + vstate1.generate_guards(vstate3, [self.nodebox, self.nodebox], + self.cpu, guards) + with py.test.raises(InvalidLoop): + guards = [] + vstate2.generate_guards(vstate3, [self.nodebox, self.nodebox], + self.cpu, guards) + + def test_virtuals_with_equal_fields(self): + info1 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = 
VirtualStateInfo(ConstInt(42), [1, 2]) + unknown_info1 = NotVirtualStateInfo(OptValue(self.nodebox)) + info2.fieldstate = [unknown_info1, unknown_info1] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + assert not vstate1.generalization_of(vstate2) + assert vstate2.generalization_of(vstate1) + + info3 = VirtualStateInfo(ConstInt(42), [1, 2]) + unknown_info1 = NotVirtualStateInfo(OptValue(self.nodebox)) + unknown_info2 = NotVirtualStateInfo(OptValue(self.nodebox)) + info3.fieldstate = [unknown_info1, unknown_info2] + vstate3 = VirtualState([info3]) + assert vstate3.generalization_of(vstate2) + assert vstate3.generalization_of(vstate1) + assert not vstate2.generalization_of(vstate3) + assert not vstate1.generalization_of(vstate3) + + def test_virtuals_with_nonmatching_fields(self): + info1 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_virtuals_with_nonmatching_descrs(self): + info1 = VirtualStateInfo(ConstInt(42), [10, 20]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) 
+ assert vstate1.generalization_of(vstate1) + + info2 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_virtuals_with_nonmatching_classes(self): + info1 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = VirtualStateInfo(ConstInt(7), [1, 2]) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_nonvirtual_is_not_virtual(self): + info1 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = NotVirtualStateInfo(value) + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def 
test_arrays_with_nonmatching_fields(self): + info1 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = VArrayStateInfo(42) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_arrays_of_different_sizes(self): + info1 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_arrays_with_nonmatching_types(self): + info1 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert 
vstate1.generalization_of(vstate1) + + info2 = VArrayStateInfo(7) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_nonvirtual_is_not_array(self): + info1 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = NotVirtualStateInfo(value) + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + +class BaseTestBridges(BaseTest): + enable_opts = "intbounds:rewrite:virtualize:string:pure:heap:unroll" + + def _do_optimize_bridge(self, bridge, call_pure_results): + from rpython.jit.metainterp.optimizeopt import optimize_trace + from rpython.jit.metainterp.optimizeopt.util import args_dict + + self.bridge = bridge + bridge.call_pure_results = args_dict() + if call_pure_results is not None: + for k, v in call_pure_results.items(): + bridge.call_pure_results[list(k)] = v + metainterp_sd = FakeMetaInterpStaticData(self.cpu) + if hasattr(self, 'vrefinfo'): + metainterp_sd.virtualref_info = self.vrefinfo + if hasattr(self, 'callinfocollection'): + metainterp_sd.callinfocollection = self.callinfocollection + # + bridge.resume_at_jump_descr = FakeDescrWithSnapshot() + optimize_trace(metainterp_sd, bridge, self.enable_opts) + + + def optimize_bridge(self, loops, bridge, expected, expected_target='Loop', **boxvalues): + 
if isinstance(loops, str): + loops = (loops, ) + loops = [self.parse(loop) for loop in loops] + bridge = self.parse(bridge) + for loop in loops: + loop.preamble = self.unroll_and_optimize(loop) + preamble = loops[0].preamble + token = JitCellToken() + token.target_tokens = [l.operations[0].getdescr() for l in [preamble] + loops] + + boxes = {} + for b in bridge.inputargs + [op.result for op in bridge.operations]: + boxes[str(b)] = b + for b, v in boxvalues.items(): + boxes[b].value = v + bridge.operations[-1].setdescr(token) + self._do_optimize_bridge(bridge, None) + if bridge.operations[-1].getopnum() == rop.LABEL: + assert expected == 'RETRACE' + return + + print '\n'.join([str(o) for o in bridge.operations]) + expected = self.parse(expected) + self.assert_equal(bridge, expected) + + if expected_target == 'Preamble': + assert bridge.operations[-1].getdescr() is preamble.operations[0].getdescr() + elif expected_target == 'Loop': + assert len(loops) == 1 + assert bridge.operations[-1].getdescr() is loops[0].operations[0].getdescr() + elif expected_target.startswith('Loop'): + n = int(expected_target[4:]) + assert bridge.operations[-1].getdescr() is loops[n].operations[0].getdescr() + else: + assert False + + def test_nonnull(self): + loop = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p0) + """ + bridge = """ + [p0] + jump(p0) + """ + expected = """ + [p0] + guard_nonnull(p0) [] + jump(p0) + """ + self.optimize_bridge(loop, bridge, 'RETRACE', p0=self.nullptr) + self.optimize_bridge(loop, bridge, expected, p0=self.myptr) + self.optimize_bridge(loop, expected, expected, p0=self.myptr) + self.optimize_bridge(loop, expected, expected, p0=self.nullptr) + + def test_cached_nonnull(self): + loop = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + guard_nonnull(p1) [] + call(p1, descr=nonwritedescr) + jump(p0) + """ + bridge = """ + [p0] + jump(p0) + """ + expected = """ + [p0] + guard_nonnull(p0) [] + p1 = getfield_gc(p0, descr=nextdescr) + 
guard_nonnull(p1) [] + jump(p0, p1) + """ + self.optimize_bridge(loop, bridge, expected, p0=self.myptr) + + def test_cached_unused_nonnull(self): + loop = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + guard_nonnull(p1) [] + jump(p0) + """ + bridge = """ + [p0] + jump(p0) + """ + expected = """ + [p0] + guard_nonnull(p0) [] + p1 = getfield_gc(p0, descr=nextdescr) + guard_nonnull(p1) [] + jump(p0) + """ + self.optimize_bridge(loop, bridge, expected, p0=self.myptr) + + def test_cached_invalid_nonnull(self): + loop = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + guard_nonnull(p1) [] + jump(p0) + """ + bridge = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + guard_value(p1, ConstPtr(nullptr)) [] + jump(p0) + """ + self.optimize_bridge(loop, bridge, bridge, 'Preamble', p0=self.myptr) + + def test_multiple_nonnull(self): + loops = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + jump(p0) + """, """ + [p0] + jump(p0) + """ + bridge = """ + [p0] + jump(p0) + """ + expected = """ + [p0] + jump(p0) + """ + self.optimize_bridge(loops, bridge, expected, 'Loop1', p0=self.nullptr) + expected = """ + [p0] + guard_nonnull(p0) [] + jump(p0) + """ + self.optimize_bridge(loops, bridge, expected, 'Loop0', p0=self.myptr) + + def test_constant(self): + loops = """ + [p0] + p1 = same_as(ConstPtr(myptr)) + jump(p1) + """, """ + [p0] + p1 = same_as(ConstPtr(myptr2)) + jump(p1) + """, """ + [p0] + jump(p0) + """ + expected = """ + [p0] + jump() + """ + self.optimize_bridge(loops, loops[0], expected, 'Loop0') + self.optimize_bridge(loops, loops[1], expected, 'Loop1') + expected = """ + [p0] + jump(p0) + """ + self.optimize_bridge(loops, loops[2], expected, 'Loop2') + + def test_cached_constant(self): + loop = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + guard_value(p1, ConstPtr(myptr)) [] + jump(p0) + """ + bridge = """ + [p0] + jump(p0) + """ + expected = """ + [p0] + guard_nonnull(p0) [] + p1 = getfield_gc(p0, descr=nextdescr) + guard_value(p1, 
ConstPtr(myptr)) [] + jump(p0) + """ + self.optimize_bridge(loop, bridge, expected, p0=self.myptr) + + def test_virtual(self): + loops = """ + [p0, p1] + p2 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p2, 7, descr=adescr) + setfield_gc(p2, 42, descr=bdescr) + jump(p2, p1) + """,""" + [p0, p1] + p2 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p2, 9, descr=adescr) + jump(p2, p1) + """ + expected = """ + [p0, p1] + jump(p1) + """ + self.optimize_bridge(loops, loops[0], expected, 'Loop0') + self.optimize_bridge(loops, loops[1], expected, 'Loop1') + bridge = """ + [p0, p1] + p2 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p2, 42, descr=adescr) + setfield_gc(p2, 7, descr=bdescr) + jump(p2, p1) + """ + self.optimize_bridge(loops, bridge, "RETRACE") + bridge = """ + [p0, p1] + p2 = new_with_vtable(ConstClass(node_vtable)) + setfield_gc(p2, p1, descr=nextdescr) + setfield_gc(p2, 7, descr=adescr) + jump(p2, p1) + """ + self.optimize_bridge(loops, bridge, "RETRACE") + + def test_known_class(self): + loops = """ + [p0] + guard_nonnull_class(p0, ConstClass(node_vtable)) [] + jump(p0) + """,""" + [p0] + guard_nonnull_class(p0, ConstClass(node_vtable2)) [] + jump(p0) + """ + bridge = """ + [p0] + jump(p0) + """ + self.optimize_bridge(loops, bridge, loops[0], 'Loop0', p0=self.nodebox.value) + self.optimize_bridge(loops, bridge, loops[1], 'Loop1', p0=self.nodebox2.value) + self.optimize_bridge(loops[0], bridge, 'RETRACE', p0=self.nodebox2.value) + self.optimize_bridge(loops, loops[0], loops[0], 'Loop0', p0=self.nullptr) + self.optimize_bridge(loops, loops[1], loops[1], 'Loop1', p0=self.nullptr) + + def test_cached_known_class(self): + loop = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + guard_class(p1, ConstClass(node_vtable)) [] + jump(p0) + """ + bridge = """ + [p0] + jump(p0) + """ + expected = """ + [p0] + 
guard_nonnull(p0) [] + p1 = getfield_gc(p0, descr=nextdescr) + guard_nonnull_class(p1, ConstClass(node_vtable)) [] + jump(p0) + """ + self.optimize_bridge(loop, bridge, expected, p0=self.myptr) + + + def test_lenbound_array(self): + loop = """ + [p0] + i2 = getarrayitem_gc(p0, 10, descr=arraydescr) + call(i2, descr=nonwritedescr) + jump(p0) + """ + expected = """ + [p0] + i2 = getarrayitem_gc(p0, 10, descr=arraydescr) + call(i2, descr=nonwritedescr) + jump(p0, i2) + """ + self.optimize_bridge(loop, loop, expected, 'Loop0') + bridge = """ + [p0] + i2 = getarrayitem_gc(p0, 15, descr=arraydescr) + jump(p0) + """ + expected = """ + [p0] + i2 = getarrayitem_gc(p0, 15, descr=arraydescr) + i3 = getarrayitem_gc(p0, 10, descr=arraydescr) + jump(p0, i3) + """ + self.optimize_bridge(loop, bridge, expected, 'Loop0') + bridge = """ + [p0] + i2 = getarrayitem_gc(p0, 5, descr=arraydescr) + jump(p0) + """ + self.optimize_bridge(loop, bridge, 'RETRACE') + bridge = """ + [p0] + jump(p0) + """ + self.optimize_bridge(loop, bridge, 'RETRACE') + + def test_cached_lenbound_array(self): + loop = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + i2 = getarrayitem_gc(p1, 10, descr=arraydescr) + call(i2, descr=nonwritedescr) + jump(p0) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + i2 = getarrayitem_gc(p1, 10, descr=arraydescr) + call(i2, descr=nonwritedescr) + i3 = arraylen_gc(p1, descr=arraydescr) # Should be killed by backend + jump(p0, i2, p1) + """ + self.optimize_bridge(loop, loop, expected) + bridge = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + i2 = getarrayitem_gc(p1, 15, descr=arraydescr) + jump(p0) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + i2 = getarrayitem_gc(p1, 15, descr=arraydescr) + i3 = arraylen_gc(p1, descr=arraydescr) # Should be killed by backend + i4 = getarrayitem_gc(p1, 10, descr=arraydescr) + jump(p0, i4, p1) + """ + self.optimize_bridge(loop, bridge, expected) + bridge = """ + [p0] + p1 = 
getfield_gc(p0, descr=nextdescr) + i2 = getarrayitem_gc(p1, 5, descr=arraydescr) + jump(p0) + """ + expected = """ + [p0] + p1 = getfield_gc(p0, descr=nextdescr) + i2 = getarrayitem_gc(p1, 5, descr=arraydescr) + i3 = arraylen_gc(p1, descr=arraydescr) # Should be killed by backend + i4 = int_ge(i3, 11) + guard_true(i4) [] + i5 = getarrayitem_gc(p1, 10, descr=arraydescr) + jump(p0, i5, p1) + """ + self.optimize_bridge(loop, bridge, expected) + bridge = """ + [p0] + jump(p0) + """ + expected = """ + [p0] + guard_nonnull(p0) [] + p1 = getfield_gc(p0, descr=nextdescr) + guard_nonnull(p1) [] + i3 = arraylen_gc(p1, descr=arraydescr) # Should be killed by backend + i4 = int_ge(i3, 11) + guard_true(i4) [] + i5 = getarrayitem_gc(p1, 10, descr=arraydescr) + jump(p0, i5, p1) + """ + self.optimize_bridge(loop, bridge, expected, p0=self.myptr) + + def test_cached_setarrayitem_gc(self): + loop = """ + [p0, p1] + p2 = getfield_gc(p0, descr=nextdescr) + pp = getarrayitem_gc(p2, 0, descr=arraydescr) + call(pp, descr=nonwritedescr) + p3 = getfield_gc(p1, descr=nextdescr) + setarrayitem_gc(p2, 0, p3, descr=arraydescr) + jump(p0, p3) + """ + bridge = """ + [p0, p1] + jump(p0, p1) + """ + expected = """ + [p0, p1] + guard_nonnull(p0) [] + p2 = getfield_gc(p0, descr=nextdescr) + guard_nonnull(p2) [] + i5 = arraylen_gc(p2, descr=arraydescr) + i6 = int_ge(i5, 1) + guard_true(i6) [] + p3 = getarrayitem_gc(p2, 0, descr=arraydescr) + jump(p0, p1, p3, p2) + """ + self.optimize_bridge(loop, bridge, expected, p0=self.myptr) + + def test_cache_constant_setfield(self): + loop = """ + [p5] + i10 = getfield_gc(p5, descr=valuedescr) + call(i10, descr=nonwritedescr) + setfield_gc(p5, 1, descr=valuedescr) + jump(p5) + """ + bridge = """ + [p0] + jump(p0) + """ + expected = """ + [p0] + guard_nonnull(p0) [] + i10 = getfield_gc(p0, descr=valuedescr) + guard_value(i10, 1) [] + jump(p0) + """ + self.optimize_bridge(loop, bridge, expected, p0=self.myptr) + bridge = """ + [p0] + setfield_gc(p0, 7, 
descr=valuedescr) + jump(p0) + """ + expected = """ + [p0] + setfield_gc(p0, 7, descr=valuedescr) + jump(p0) + """ + self.optimize_bridge(loop, bridge, expected, 'Preamble', p0=self.myptr) + + def test_cached_equal_fields(self): + loop = """ + [p5, p6] + i10 = getfield_gc(p5, descr=valuedescr) + i11 = getfield_gc(p6, descr=nextdescr) + call(i10, i11, descr=nonwritedescr) + setfield_gc(p6, i10, descr=nextdescr) From noreply at buildbot.pypy.org Mon Apr 7 20:54:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 7 Apr 2014 20:54:03 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: merge py3k Message-ID: <20140407185403.37FA41C0110@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70485:2f1ec8d05ebc Date: 2014-04-07 11:53 -0700 http://bitbucket.org/pypy/pypy/changeset/2f1ec8d05ebc/ Log: merge py3k diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,7 +48,6 @@ def tearDown(self): os.chdir(self.old_dir) - gc.collect() for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -162,6 +162,7 @@ # Remark: Do not perform more than one test per open file, # since that does NOT catch the readline error on Windows. 
data = 'xxx' + self.f.close() for mode in ['w', 'wb', 'a', 'ab']: for attr in ['read', 'readline', 'readlines']: self.f = open(TESTFN, mode) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -19,5 +19,5 @@ fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) imp.load_module('_ctypes_test', fp, filename, description) except ImportError: - print 'could not find _ctypes_test in',output_dir + print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -4,7 +4,6 @@ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py -.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ .. _`pypy/bin/pyinteractive.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/pyinteractive.py @@ -35,7 +34,6 @@ .. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. 
_`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py .. _`pypy/interpreter/pyparser`: @@ -49,21 +47,21 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. _`pypy/module/cppyy/capi/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/__init__.py +.. _`pypy/module/cppyy/capi/builtin_capi.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/builtin_capi.py +.. _`pypy/module/cppyy/include/capi.h`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/include/capi.h +.. _`pypy/module/test_lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/test_lib_pypy/ .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ -.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ .. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ -.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. _`pypy/objspace/std/bytesobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/bytesobject.py .. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py .. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py -.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. 
_`pypy/objspace/std/strbufobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/strbufobject.py .. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py -.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py -.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py .. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ -.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`rpython/annotator`: .. _`rpython/annotator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/annotator/ @@ -75,6 +73,11 @@ .. _`rpython/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/config/translationoption.py .. _`rpython/flowspace/`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/ .. _`rpython/flowspace/model.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/model.py +.. _`rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/ +.. _`rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/generation.py +.. _`rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/hybrid.py +.. _`rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/minimarkpage.py +.. _`rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/semispace.py .. _`rpython/rlib`: .. _`rpython/rlib/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/ .. _`rpython/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/listsort.py @@ -93,16 +96,12 @@ .. 
_`rpython/rtyper/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ .. _`rpython/rtyper/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/ .. _`rpython/rtyper/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/lltype.py -.. _`rpython/rtyper/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/ -.. _`rpython/rtyper/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/generation.py -.. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py -.. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py -.. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py .. _`rpython/rtyper/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rtyper.py .. _`rpython/rtyper/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/test/test_llinterp.py +.. _`rpython/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/rpython/tool/algo/ .. _`rpython/translator`: .. _`rpython/translator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/ .. 
_`rpython/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/backendopt/ diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -9,9 +9,3 @@ distribution.rst - dot-net.rst - - - - - diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -742,9 +742,9 @@ Testing modules in ``lib_pypy/`` -------------------------------- -You can go to the `lib_pypy/pypy_test/`_ directory and invoke the testing tool +You can go to the `pypy/module/test_lib_pypy/`_ directory and invoke the testing tool ("py.test" or "python ../../pypy/test_all.py") to run tests against the -lib_pypy hierarchy. Note, that tests in `lib_pypy/pypy_test/`_ are allowed +lib_pypy hierarchy. Note, that tests in `pypy/module/test_lib_pypy/`_ are allowed and encouraged to let their tests run at interpreter level although `lib_pypy/`_ modules eventually live at PyPy's application level. This allows us to quickly test our python-coded reimplementations @@ -835,15 +835,6 @@ web interface. .. _`development tracker`: https://bugs.pypy.org/ - -use your codespeak login or register ------------------------------------- - -If you have an existing codespeak account, you can use it to login within the -tracker. Else, you can `register with the tracker`_ easily. - - -.. _`register with the tracker`: https://bugs.pypy.org/user?@template=register .. _`roundup`: http://roundup.sourceforge.net/ diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst --- a/pypy/doc/config/opt.rst +++ b/pypy/doc/config/opt.rst @@ -46,5 +46,5 @@ The default level is `2`. -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser garbage collector`: http://hboehm.info/gc/ .. 
_`custom garbage collectors`: ../garbage_collection.html diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.txt --- a/pypy/doc/config/translation.backendopt.txt +++ b/pypy/doc/config/translation.backendopt.txt @@ -1,5 +1,5 @@ This group contains options about various backend optimization passes. Most of them are described in the `EU report about optimization`_ -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU report about optimization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst --- a/pypy/doc/cppyy_backend.rst +++ b/pypy/doc/cppyy_backend.rst @@ -51,3 +51,6 @@ to ``PATH``). In case of the former, include files are expected under ``$ROOTSYS/include`` and libraries under ``$ROOTSYS/lib``. + + +.. include:: _ref.txt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -47,7 +47,7 @@ `pypy/tool/`_ various utilities and hacks used from various places -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic +`rpython/tool/algo/`_ general-purpose algorithmic and mathematic tools `pypy/tool/pytest/`_ support code for our `testing methods`_ @@ -129,3 +129,4 @@ .. _Mono: http://www.mono-project.com/ .. _`"standard library"`: rlib.html .. _`graph viewer`: getting-started-dev.html#try-out-the-translator +.. 
include:: _ref.txt diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,5 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - ============================= lib_pypy/distributed features ============================= diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -17,7 +17,7 @@ Read more in the `EuroPython 2006 sprint report`_. -.. _`EuroPython 2006 sprint report`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt +.. _`EuroPython 2006 sprint report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt PyPy at XP 2006 and Agile 2006 ================================================================== @@ -41,8 +41,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. _report: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.html -.. 
_announcement: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.txt PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -114,7 +114,7 @@ said they were interested in the outcome and would keep an eye on its progress. Read the `talk slides`_. -.. _`talk slides`: http://codespeak.net/pypy/extradoc/talk/solutions-linux-paris-2006.html +.. _`talk slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/solutions-linux-paris-2006.html PyPy Sprint in Palma De Mallorca 23rd - 29th January 2006 @@ -129,9 +129,9 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/sprint-announcement.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q1/002746.html -.. _`one for the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2006q1/002749.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.txt +.. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html +.. _`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html Preliminary EU reports released =============================== @@ -155,8 +155,8 @@ Michael and Carl have written a `report about the first half`_ and `one about the second half`_ of the sprint. *(12/18/2005)* -.. _`report about the first half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002656.html -.. _`one about the second half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002660.html +.. _`report about the first half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002656.html +.. 
_`one about the second half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002660.html PyPy release 0.8.0 =================== @@ -187,12 +187,12 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: http://codespeak.net/pypy/extradoc/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.txt .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style -.. _`report about day one`: http://codespeak.net/pipermail/pypy-dev/2005q4/002510.html -.. _`one about day two and three`: http://codespeak.net/pipermail/pypy-dev/2005q4/002512.html -.. _`the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2005q4/002514.html +.. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html +.. _`one about day two and three`: https://mail.python.org/pipermail/pypy-dev/2005-October/002512.html +.. _`the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2005-October/002514.html PyPy release 0.7.0 =================== @@ -217,15 +217,13 @@ Its main focus is translation of the whole PyPy interpreter to a low level language and reaching 2.4.1 Python compliance. The goal of the sprint is to release a first self-contained -PyPy-0.7 version. Carl has written a report about `day 1 - 3`_, -there are `some pictures`_ online and a `heidelberg summary report`_ -detailing some of the works that led to the successful release -of `pypy-0.7.0`_! +PyPy-0.7 version. Carl has written a report about `day 1 - 3`_ +and a `heidelberg summary report`_ detailing some of the works +that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.html -.. _`PyPy sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-sprint.html -.. 
_`day 1 - 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002287.html -.. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt +.. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html PyPy Hildesheim2 finished: first self-contained PyPy run! =========================================================== @@ -233,20 +231,16 @@ Up until 31st August we were in a PyPy sprint at `Trillke-Gut`_. Carl has written a `report about day 1`_, Holger about `day 2 and day 3`_ and Carl again about `day 4 and day 5`_, -On `day 6`_ Holger reports the `breakthrough`_: PyPy runs -on its own! Hurray_!. And Carl finally reports about the winding +On `day 6`_ Holger reports the breakthrough: PyPy runs +on its own! Hurray!. And Carl finally reports about the winding down of `day 7`_ which saw us relaxing, discussing and generally -having a good time. You might want to look at the selected -`pictures from the sprint`_. +having a good time. -.. _`report about day 1`: http://codespeak.net/pipermail/pypy-dev/2005q3/002217.html -.. _`day 2 and day 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002220.html -.. _`day 4 and day 5`: http://codespeak.net/pipermail/pypy-dev/2005q3/002234.html -.. _`day 6`: http://codespeak.net/pipermail/pypy-dev/2005q3/002239.html -.. _`day 7`: http://codespeak.net/pipermail/pypy-dev/2005q3/002245.html -.. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg -.. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html -.. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ +.. _`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html +.. 
_`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html +.. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html +.. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html +.. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html .. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished @@ -264,15 +258,15 @@ the LLVM backends and type inference in general. *(07/13/2005)* -.. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html -.. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html -.. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev +.. _`day 1`: https://mail.python.org/pipermail/pypy-dev/2005-June/002169.html +.. _`day 2`: https://mail.python.org/pipermail/pypy-dev/2005-June/002171.html +.. _`day 3`: https://mail.python.org/pipermail/pypy-dev/2005-June/002172.html +.. _`pypy-dev`: https://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-announcement.html -.. _`list of people coming`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-people.html +.. _`sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-announcement.html +.. _`list of people coming`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-people.html Duesseldorf PyPy sprint 2-9 June 2006 ================================================================== @@ -285,8 +279,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. 
_`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.txt +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy at XP 2006 and Agile 2006 diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -79,7 +79,7 @@ .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf -.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 +.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf .. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf @@ -356,8 +356,6 @@ .. _`transparent dynamic optimization`: http://www.hpl.hp.com/techreports/1999/HPL-1999-77.pdf .. _Dynamo: http://www.hpl.hp.com/techreports/1999/HPL-1999-78.pdf .. _testdesign: coding-guide.html#test-design -.. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html -.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ .. _IronPython: http://ironpython.codeplex.com/ .. 
_`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -222,7 +222,7 @@ PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use for other projects. +`_ and use for other projects. The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -264,7 +264,7 @@ interpreter. .. _`py.test testing tool`: http://pytest.org -.. _`py.test usage and invocations`: http://pytest.org/usage.html#usage +.. _`py.test usage and invocations`: http://pytest.org/latest/usage.html#usage Special Introspection Features of the Untranslated Python Interpreter --------------------------------------------------------------------- diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - .. _glossary: ******** diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -256,7 +256,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -185,6 +185,6 @@ .. _`standard object space`: objspace.html#the-standard-object-space .. 
[D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy - EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf + EU-Report, 2007, https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf .. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -339,44 +339,35 @@ Object types ------------ -The larger part of the `pypy/objspace/std/`_ package defines and implements the -library of Python's standard built-in object types. Each type (int, float, -list, tuple, str, type, etc.) is typically implemented by two modules: +The larger part of the `pypy/objspace/std/`_ package defines and +implements the library of Python's standard built-in object types. Each +type ``xxx`` (int, float, list, tuple, str, type, etc.) is typically +implemented in the module ``xxxobject.py``. -* the *type specification* module, which for a type ``xxx`` is called ``xxxtype.py``; +The ``W_AbstractXxxObject`` class, when present, is the abstract base +class, which mainly defines what appears on the Python-level type +object. There are then actual implementations as subclasses, which are +called ``W_XxxObject`` or some variant for the cases where we have +several different implementations. For example, +`pypy/objspace/std/bytesobject.py`_ defines ``W_AbstractBytesObject``, +which contains everything needed to build the ``str`` app-level type; +and there are subclasses ``W_BytesObject`` (the usual string) and +``W_StringBufferObject`` (a special implementation tweaked for repeated +additions, in `pypy/objspace/std/strbufobject.py`_). For mutable data +types like lists and dictionaries, we have a single class +``W_ListObject`` or ``W_DictMultiObject`` which has an indirection to +the real data and a strategy; the strategy can change as the content of +the object changes. 
-* the *implementation* module, called ``xxxobject.py``. - -The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods -specific to lists, like ``append()``. - -A particular method implemented by all types is the ``__new__()`` special -method, which in Python's new-style-classes world is responsible for creating -an instance of the type. In PyPy, ``__new__()`` locates and imports the module -implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ -defines ``__new__()`` to import the class ``W_TupleObject`` from -`pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a -"real" implementation of tuples: the way the data is stored in the -``W_TupleObject`` class, how the operations work, etc. - -The goal of the above module layout is to cleanly separate the Python -type object, visible to the user, and the actual implementation of its -instances. It is possible to provide *several* implementations of the -instances of the same Python type, by writing several ``W_XxxObject`` -classes. Every place that instantiates a new object of that Python type -can decide which ``W_XxxObject`` class to instantiate. - -From the user's point of view, the multiple internal ``W_XxxObject`` -classes are not visible: they are still all instances of exactly the -same Python type. PyPy knows that (e.g.) 
the application-level type of -its interpreter-level ``W_StringObject`` instances is str because -there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all -other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringtype.py`_. +From the user's point of view, even when there are several +``W_AbstractXxxObject`` subclasses, this is not visible: at the +app-level, they are still all instances of exactly the same Python type. +PyPy knows that (e.g.) the application-level type of its +interpreter-level ``W_BytesObject`` instances is str because there is a +``typedef`` class attribute in ``W_BytesObject`` which points back to +the string type specification from `pypy/objspace/std/bytesobject.py`_; +all other implementations of strings use the same ``typedef`` from +`pypy/objspace/std/bytesobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -387,6 +378,9 @@ Multimethods ------------ +*Note: multimethods are on the way out. Although they look cool, +they failed to provide enough benefits.* + The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. For a description of the multimethod variant that we implemented and which features it supports, @@ -491,7 +485,7 @@ Introduction ------------ -The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `rpython/flowspace/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. 
diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py +.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/extfunc.py diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,30 +42,30 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): http://codespeak.net/pypy/extradoc/sprintinfo/HildesheimReport.html - .. _Gothenburg (May 2003): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2003-sprintreport.txt - .. _LovainLaNeuve (June 2003): http://codespeak.net/pypy/extradoc/sprintinfo/LouvainLaNeuveReport.txt - .. _Berlin (Sept 2003): http://codespeak.net/pypy/extradoc/sprintinfo/BerlinReport.txt - .. _Amsterdam (Dec 2003): http://codespeak.net/pypy/extradoc/sprintinfo/AmsterdamReport.txt - .. _Vilnius (Nov 2004): http://codespeak.net/pypy/extradoc/sprintinfo/vilnius-2004-sprintreport.txt - .. _Leysin (Jan 2005): http://codespeak.net/pypy/extradoc/sprintinfo/LeysinReport.txt - .. 
_PyCon/Washington (March 2005): http://codespeak.net/pypy/extradoc/sprintinfo/pycon_sprint_report.txt - .. _Europython/Gothenburg (June 2005): http://codespeak.net/pypy/extradoc/sprintinfo/ep2005-sprintreport.txt - .. _Hildesheim (July 2005): http://codespeak.net/pypy/extradoc/sprintinfo/hildesheim2005-sprintreport.txt - .. _Heidelberg (Aug 2005): http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.txt - .. _Paris (Oct 2005): http://codespeak.net/pypy/extradoc/sprintinfo/paris/paris-report.txt - .. _Gothenburg (Dec 2005): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt - .. _Mallorca (Jan 2006): http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/mallorca-sprintreport.txt - .. _LouvainLaNeuve (March 2006): http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.txt - .. _Leysin (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2006-sprintreport.txt - .. _Tokyo (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/sprint-report.txt - .. _Düsseldorf (June 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/report1.txt - .. _Europython/Geneva (July 2006): http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt - .. _Düsseldorf (October 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/report.txt - .. _`Leysin (January 2007)`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/report.txt - .. _Hildesheim (Feb 2007): http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/sprint-report.txt - .. _`EU report writing sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/eu-report-sprint-report.txt - .. _`PyCon/Dallas (Feb 2006)`: http://codespeak.net/pypy/extradoc/sprintinfo/pycon06/sprint-report.txt + .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.txt + .. 
_Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt + .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt + .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt + .. _Amsterdam (Dec 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/AmsterdamReport.txt + .. _Vilnius (Nov 2004): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/vilnius-2004-sprintreport.txt + .. _Leysin (Jan 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LeysinReport.txt + .. _PyCon/Washington (March 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon_sprint_report.txt + .. _Europython/Gothenburg (June 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ep2005-sprintreport.txt + .. _Hildesheim (July 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/hildesheim2005-sprintreport.txt + .. _Heidelberg (Aug 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt + .. _Paris (Oct 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris/paris-report.txt + .. _Gothenburg (Dec 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt + .. _Mallorca (Jan 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/mallorca-sprintreport.txt + .. _LouvainLaNeuve (March 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt + .. _Leysin (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2006-sprintreport.txt + .. _Tokyo (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/tokyo/sprint-report.txt + .. _Düsseldorf (June 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/report1.txt + .. 
_Europython/Geneva (July 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt + .. _Düsseldorf (October 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006b/report.txt + .. _`Leysin (January 2007)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2007/report.txt + .. _Hildesheim (Feb 2007): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/sprint-report.txt + .. _`EU report writing sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/eu-report-sprint-report.txt + .. _`PyCon/Dallas (Feb 2006)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon06/sprint-report.txt .. _`Göteborg (November 2007)`: http://morepypy.blogspot.com/2007_11_01_archive.html .. _`Leysin (January 2008)`: http://morepypy.blogspot.com/2008/01/leysin-winter-sport-sprint-started.html .. _`Berlin (May 2008)`: http://morepypy.blogspot.com/2008_05_01_archive.html diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst --- a/pypy/doc/statistic/index.rst +++ b/pypy/doc/statistic/index.rst @@ -1,3 +1,7 @@ +.. warning:: + + This page is no longer updated, of historical interest only. + ======================= PyPy Project Statistics ======================= diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -15,25 +15,49 @@ user, describes work in progress, and finally gives references to more implementation details. -This work was done by Remi Meier and Armin Rigo. +This work was done by Remi Meier and Armin Rigo. Thanks to all donors +for crowd-funding the work so far! Please have a look at the 2nd call +for donation (*not ready yet*) +.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html -Introduction and current status -=============================== + +Introduction +============ ``pypy-stm`` is a variant of the regular PyPy interpreter. 
With caveats -listed below, it should be in theory within 25%-50% of the speed of -PyPy, comparing the JITting version in both cases. It is called STM for -Software Transactional Memory, which is the internal technique used (see -`Reference to implementation details`_). +listed below, it should be in theory within 25%-50% of the speed of a +regular PyPy, comparing the JITting version in both cases. It is called +STM for Software Transactional Memory, which is the internal technique +used (see `Reference to implementation details`_). + +What you get in exchange for this slow-down is that ``pypy-stm`` runs +any multithreaded Python program on multiple CPUs at once. Programs +running two threads or more in parallel should ideally run faster than +in a regular PyPy, either now or soon as issues are fixed. In one way, +that's all there is to it: this is a GIL-less Python, feel free to +`download and try it`__. However, the deeper idea behind the +``pypy-stm`` project is to improve what is so far the state-of-the-art +for using multiple CPUs, which for cases where separate processes don't +work is done by writing explicitly multi-threaded programs. Instead, +``pypy-stm`` is flushing forward an approach to *hide* the threads, as +described below in `atomic sections`_. + + +.. __: + +Current status +============== **pypy-stm requires 64-bit Linux for now.** Development is done in the branch `stmgc-c7`_. If you are only interested in trying it out, you can download a Ubuntu 12.04 binary -here__. The current version supports four "segments", which means that -it will run up to four threads in parallel (in other words, you get a -GIL effect again, but only if trying to execute more than 4 threads). +here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, +but not stripped of debug symbols). 
The current version supports four +"segments", which means that it will run up to four threads in parallel +(in other words, you get a GIL effect again, but only if trying to +execute more than 4 threads). To build a version from sources, you first need to compile a custom version of clang; we recommend downloading `llvm and clang like @@ -46,17 +70,19 @@ rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py .. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ -.. __: http://buildbot.pypy.org/nightly/stmgc-c7/ +.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/misc/ .. __: http://clang.llvm.org/get_started.html .. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ Caveats: -* It should generally work. Please do `report bugs`_ that manifest as a - crash or wrong behavior (markedly different from the behavior of a - regular PyPy). Performance bugs are likely to be known issues; we're - working on them. +* So far, small examples work fine, but there are still a number of + bugs. We're busy fixing them. + +* Currently limited to 1.5 GB of RAM (this is just a parameter in + `core.h`__). Memory overflows are not detected correctly, so may + cause segmentation faults. * The JIT warm-up time is abysmal (as opposed to the regular PyPy's, which is "only" bad). Moreover, you should run it with a command like @@ -73,9 +99,11 @@ programs that modify large lists or dicts, suffer from these missing optimizations. -* The GC has no support for destructors: the ``__del__`` method is - never called (including on file objects, which won't be closed for - you). This is of course temporary. +* The GC has no support for destructors: the ``__del__`` method is never + called (including on file objects, which won't be closed for you). + This is of course temporary. Also, weakrefs might appear to work a + bit strangely for now (staying alive even though ``gc.collect()``, or + even dying but then un-dying for a short time before dying again). 
* The STM system is based on very efficient read/write barriers, which are mostly done (their placement could be improved a bit in @@ -98,6 +126,7 @@ probably, several days or more. .. _`report bugs`: https://bugs.pypy.org/ +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h @@ -172,9 +201,9 @@ unchanged. This capability can be hidden in a library or in the framework you use; the end user's code does not need to be explicitly aware of using threads. For a simple example of this, see -`lib_pypy/transaction.py`_. The idea is that if you have a program -where the function ``f(key, value)`` runs on every item of some big -dictionary, you can replace the loop with:: +`transaction.py`_ in ``lib_pypy``. The idea is that if you have a +program where the function ``f(key, value)`` runs on every item of some +big dictionary, you can replace the loop with:: for key, value in bigdict.items(): transaction.add(f, key, value) @@ -185,7 +214,18 @@ result is that the behavior should be exactly equivalent: you don't get any extra multithreading issue. -.. _`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +This approach hides the notion of threads from the end programmer, +including all the hard multithreading-related issues. This is not the +first alternative approach to explicit threads; for example, OpenMP_ is +one. However, it is one of the first ones which does not require the +code to be organized in a particular fashion. Instead, it works on any +Python program which has got latent, imperfect parallelism. Ideally, it +only requires that the end programmer identifies where this parallelism +is likely to be found, and communicates it to the system, using for +example the ``transaction.add()`` scheme. + +.. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +.. 
_OpenMP: http://en.wikipedia.org/wiki/OpenMP ================== @@ -221,7 +261,7 @@ The core of the implementation is in a separate C library called stmgc_, in the c7_ subdirectory. Please see the `README.txt`_ for more -information. +information. In particular, the notion of segment is discussed there. .. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ .. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -380,7 +380,7 @@ The RPython Typer ================= -https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ The RTyper is the first place where the choice of backend makes a difference; as outlined above we are assuming that ANSI C is the target. @@ -603,7 +603,7 @@ - using the `Boehm-Demers-Weiser conservative garbage collector`_ - using one of our custom `exact GCs implemented in RPython`_ -.. _`Boehm-Demers-Weiser conservative garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser conservative garbage collector`: http://hboehm.info/gc/ .. _`exact GCs implemented in RPython`: garbage_collection.html Almost all application-level Python code allocates objects at a very fast @@ -621,7 +621,7 @@ The C Back-End ============== -https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -86,7 +86,7 @@ option (this is the default at some optimization levels like ``-O1``, but unneeded for high-performance translations like ``-O2``). 
You may get it at -http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_source/gc-7.1.tar.gz +http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with pypy. Unpack this folder in the base directory. Then open a command diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py --- a/pypy/module/_posixsubprocess/interp_subprocess.py +++ b/pypy/module/_posixsubprocess/interp_subprocess.py @@ -167,36 +167,37 @@ run_fork_hooks('before', space) try: - pid = os.fork() - except OSError, e: - raise wrap_oserror(space, e) + try: + pid = os.fork() + except OSError, e: + raise wrap_oserror(space, e) - if pid == 0: - # Child process - # Code from here to _exit() must only use - # async-signal-safe functions, listed at `man 7 signal` - # http://www.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html. - if not space.is_none(w_preexec_fn): - # We'll be calling back into Python later so we need - # to do this. This call may not be async-signal-safe - # but neither is calling back into Python. The user - # asked us to use hope as a strategy to avoid - # deadlock... - run_fork_hooks('child', space) + if pid == 0: + # Child process + # Code from here to _exit() must only use + # async-signal-safe functions, listed at `man 7 signal` + # http://www.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html. + if not space.is_none(w_preexec_fn): + # We'll be calling back into Python later so we need + # to do this. This call may not be async-signal-safe + # but neither is calling back into Python. The user + # asked us to use hope as a strategy to avoid + # deadlock... 
+ run_fork_hooks('child', space) - c_child_exec( - l_exec_array, l_argv, l_envp, l_cwd, - p2cread, p2cwrite, c2pread, c2pwrite, - errread, errwrite, errpipe_read, errpipe_write, - close_fds, restore_signals, call_setsid, - l_fds_to_keep, len(fds_to_keep), - PreexecCallback.run_function, None) - os._exit(255) + c_child_exec( + l_exec_array, l_argv, l_envp, l_cwd, + p2cread, p2cwrite, c2pread, c2pwrite, + errread, errwrite, errpipe_read, errpipe_write, + close_fds, restore_signals, call_setsid, + l_fds_to_keep, len(fds_to_keep), + PreexecCallback.run_function, None) + os._exit(255) + finally: + # parent process + run_fork_hooks('parent', space) - # parent process finally: - run_fork_hooks('parent', space) - preexec.w_preexec_fn = None if l_cwd: diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -1,10 +1,17 @@ import py from rpython.config.translationoption import get_combined_translation_config from rpython.config.translationoption import set_opt_level -from rpython.config.config import ConflictConfigError +from rpython.config.config import ConflictConfigError, ConfigError +from rpython.translator.platform import platform as compiler def test_no_gcrootfinder_with_boehm(): config = get_combined_translation_config() config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') + +if compiler.name == 'msvc': + def test_no_asmgcrot_on_msvc(): + config = get_combined_translation_config() + py.test.raises(ConfigError, config.translation.setoption, + 'gcrootfinder', 'asmgcc', 'user') diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -1,9 +1,10 @@ import sys import os from rpython.config.config import OptionDescription, BoolOption, IntOption, 
ArbitraryOption, FloatOption -from rpython.config.config import ChoiceOption, StrOption, Config +from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError from rpython.config.support import detect_number_of_processors +from rpython.translator.platform import platform as compiler DEFL_INLINE_THRESHOLD = 32.4 # just enough to inline add__Int_Int() # and just small enough to prevend inlining of some rlist functions. @@ -16,8 +17,13 @@ if sys.platform.startswith("linux"): DEFL_ROOTFINDER_WITHJIT = "asmgcc" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] +elif compiler.name == 'msvc': + DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack"] else: DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] IS_64_BITS = sys.maxint > 2147483647 @@ -85,7 +91,7 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ROOTFINDERS, "shadowstack", cmdline="--gcrootfinder", requires={ diff --git a/rpython/flowspace/test/test_unroll.py b/rpython/flowspace/test/test_unroll.py --- a/rpython/flowspace/test/test_unroll.py +++ b/rpython/flowspace/test/test_unroll.py @@ -1,23 +1,10 @@ import operator + from rpython.flowspace.test.test_objspace import Base -from rpython.rlib.unroll import unrolling_zero, unrolling_iterable +from rpython.rlib.unroll import unrolling_iterable + class TestUnroll(Base): - - def test_unrolling_int(self): - l = range(10) - def f(tot): - i = unrolling_zero - while i < len(l): - tot += l[i] - i = i + 1 - return tot*2 - assert f(0) == sum(l)*2 - - graph = self.codetest(f) - ops = self.all_operations(graph) - assert ops == {'inplace_add': 10, 'mul': 1} - def test_unroller(self): l = unrolling_iterable(range(10)) def f(tot): diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py 
b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -5,7 +5,7 @@ """ import weakref -import os +import os, py from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype from rpython.rlib.jit import JitDriver, dont_look_inside @@ -13,6 +13,7 @@ from rpython.jit.backend.llsupport.gc import GcLLDescr_framework from rpython.tool.udir import udir from rpython.config.translationoption import DEFL_GC +from rpython.config.config import ConfigError class X(object): @@ -166,6 +167,9 @@ cls.cbuilder = compile(get_entry(allfuncs), cls.gc, gcrootfinder=cls.gcrootfinder, jit=True, thread=True) + except ConfigError, e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: GcLLDescr_framework.DEBUG = OLD_DEBUG diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -1,4 +1,4 @@ -import os, sys +import os, sys, py from rpython.tool.udir import udir from rpython.rlib.jit import JitDriver, unroll_parameters, set_param from rpython.rlib.jit import PARAMETERS, dont_look_inside @@ -7,7 +7,7 @@ from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.test.support import CCompiledMixin from rpython.jit.codewriter.policy import StopAtXPolicy - +from rpython.config.config import ConfigError class TranslationTest(CCompiledMixin): CPUClass = getcpuclass() @@ -252,6 +252,9 @@ try: res = self.meta_interp(main, [400]) assert res == main(400) + except ConfigError,e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: del os.environ['PYPYLOG'] diff --git a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py 
--- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py @@ -1,4 +1,9 @@ +import py from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests +from rpython.translator.platform import platform as compiler + +if compiler.name == 'msvc': + py.test.skip('asmgcc buggy on msvc') class TestAsmGcc(CompileFrameworkTests): gcrootfinder = "asmgcc" diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -567,7 +567,8 @@ # common case: this is not a guard_value, and we are not # already busy tracing. The rest of self.status stores a # valid per-guard index in the jitcounter. - hash = self.status & self.ST_SHIFT_MASK + hash = self.status + assert hash == (self.status & self.ST_SHIFT_MASK) # # do we have the BUSY flag? If so, we're tracing right now, e.g. in an # outer invocation of the same function, so don't trace again for now. 
diff --git a/rpython/jit/metainterp/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py rename from rpython/jit/metainterp/test/test_intbound.py rename to rpython/jit/metainterp/optimizeopt/test/test_intbound.py --- a/rpython/jit/metainterp/test/test_intbound.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py @@ -1,5 +1,7 @@ from rpython.jit.metainterp.optimizeopt.intutils import IntBound, IntUpperBound, \ IntLowerBound, IntUnbounded +from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 + from copy import copy import sys from rpython.rlib.rarithmetic import LONG_BIT @@ -235,8 +237,8 @@ for shift_count_bound in (IntBound(7, LONG_BIT), IntBound(-7, 7)): #assert not b.lshift_bound(shift_count_bound).has_upper assert not b.rshift_bound(shift_count_bound).has_upper - - + + def test_div_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -258,7 +260,6 @@ assert a.contains(0) - def test_sub_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -271,3 +272,14 @@ a=bound(2, 4).sub_bound(bound(1, 2)) assert not a.contains(-1) assert not a.contains(4) + + +def test_next_pow2_m1(): + assert next_pow2_m1(0) == 0 + assert next_pow2_m1(1) == 1 + assert next_pow2_m1(7) == 7 + assert next_pow2_m1(256) == 511 + assert next_pow2_m1(255) == 255 + assert next_pow2_m1(80) == 127 + assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 + assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py deleted file mode 100644 --- a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py +++ /dev/null @@ -1,12 +0,0 @@ -from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 - - -def test_next_pow2_m1(): - assert next_pow2_m1(0) == 0 - assert next_pow2_m1(1) == 1 - assert next_pow2_m1(7) == 7 - assert next_pow2_m1(256) == 511 - assert next_pow2_m1(255) == 255 
- assert next_pow2_m1(80) == 127 - assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 - assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py rename from rpython/jit/metainterp/test/test_virtualstate.py rename to rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -89,6 +89,11 @@ assert isgeneral(OptValue(ConstPtr(fooref)), OptValue(ConstPtr(fooref))) + value1 = OptValue(BoxPtr()) + value1.make_nonnull(None) + value2 = OptValue(ConstPtr(LLtypeMixin.nullptr)) + assert not isgeneral(value1, value2) + def test_field_matching_generalization(self): const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) @@ -202,6 +207,17 @@ """ self.compare(guards, expected, [box]) + def test_known_value(self): + value1 = OptValue(self.nodebox) + value1.make_constant(ConstInt(1)) + box = self.nodebox + guards = value1.make_guards(box) + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.compare(guards, expected, [box]) + def test_equal_inputargs(self): value = OptValue(self.nodebox) classbox = self.cpu.ts.cls_of_box(self.nodebox) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -296,7 +296,7 @@ i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) - self.finilize_short_preamble(start_label) + self.finalize_short_preamble(start_label) def close_loop(self, start_label, jumpop): virtual_state = self.initial_virtual_state @@ -403,9 +403,9 @@ assert isinstance(target_token, TargetToken) 
target_token.targeting_jitcell_token.retraced_count = sys.maxint - self.finilize_short_preamble(start_label) + self.finalize_short_preamble(start_label) - def finilize_short_preamble(self, start_label): + def finalize_short_preamble(self, start_label): short = self.short assert short[-1].getopnum() == rop.JUMP target_token = start_label.getdescr() diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -18,7 +18,15 @@ position = -1 def generalization_of(self, other, renum, bad): - raise NotImplementedError + assert self.position != -1 + if self.position in renum: + result = renum[self.position] == other.position + else: + renum[self.position] = other.position + result = self.generalization_of_renumbering_done(other, renum, bad) + if not result: + bad[self] = bad[other] = None + return result def generate_guards(self, other, box, cpu, extra_guards, renum): if self.generalization_of(other, renum, {}): @@ -67,37 +75,21 @@ def __init__(self, fielddescrs): self.fielddescrs = fielddescrs - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False assert isinstance(other, AbstractVirtualStructStateInfo) assert len(self.fielddescrs) == len(self.fieldstate) assert len(other.fielddescrs) == len(other.fieldstate) if len(self.fielddescrs) != len(other.fielddescrs): - bad[self] = None - bad[other] = None return False for i in range(len(self.fielddescrs)): if other.fielddescrs[i] is not self.fielddescrs[i]: - bad[self] = None - bad[other] = None return 
False if not self.fieldstate[i].generalization_of(other.fieldstate[i], renum, bad): - bad[self] = None - bad[other] = None return False return True @@ -130,11 +122,8 @@ self.known_class = known_class def _generalization_of(self, other): - if not isinstance(other, VirtualStateInfo): - return False - if not self.known_class.same_constant(other.known_class): - return False - return True + return (isinstance(other, VirtualStateInfo) and + self.known_class.same_constant(other.known_class)) def debug_header(self, indent): debug_print(indent + 'VirtualStateInfo(%d):' % self.position) @@ -146,11 +135,8 @@ self.typedescr = typedescr def _generalization_of(self, other): - if not isinstance(other, VStructStateInfo): - return False - if self.typedescr is not other.typedescr: - return False - return True + return (isinstance(other, VStructStateInfo) and + self.typedescr is other.typedescr) def debug_header(self, indent): debug_print(indent + 'VStructStateInfo(%d):' % self.position) @@ -165,28 +151,14 @@ return (isinstance(other, VArrayStateInfo) and self.arraydescr is other.arraydescr) - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False if len(self.fieldstate) != len(other.fieldstate): - bad[self] = None - bad[other] = None return False for i in range(len(self.fieldstate)): if not self.fieldstate[i].generalization_of(other.fieldstate[i], renum, bad): - bad[self] = None - bad[other] = None return False return True @@ -216,41 +188,23 @@ self.arraydescr = arraydescr self.fielddescrs = fielddescrs - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - if 
renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position + def generalization_of_renumbering_done(self, other, renum, bad): if not self._generalization_of(other): - bad[self] = None - bad[other] = None return False assert isinstance(other, VArrayStructStateInfo) if len(self.fielddescrs) != len(other.fielddescrs): - bad[self] = None - bad[other] = None return False p = 0 for i in range(len(self.fielddescrs)): if len(self.fielddescrs[i]) != len(other.fielddescrs[i]): - bad[self] = None - bad[other] = None return False for j in range(len(self.fielddescrs[i])): if self.fielddescrs[i][j] is not other.fielddescrs[i][j]: - bad[self] = None - bad[other] = None return False if not self.fieldstate[p].generalization_of(other.fieldstate[p], renum, bad): - bad[self] = None - bad[other] = None return False p += 1 return True @@ -302,49 +256,31 @@ self.position_in_notvirtuals = -1 self.lenbound = value.lenbound - def generalization_of(self, other, renum, bad): + def generalization_of_renumbering_done(self, other, renum, bad): # XXX This will always retrace instead of forcing anything which # might be what we want sometimes? 
- assert self.position != -1 - if self.position in renum: - if renum[self.position] == other.position: - return True - bad[self] = None - bad[other] = None - return False - renum[self.position] = other.position if not isinstance(other, NotVirtualStateInfo): - bad[self] = None - bad[other] = None return False if other.level < self.level: - bad[self] = None - bad[other] = None return False if self.level == LEVEL_CONSTANT: if not self.constbox.same_constant(other.constbox): - bad[self] = None - bad[other] = None return False elif self.level == LEVEL_KNOWNCLASS: if not self.known_class.same_constant(other.known_class): - bad[self] = None - bad[other] = None return False + elif self.level == LEVEL_NONNULL: + if other.constbox and not other.constbox.nonnull(): + return False + if not self.intbound.contains_bound(other.intbound): - bad[self] = None - bad[other] = None return False if self.lenbound and other.lenbound: if self.lenbound.mode != other.lenbound.mode or \ self.lenbound.descr != other.lenbound.descr or \ not self.lenbound.bound.contains_bound(other.lenbound.bound): - bad[self] = None - bad[other] = None return False elif self.lenbound: - bad[self] = None - bad[other] = None return False return True diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -501,6 +501,50 @@ return g(n, 1) + g(n, 2) assert self.meta_interp(h, [25]) == 7 * 25 * (7 + 8 + 2 + 3) + def test_two_bridged_loops_classes(self): + myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'n', 'x', 's']) + class A(object): + pass + bytecode = "I7i" + def f(n, s): + i = x = 0 + pos = 0 + op = '-' + while pos < len(bytecode): + myjitdriver.jit_merge_point(pos=pos, i=i, n=n, s=s, x=x) + op = bytecode[pos] + if op == 'i': + i += 1 + pos -= 2 + myjitdriver.can_enter_jit(pos=pos, i=i, n=n, s=s, x=x) + continue + elif op == 'I': + if not (i < n): + pos += 2 + elif op 
== '7': + if s is not None: + x = x + 7 + else: + x = x + 2 + pos += 1 + return x + + def g(n, s): + if s == 2: + s = None + else: + s = A() + sa = 0 + for i in range(7): + sa += f(n, s) + return sa + #assert self.meta_interp(g, [25, 1]) == g(25, 1) + + def h(n): + return g(n, 1) + g(n, 2) + assert self.meta_interp(h, [25]) == h(25) + + def test_three_nested_loops(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x']) bytecode = ".+357" diff --git a/rpython/rlib/test/test_jit_libffi.py b/rpython/rlib/test/test_jit_libffi.py --- a/rpython/rlib/test/test_jit_libffi.py +++ b/rpython/rlib/test/test_jit_libffi.py @@ -1,13 +1,16 @@ import math import ctypes +import sys from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib import clibffi from rpython.rlib.rarithmetic import intmask from rpython.rlib.jit_libffi import CIF_DESCRIPTION from rpython.rlib.jit_libffi import jit_ffi_prep_cif, jit_ffi_call - -math_sin = intmask(ctypes.cast(ctypes.CDLL(None).sin, ctypes.c_void_p).value) +if sys.platform == 'win32': + math_sin = intmask(ctypes.cast(ctypes.cdll.msvcrt.sin, ctypes.c_void_p).value) +else: + math_sin = intmask(ctypes.cast(ctypes.CDLL(None).sin, ctypes.c_void_p).value) math_sin = rffi.cast(rffi.VOIDP, math_sin) diff --git a/rpython/rlib/test/test_libffi.py b/rpython/rlib/test/test_libffi.py --- a/rpython/rlib/test/test_libffi.py +++ b/rpython/rlib/test/test_libffi.py @@ -540,7 +540,7 @@ } """ libfoo = CDLL(self.libfoo_name) - ffi_point_struct = make_struct_ffitype_e(0, 0, [types.signed, types.signed]) + ffi_point_struct = make_struct_ffitype_e(rffi.sizeof(rffi.SIGNED)*2, 0, [types.signed, types.signed]) ffi_point = ffi_point_struct.ffistruct libfoo = CDLL(self.libfoo_name) diff --git a/rpython/rlib/unroll.py b/rpython/rlib/unroll.py --- a/rpython/rlib/unroll.py +++ b/rpython/rlib/unroll.py @@ -6,31 +6,14 @@ class SpecTag(object): __slots__ = () - + def __repr__(self): return '%s(0x%x)' % (self.__class__.__name__, uid(self)) + def _freeze_(self): 
return True -class unrolling_int(int, SpecTag): - - def __add__(self, other): - return unrolling_int(int.__add__(self, other)) - - __radd__ = __add__ - - def __sub__(self, other): - return unrolling_int(int.__sub__(self, other)) - - def __rsub__(self, other): - return unrolling_int(int.__rsub__(self, other)) - - -unrolling_zero = unrolling_int(0) - -# ____________________________________________________________ - # 'for' iteration over iterables wrapped in an instance # of unrolling_iterable will be unrolled by the flow space, # like in: diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -1172,6 +1172,8 @@ # XXX magic: on Windows try to load the function from 'kernel32' too if cfunc is None and hasattr(ctypes, 'windll'): cfunc = get_on_lib(ctypes.windll.kernel32, funcname) + if cfunc is None and hasattr(ctypes, 'windll'): + cfunc = get_on_lib(ctypes.cdll.msvcrt, funcname) if cfunc is None: # function name not found in any of the libraries From noreply at buildbot.pypy.org Mon Apr 7 21:48:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 7 Apr 2014 21:48:57 +0200 (CEST) Subject: [pypy-commit] extradoc stm-edit: Fix Message-ID: <20140407194857.F31ED1C0110@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-edit Changeset: r5188:b18f495241c3 Date: 2014-04-07 21:48 +0200 http://bitbucket.org/pypy/extradoc/changeset/b18f495241c3/ Log: Fix diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -414,7 +414,7 @@ of programming. The PyPy project created a substantially improved Python language implementation, including a fast Just-in-time (JIT) compiler. 
The increased execution speed that PyPy provides has attracted many users, -who now find their Python code runs up to four times faster under PyPy +who now find their Python code runs between 2 and 50 times faster under PyPy than under the reference implementation written in C. However, in the presence of today's machines with multiple processors, From noreply at buildbot.pypy.org Mon Apr 7 21:49:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 7 Apr 2014 21:49:56 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Fix Message-ID: <20140407194956.BA0591C0110@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5189:ff54844779ce Date: 2014-04-07 21:48 +0200 http://bitbucket.org/pypy/extradoc/changeset/ff54844779ce/ Log: Fix diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -414,7 +414,7 @@ of programming. The PyPy project created a substantially improved Python language implementation, including a fast Just-in-time (JIT) compiler. The increased execution speed that PyPy provides has attracted many users, -who now find their Python code runs up to four times faster under PyPy +who now find their Python code runs between 2 and 50 times faster under PyPy than under the reference implementation written in C. 
However, in the presence of today's machines with multiple processors, From noreply at buildbot.pypy.org Tue Apr 8 10:10:09 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 8 Apr 2014 10:10:09 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: make generate_guards take an OptValue instead of a box Message-ID: <20140408081009.546261C303A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70486:63d82c48f6f4 Date: 2014-04-08 10:09 +0200 http://bitbucket.org/pypy/pypy/changeset/63d82c48f6f4/ Log: make generate_guards take an OptValue instead of a box (in theory, it is not necessary to make it take a second virtual state now, but well) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -144,10 +144,16 @@ class BaseTestGenerateGuards(BaseTest): - def guards(self, info1, info2, box, expected): + def guards(self, info1, info2, box_or_value, expected): + if isinstance(box_or_value, OptValue): + value = box_or_value + box = value.box + else: + box = box_or_value + value = OptValue(box) info1.position = info2.position = 0 guards = [] - info1.generate_guards(info2, box, self.cpu, guards, {}) + info1.generate_guards(info2, value, self.cpu, guards, {}) self.compare(guards, expected, [box]) def compare(self, guards, expected, inputargs): @@ -162,7 +168,7 @@ assert equaloplists(guards, loop.operations, False, boxmap) def test_intbounds(self): - value1 = OptValue(BoxInt()) + value1 = OptValue(BoxInt(15)) value1.intbound.make_ge(IntBound(0, 10)) value1.intbound.make_le(IntBound(20, 30)) info1 = NotVirtualStateInfo(value1) @@ -174,7 +180,7 @@ i2 = int_le(i0, 30) guard_true(i2) [] """ - self.guards(info1, info2, BoxInt(15), expected) + self.guards(info1, info2, value1, expected) 
py.test.raises(InvalidLoop, self.guards, info1, info2, BoxInt(50), expected) @@ -219,7 +225,7 @@ self.compare(guards, expected, [box]) def test_equal_inputargs(self): - value = OptValue(self.nodebox) + value = OptValue(self.nodebox) classbox = self.cpu.ts.cls_of_box(self.nodebox) value.make_constant_class(classbox, -1) knownclass_info = NotVirtualStateInfo(value) @@ -242,20 +248,20 @@ expected = """ [p0] - guard_nonnull(p0) [] + guard_nonnull(p0) [] guard_class(p0, ConstClass(node_vtable)) [] """ guards = [] - vstate1.generate_guards(vstate2, [self.nodebox, self.nodebox], self.cpu, guards) + vstate1.generate_guards(vstate2, [value, value], self.cpu, guards) self.compare(guards, expected, [self.nodebox]) with py.test.raises(InvalidLoop): guards = [] - vstate1.generate_guards(vstate3, [self.nodebox, self.nodebox], + vstate1.generate_guards(vstate3, [value, value], self.cpu, guards) with py.test.raises(InvalidLoop): guards = [] - vstate2.generate_guards(vstate3, [self.nodebox, self.nodebox], + vstate2.generate_guards(vstate3, [value, value], self.cpu, guards) def test_known_value_virtualstate(self): @@ -271,7 +277,7 @@ guard_value(i0, 1) [] """ guards = [] - vstate1.generate_guards(vstate2, [box2], self.cpu, guards) + vstate1.generate_guards(vstate2, [value2], self.cpu, guards) self.compare(guards, expected, [box2]) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -545,6 +545,8 @@ args = jumpop.getarglist() modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(args) + values = [self.getvalue(arg) + for arg in jumpop.getarglist()] debug_start('jit-log-virtualstate') virtual_state.debug_print("Looking for ") @@ -563,15 +565,14 @@ try: cpu = self.optimizer.cpu target.virtual_state.generate_guards(virtual_state, - args, cpu, + values, + cpu, extra_guards) ok = True debugmsg = 
'Guarded to match ' except InvalidLoop: pass - #else: - # import pdb; pdb.set_trace() if ok and not patchguardop: # if we can't patch the guards to go to a good target, no use # in jumping to this label @@ -590,8 +591,6 @@ if ok: debug_stop('jit-log-virtualstate') - values = [self.getvalue(arg) - for arg in jumpop.getarglist()] args = target.virtual_state.make_inputargs(values, self.optimizer, keyboxes=True) short_inputargs = target.short_preamble[0].getarglist() @@ -604,6 +603,7 @@ self.optimizer.send_extra_operation(guard) try: + # NB: the short_preamble ends with a jump for shop in target.short_preamble[1:]: newop = inliner.inline_op(shop) if newop.is_guard(): diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -4,7 +4,7 @@ from rpython.jit.metainterp.optimizeopt import virtualize from rpython.jit.metainterp.optimizeopt.intutils import IntUnbounded from rpython.jit.metainterp.optimizeopt.optimizer import (LEVEL_CONSTANT, - LEVEL_KNOWNCLASS, LEVEL_NONNULL, LEVEL_UNKNOWN) + LEVEL_KNOWNCLASS, LEVEL_NONNULL, LEVEL_UNKNOWN, OptValue) from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.debug import debug_start, debug_stop, debug_print from rpython.rlib.objectmodel import we_are_translated @@ -28,7 +28,8 @@ bad[self] = bad[other] = None return result - def generate_guards(self, other, box, cpu, extra_guards, renum): + def generate_guards(self, other, value, cpu, extra_guards, renum): + assert isinstance(value, OptValue) if self.generalization_of(other, renum, {}): return if renum[self.position] != other.position: @@ -36,9 +37,9 @@ 'match. 
This means that two virtual fields ' + 'have been set to the same Box in one of the ' + 'virtual states but not in the other.') - self._generate_guards(other, box, cpu, extra_guards) + self._generate_guards(other, value, cpu, extra_guards, renum) - def _generate_guards(self, other, box, cpu, extra_guards): + def _generate_guards(self, other, value, cpu, extra_guards, renum): raise InvalidLoop('Generating guards for making the VirtualStates ' + 'at hand match have not been implemented') @@ -279,7 +280,8 @@ return False return True - def _generate_guards(self, other, box, cpu, extra_guards): + def _generate_guards(self, other, value, cpu, extra_guards, renum): + box = value.box if not isinstance(other, NotVirtualStateInfo): raise InvalidLoop('The VirtualStates does not match as a ' + 'virtual appears where a pointer is needed ' + @@ -408,11 +410,11 @@ return False return True - def generate_guards(self, other, args, cpu, extra_guards): - assert len(self.state) == len(other.state) == len(args) + def generate_guards(self, other, values, cpu, extra_guards): + assert len(self.state) == len(other.state) == len(values) renum = {} for i in range(len(self.state)): - self.state[i].generate_guards(other.state[i], args[i], + self.state[i].generate_guards(other.state[i], values[i], cpu, extra_guards, renum) def make_inputargs(self, values, optimizer, keyboxes=False): From noreply at buildbot.pypy.org Tue Apr 8 10:40:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 10:40:46 +0200 (CEST) Subject: [pypy-commit] extradoc stm-edit: Fix typos; fix one minor misunderstanding. Message-ID: <20140408084046.424691C10C2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-edit Changeset: r5190:6b61eeb6e772 Date: 2014-04-08 10:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/6b61eeb6e772/ Log: Fix typos; fix one minor misunderstanding. 
diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -54,13 +54,13 @@ major restructuring of the program and often need extreme care and extra knowledge to use them. -We propose implemention of +We propose an implemention of Transactional Memory in PyPy. This is a technique that recently came to the forefront of the multi-core scene. It promises to offer multi-core CPU -usage without the explicit multiprocessing or event techniques above, -and also should allow modifying the core of the event systems -mentioned above to enable the use of multiple cores without the explicit use of -the ``threading`` module by the user. +usage in a single process. +In particular, by modifying the core of the event systems +mentioned above, we will enable the use of multiple cores, without the +user needing to use explicitly the ``threading`` module. The first proposal was launched near the start of 2012 and has covered much of the fundamental research, up to the point of getting a first @@ -88,15 +88,16 @@ We currently estimate the final performance goal to be a slow-down of 25% to 40% from the current non-TM PyPy; i.e. running a fully serial application would take between -1.25 and 1.40x the time it takes in a regular PyPy. (This goal has +1.25 and 1.40x the time it takes in a regular PyPy. This goal has been reached already in some cases, but we need to make this result more -broadly applicable.) We feel confident that the performance of PyPy-TM will -running any suitable +broadly applicable. We feel confident that we can reach this goal more +generally: the performance of PyPy-TM running any suitable application should scale linearly or close-to-linearly with the number of processors. This means that starting with two cores, such applications should perform better than a non-TM PyPy. (All numbers presented here are comparing different versions of PyPy which all have -the JIT enabled.) +the JIT enabled. 
A "suitable application" is one without many conflicts; +see `goal 2`_.) You will find below a sketch of the `work plan`_. If more money than requested is collected, then the excess will be entered into the general @@ -148,8 +149,8 @@ Software Transactional Memory (STM) library currently used inside PyPy with a much smaller Hardware Transactional Memory (HTM) library based on hardware features and running on Haswell-generation processors. This -has been attempted by Remi Meier recently. However, it seems that we -see the scaling problems as expected: the current generation of HTM +has been attempted by Remi Meier recently. However, it seems that it +fails to scale as we would expect it to: the current generation of HTM processors is limited to run small-scale transactions. Even the default transaction size used in PyPy-STM is often too much for HTM; and reducing this size increases overhead without completely solving the @@ -166,8 +167,8 @@ independent objects that happens to live in the same cache line, which is usually 64 bytes). This is in contrast with the current PyPy-STM, which doesn't have false conflicts of this kind at all and might thus be -ultimately better for very-long-running transactions. We are not aware of -published research discussing issues of very-long-running transactions. +ultimately better for very-long-running transactions. We are not aware of +published research discussing issues of sub-cache-line false conflicts. Note that right now PyPy-STM has false conflicts within the same object, e.g. within a list or a dictionary; but we can easily do something @@ -184,17 +185,18 @@ While there have been early experiments on Hardware Transactional Memory with CPython (`Riley and Zilles (2006)`__, `Tabba (2010)`__), there has -been none in the past few years. The closest is an attempt using `Haswell on the +been none in the past few years. To the best of our knowledge, +the closest is an attempt using `Haswell on the Ruby interpreter`__. 
None of these attempts tries to do the same using Software Transactional Memory. We would nowadays consider it possible to adapt our stmgc-c7 library for CPython, but it would be a lot of -work, starting from changing the reference-counting garbage colleciton scheme. PyPy is +work, starting from changing the reference-counting garbage collection scheme. PyPy is better designed to be open to this kind of research. -However, the best argument from an objective point of view is probably that -PyPy has already implemented a JIT. It is thus starting from a better -position in terms of performance, particularly for the long-running kind -of programs that we target here. +However, the best argument from an objective point of view is probably +that PyPy has already implemented a Just-in-Time compiler. It is thus +starting from a better position in terms of performance, particularly +for the long-running kind of programs that we target here. .. __: http://sabi.net/nriley/pubs/dls6-riley.pdf .. __: http://www.cs.auckland.ac.nz/~fuad/parpycan.pdf @@ -207,7 +209,7 @@ PyPy-TM will be slower than judicious usage of existing alternatives, based on multiple processes that communicate with each other in one way or another. The counter-argument is that TM is not only a cleaner -solution: there are cases in which it is not possilbe to organize (or +solution: there are cases in which it is not really possible to organize (or retrofit) an existing program into the particular format needed for the alternatives. In particular, small quickly-written programs don't need the additional baggage of cross-process communication; and large @@ -226,8 +228,8 @@ The way TM works right now would further divide this limit by N+1, where N is the number of segments. It might be possible to create partially different memory views for multiple threads that -each access the same range of addresses; this would require extensions -that are very OS-specific. We didn't investigate so far. 
+each access the same range of addresses; but this would likely require +changes inside the OS. We didn't investigate so far. The current 64-bit version relies heavily on Linux- and clang-only features. We believe it is a suitable From noreply at buildbot.pypy.org Tue Apr 8 10:40:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 10:40:47 +0200 (CEST) Subject: [pypy-commit] extradoc stm-edit: Close branch to merge Message-ID: <20140408084047.7CACC1C10C2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stm-edit Changeset: r5191:9c2061ad3677 Date: 2014-04-08 10:40 +0200 http://bitbucket.org/pypy/extradoc/changeset/9c2061ad3677/ Log: Close branch to merge From noreply at buildbot.pypy.org Tue Apr 8 10:40:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 10:40:48 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge stm-edit (thanks matti) Message-ID: <20140408084048.B75A71C10C2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5192:d39f9507bbbe Date: 2014-04-08 10:40 +0200 http://bitbucket.org/pypy/extradoc/changeset/d39f9507bbbe/ Log: merge stm-edit (thanks matti) diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -49,36 +49,36 @@ they can use the existing ``threading`` module, with its associated GIL and the complexities of real multi-threaded programming (locks, deadlocks, races, etc.), which make this solution less attractive. The -big alternative is for them to rely on one of various multi-process -solutions that are outside the scope of the core language. All of them require a -big restructuring of the program and often need extreme care and extra +most attractive alternative for most developers is to rely on one of various multi-process +solutions that are outside the scope of the core Python language. 
All of them require a +major restructuring of the program and often need extreme care and extra knowledge to use them. -The aim of this series of proposals is to research and implement +We propose an implemention of Transactional Memory in PyPy. This is a technique that recently came to the forefront of the multi-core scene. It promises to offer multi-core CPU -usage without requiring to fall back to the multi-process solutions -described above, and also should allow to change the core of the event systems -mentioned above to enable the use of multiple cores without the explicit use of -the ``threading`` module by the user. +usage in a single process. +In particular, by modifying the core of the event systems +mentioned above, we will enable the use of multiple cores, without the +user needing to use explicitly the ``threading`` module. The first proposal was launched near the start of 2012 and has covered -the fundamental research part, up to the point of getting a first +much of the fundamental research, up to the point of getting a first version of PyPy working in a very roughly reasonable state (after collecting about USD$27'000, which is little more than half of the money -that was asked; hence the present second call for donations). +that was sought; hence the present second call for donations). -This second proposal aims at fixing the remaining issues until we get a -really good GIL-free PyPy (described in `goal 1`_ below); and then we -will focus on the various new features needed to actually use multiple +We now propose fixing the remaining issues to obtaining a +really good GIL-free PyPy (described in `goal 1`_ below). 
We +will then focus on the various new features needed to actually use multiple cores without explicitly using multithreading (`goal 2`_ below), up to -and including adapting some existing framework libraries like for +and including adapting some existing framework libraries, for example Twisted, Tornado, Stackless, or gevent (`goal 3`_ below). -In more details -=============== +In more detail +============== This is a call for financial help in implementing a version of PyPy able to use multiple processors in a single process, called PyPy-TM; and @@ -87,16 +87,17 @@ Armin Rigo and Remi Meier and possibly others. We currently estimate the final performance goal to be a slow-down of -25% to 40%, i.e. running a fully serial application would take between -1.25 and 1.40x the time it takes in a regular PyPy. (This goal has +25% to 40% from the current non-TM PyPy; i.e. running a fully serial application would take between +1.25 and 1.40x the time it takes in a regular PyPy. This goal has been reached already in some cases, but we need to make this result more -broadly applicable.) We feel confident that it can work, in the -following sense: the performance of PyPy-TM running any suitable +broadly applicable. We feel confident that we can reach this goal more +generally: the performance of PyPy-TM running any suitable application should scale linearly or close-to-linearly with the number of processors. This means that starting with two cores, such -applications should perform better than in a regular PyPy. (All numbers +applications should perform better than a non-TM PyPy. (All numbers presented here are comparing different versions of PyPy which all have -the JIT enabled.) +the JIT enabled. A "suitable application" is one without many conflicts; +see `goal 2`_.) You will find below a sketch of the `work plan`_. 
If more money than requested is collected, then the excess will be entered into the general @@ -148,8 +149,8 @@ Software Transactional Memory (STM) library currently used inside PyPy with a much smaller Hardware Transactional Memory (HTM) library based on hardware features and running on Haswell-generation processors. This -has been attempted by Remi Meier recently. However, it seems that we -see scaling problems (as we expected them): the current generation of HTM +has been attempted by Remi Meier recently. However, it seems that it +fails to scale as we would expect it to: the current generation of HTM processors is limited to run small-scale transactions. Even the default transaction size used in PyPy-STM is often too much for HTM; and reducing this size increases overhead without completely solving the @@ -162,15 +163,15 @@ generally. A CPU with support for the virtual memory described in this paper would certainly be better for running PyPy-HTM. -Another issue is sub-cache-line false conflicts (conflicts caused by two +Another issue in HTM is sub-cache-line false conflicts (conflicts caused by two independent objects that happens to live in the same cache line, which is usually 64 bytes). This is in contrast with the current PyPy-STM, which doesn't have false conflicts of this kind at all and might thus be -ultimately better for very-long-running transactions. None of the -papers we know of discusses this issue. +ultimately better for very-long-running transactions. We are not aware of +published research discussing issues of sub-cache-line false conflicts. Note that right now PyPy-STM has false conflicts within the same object, -e.g. within a list or a dictionary; but we can more easily do something +e.g. within a list or a dictionary; but we can easily do something about it (see `goal 2_`). 
Also, it might be possible in PyPy-HTM to arrange objects in memory ahead of time so that such conflicts are very rare; but we will never get a rate of exactly 0%, which might be @@ -179,22 +180,23 @@ .. _`Virtualizing Transactional Memory`: http://pages.cs.wisc.edu/~isca2005/papers/08A-02.PDF -Why do it with PyPy instead of CPython? +Why do TM with PyPy instead of CPython? --------------------------------------- While there have been early experiments on Hardware Transactional Memory with CPython (`Riley and Zilles (2006)`__, `Tabba (2010)`__), there has -been no recent one. The closest is an attempt using `Haswell on the +been none in the past few years. To the best of our knowledge, +the closest is an attempt using `Haswell on the Ruby interpreter`__. None of these attempts tries to do the same using Software Transactional Memory. We would nowadays consider it possible to adapt our stmgc-c7 library for CPython, but it would be a lot of -work, starting from changing the reference-counting scheme. PyPy is +work, starting from changing the reference-counting garbage collection scheme. PyPy is better designed to be open to this kind of research. -But the best argument from an external point of view is probably that -PyPy has got a JIT to start with. It is thus starting from a better -position in terms of performance, particularly for the long-running kind -of programs that we target here. +However, the best argument from an objective point of view is probably +that PyPy has already implemented a Just-in-Time compiler. It is thus +starting from a better position in terms of performance, particularly +for the long-running kind of programs that we target here. .. __: http://sabi.net/nriley/pubs/dls6-riley.pdf .. __: http://www.cs.auckland.ac.nz/~fuad/parpycan.pdf @@ -207,7 +209,7 @@ PyPy-TM will be slower than judicious usage of existing alternatives, based on multiple processes that communicate with each other in one way or another. 
The counter-argument is that TM is not only a cleaner -solution: there are cases in which it is not doable to organize (or +solution: there are cases in which it is not really possible to organize (or retrofit) an existing program into the particular format needed for the alternatives. In particular, small quickly-written programs don't need the additional baggage of cross-process communication; and large @@ -217,35 +219,35 @@ rest of the program should work without changes. -Other platforms than the x86-64 Linux +Platforms other than the x86-64 Linux ------------------------------------- -The first thing to note is that the current solution depends on having a -huge address space available. If it were to be ported to any 32-bit -architecture, the limitation to 2GB or 4GB of address space would become -very restrictive: the way it works right now would further divide this +The current solution depends on having a +huge address space available. Porting to any 32-bit +architecture would quickly run into the limitation of a 2GB or 4GB of address space. +The way TM works right now would further divide this limit by N+1, where N is the number of segments. It might be possible to create partially different memory views for multiple threads that -each access the same range of addresses; this would require extensions -that are very OS-specific. We didn't investigate so far. +each access the same range of addresses; but this would likely require +changes inside the OS. We didn't investigate so far. -The current version, which thus only works on 64-bit, still relies +The current 64-bit version relies heavily on Linux- and clang-only features. We believe it is a suitable restriction: a lot of multi- and many-core servers commonly available are nowadays x86-64 machines running Linux. Nevertheless, non-Linux solutions appear to be possible as well. 
OS/X (and likely the various BSDs) seems to handle ``mmap()`` better than Linux does, and can remap individual pages of an existing mapping to various pages without hitting -a limit of 65536 like Linux. Windows might also have a way, although we -didn't measure yet; but the first issue with Windows would be to support -Win64, which the regular PyPy doesn't. +a limit of 65536 like Linux. Windows might also have a solution, although we +didn't measure yet; but first we would need a 64-bit Windows PyPy, which has +not seen much active support. -We will likely explore the OS/X way (as well as the Windows way if Win64 -support grows in PyPy), but this is not included in the scope of this -proposal. +We will likely explore the OS/X path (as well as the Windows path if Win64 +support grows in PyPy), but this is not part of this current +donation proposal. It might be possible to adapt the work done on x86-64 to the 64-bit -ARMv8 as well, but we didn't investigate so far. +ARMv8 as well. We have not investigated this so far. More readings From noreply at buildbot.pypy.org Tue Apr 8 11:26:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 11:26:35 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: As discussed on various places, adapt the amount sought to more reasonable levels. Message-ID: <20140408092635.2CF701C0467@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5193:5cdee90f4821 Date: 2014-04-08 11:26 +0200 http://bitbucket.org/pypy/extradoc/changeset/5cdee90f4821/ Log: As discussed on various places, adapt the amount sought to more reasonable levels. It is now 80'000$, corresponding to a half-time at 60$/hour for 16 months. diff --git a/planning/tmdonate2.txt b/planning/tmdonate2.txt --- a/planning/tmdonate2.txt +++ b/planning/tmdonate2.txt @@ -99,14 +99,7 @@ the JIT enabled. A "suitable application" is one without many conflicts; see `goal 2`_.) -You will find below a sketch of the `work plan`_. 
If more money than -requested is collected, then the excess will be entered into the general -PyPy pot, used for example to finance sprint travel costs to students. - -**Note** For donations higher than $1,000, we can arrange for an invoice -and a different payment method to avoid the high Paypal fees. Please -contact pypy at sfconservancy.org if you want to know details on how -to donate via other means. +You will find below a sketch of the `work plan`_. We start with a Q&A. What is the Global Interpreter Lock? @@ -266,17 +259,29 @@ complete the steps for an experienced developer who is already familiar with the PyPy codebase. As before, we cannot guarantee the time estimates here, but we do agree to report regularly to the community, so -our progress can be followed publicly. +our progress can be followed publicly. We currently expect the duration +of the whole project to be up to two years starting from April 2014. Paid work will be at $60/hour, but at least one developer who will work -on the project – Armin Rigo – has committed to 2 hours of volunteer -work per paid hour (so the total amount of money that we ask is divided -by three); and another developer – Remi Meier – is a Ph.D. student -and gets paid from another source already. +on the project – Armin Rigo – has committed to one hour of volunteer +work per paid hour; and another developer – Remi Meier – is a Ph.D. student +and gets paid from another source already. The total amount of money +that we ask below corresponds roughly to one half-time job. + A 10% general donation will go to the `Software Freedom Conservancy`_ itself, the non-profit organization of which the PyPy project is a member and which manages all the issues related to donations, payments, and tax-exempt status. +An extra fraction of the money collected will be entered into the +general PyPy pot, used for example to finance sprint travel costs to +students. 
This fraction is 10% maximum, unless more money than +requested is collected, in which case the whole excess will go to +the general PyPy pot. + +**Note** For donations higher than $1,000, we can arrange for an invoice +and a different payment method to avoid the high Paypal fees. Please +contact pypy at sfconservancy.org if you want to know details on how +to donate via other means. .. _`Software Freedom Conservancy`: http://sfconservancy.org/ @@ -394,13 +399,12 @@ ------- We forecast that goal 1 and a good chunk of goal 2 should be reached in -around 4 months of work. The remaining parts of goal 2 as well as goal +around 6 months of work. The remaining parts of goal 2 as well as goal 3 are likely to be more open-ended jobs. We will go with a total -estimate of 8 months, corresponding to roughly the second half of the -`first call for donations`_ which was not covered so far. This -corresponds to USD$80640. The amount sought by this fundraising -campaign, considering the 2 volunteer hours per paid hour is thus -USD$26880. +estimate of two years in order to get a final, well-tested PyPy-STM with +stable performance. The amount sought by this fundraising campaign is +USD$80'000, corresponding to one half-time job for 16 months (1200 hours +at $60/hour plus 10% overhead). Benefits of This Work to the Python Community and the General Public From noreply at buildbot.pypy.org Tue Apr 8 14:56:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 14:56:57 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Copy from extradoc/planning/tmdonate2.txt. Message-ID: <20140408125657.019A51D2380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r478:1e46b3b476e8 Date: 2014-04-08 14:56 +0200 http://bitbucket.org/pypy/pypy.org/changeset/1e46b3b476e8/ Log: Copy from extradoc/planning/tmdonate2.txt. 
diff --git a/source/tmdonate2.txt b/source/tmdonate2.txt new file mode 100644 --- /dev/null +++ b/source/tmdonate2.txt @@ -0,0 +1,447 @@ +--- +layout: page +title: 2nd Call for donations - Transactional Memory in PyPy +--- + +============================== +Transactional Memory, 2nd Call +============================== + + +This is the second call for donations on the topic of Transactional +Memory (TM) in PyPy, a way to run CPU-hungry Python programs in +multithreaded mode. It is a follow-up on our `first call for +donations`_ from two years ago. At that time, we suggested a +single-threaded slow-down of somewhere between 2x and 5x. The aim that +seems now within reach is rather closer to 1.25x, i.e. running only 25% +slower than the regular PyPy. + +We achieved – or overachieved – most goals laid out in the first call by +a large margin, while at the same time raising only about half the +money. The result of this first step is `described in the documentation +of PyPy`__. + +The present proposal is about development of the second half: starting +from the various missing low-level optimizations, it will most +importantly focus on developing the Python-facing interface. This +includes both internal things (e.g. do dictionaries need to be more +TM-friendly in general?) as well as directly visible things (e.g. some +profiler-like interface to explore common conflicts in a program). It +also includes exploring and tweaking some existing libraries to improve +their TM-friendliness (e.g. Twisted and Stackless). + +See also the `update on HTM`_ below. + +.. _`first call for donations`: http://pypy.org/tmdonate.html +.. __: https://pypy.readthedocs.org/en/latest/stm.html + + + +Introduction +============ + +In the presence of today's machines with multiple processors, Python +progress is lagging behind: on any CPU-constrained program, developers +have a difficult choice to make. They can use in-process solutions that +do not offer multi-CPU usage. 
In this respect, the natural choice +nowadays is to use Twisted or other event-based paradigms, or systems +that hide events in the control flow, like Stackless; or alternatively, +they can use the existing ``threading`` module, with its associated GIL +and the complexities of real multi-threaded programming (locks, +deadlocks, races, etc.), which make this solution less attractive. The +most attractive alternative for most developers is to rely on one of various multi-process +solutions that are outside the scope of the core Python language. All of them require a +major restructuring of the program and often need extreme care and extra +knowledge to use them. + +We propose an implemention of +Transactional Memory in PyPy. This is a technique that recently came to +the forefront of the multi-core scene. It promises to offer multi-core CPU +usage in a single process. +In particular, by modifying the core of the event systems +mentioned above, we will enable the use of multiple cores, without the +user needing to use explicitly the ``threading`` module. + +The first proposal was launched near the start of 2012 and has covered +much of the fundamental research, up to the point of getting a first +version of PyPy working in a very roughly reasonable state (after +collecting about USD$27'000, which is little more than half of the money +that was sought; hence the present second call for donations). + +We now propose fixing the remaining issues to obtaining a +really good GIL-free PyPy (described in `goal 1`_ below). We +will then focus on the various new features needed to actually use multiple +cores without explicitly using multithreading (`goal 2`_ below), up to +and including adapting some existing framework libraries, for +example Twisted, Tornado, Stackless, or gevent (`goal 3`_ below). 
+ + + +In more detail +============== + +This is a call for financial help in implementing a version of PyPy able +to use multiple processors in a single process, called PyPy-TM; and +developing the APIs and libraries needed as well as enhancing commonly +available frameworks to use the new feature. The developers will be +Armin Rigo and Remi Meier and possibly others. + +We currently estimate the final performance goal to be a slow-down of +25% to 40% from the current non-TM PyPy; i.e. running a fully serial application would take between +1.25 and 1.40x the time it takes in a regular PyPy. This goal has +been reached already in some cases, but we need to make this result more +broadly applicable. We feel confident that we can reach this goal more +generally: the performance of PyPy-TM running any suitable +application should scale linearly or close-to-linearly with the number +of processors. This means that starting with two cores, such +applications should perform better than a non-TM PyPy. (All numbers +presented here are comparing different versions of PyPy which all have +the JIT enabled. A "suitable application" is one without many conflicts; +see `goal 2`_.) + +You will find below a sketch of the `work plan`_. We start with a Q&A. + + +What is the Global Interpreter Lock? +------------------------------------ + +The GIL, or Global Interpreter Lock, is a single lock in both CPython +and the regular PyPy. Every thread must acquire it in order to execute +Python bytecodes. This means that both with CPython and with the +regular PyPy, Python programs do not gain any benefit in term of +multicore performance even if they are using threads. + + +What is Transactional Memory? +----------------------------- + +`Transactional Memory`_ (TM) is a technique imported from +databases: every time we want to do a change to the processors' main +memory, we do it in a "transaction". Multiple transactions can be +executed in parallel by multiple cores. 
When a transaction is complete, +we try to commit it. This might either succeed, or (if another +transaction committed incompatible changes) fail. If it fails, which is +hopefully rare, we need to restart the transaction from scratch. + +Transactional Memory research has progressed a lot since two years ago, +notably with the introduction of Intel's Haswell_ processors, which +offer Hardware Transactional Memory (HTM). We discuss below why we +think HTM is, so far, still not suitable for our goals. + +.. _`Transactional Memory`: http://en.wikipedia.org/wiki/Transactional_memory +.. _Haswell: http://en.wikipedia.org/wiki/Haswell_%28microarchitecture%29 + + +.. _`update on HTM`: + +Hardware vs Software Transactional Memory +----------------------------------------- + +The idea of Transactional Memory was recently made popular by Intel's +Haswell_ processor (released in 2013). We could replace most of the +Software Transactional Memory (STM) library currently used inside PyPy +with a much smaller Hardware Transactional Memory (HTM) library based on +hardware features and running on Haswell-generation processors. This +has been attempted by Remi Meier recently. However, it seems that it +fails to scale as we would expect it to: the current generation of HTM +processors is limited to run small-scale transactions. Even the default +transaction size used in PyPy-STM is often too much for HTM; and +reducing this size increases overhead without completely solving the +problem. Based on this experience, it seems safe to say that right now +HTM-enabled processors lack the support that we need. + +Future processors might improve on various aspects. We are particularly +interested in `Virtualizing Transactional Memory`_, a 2005 paper that +describes the limits that we're running into and how to solve them more +generally. A CPU with support for the virtual memory described in this +paper would certainly be better for running PyPy-HTM. 
+ +Another issue in HTM is sub-cache-line false conflicts (conflicts caused by two +independent objects that happen to live in the same cache line, which +is usually 64 bytes). This is in contrast with the current PyPy-STM, +which doesn't have false conflicts of this kind at all and might thus be +ultimately better for very-long-running transactions. We are not aware of +published research discussing issues of sub-cache-line false conflicts. + +Note that right now PyPy-STM has false conflicts within the same object, +e.g. within a list or a dictionary; but we can easily do something +about it (see `goal 2`_). Also, it might be possible in PyPy-HTM to +arrange objects in memory ahead of time so that such conflicts are very +rare; but we will never get a rate of exactly 0%, which might be +required for very-long-running transactions. + +.. _`Virtualizing Transactional Memory`: http://pages.cs.wisc.edu/~isca2005/papers/08A-02.PDF + + +Why do TM with PyPy instead of CPython? +--------------------------------------- + +While there have been early experiments on Hardware Transactional Memory +with CPython (`Riley and Zilles (2006)`__, `Tabba (2010)`__), there has +been none in the past few years. To the best of our knowledge, +the closest is an attempt using `Haswell on the +Ruby interpreter`__. None of these attempts tries to do the same using +Software Transactional Memory. We would nowadays consider it possible +to adapt our stmgc-c7 library for CPython, but it would be a lot of +work, starting from changing the reference-counting garbage collection scheme. PyPy is +better designed to be open to this kind of research. + +However, the best argument from an objective point of view is probably +that PyPy has already implemented a Just-in-Time compiler. It is thus +starting from a better position in terms of performance, particularly +for the long-running kind of programs that we target here. + +.. __: http://sabi.net/nriley/pubs/dls6-riley.pdf +.. 
__: http://www.cs.auckland.ac.nz/~fuad/parpycan.pdf +.. __: http://researcher.watson.ibm.com/researcher/files/jp-ODAIRA/PPoPP2014_RubyGILHTM.pdf + + +Alternatives +------------ + +PyPy-TM will be slower than judicious usage of existing alternatives, +based on multiple processes that communicate with each other in one way +or another. The counter-argument is that TM is not only a cleaner +solution: there are cases in which it is not really possible to organize (or +retrofit) an existing program into the particular format needed for the +alternatives. In particular, small quickly-written programs don't need +the additional baggage of cross-process communication; and large +programs can sometimes be almost impossible to turn into multi-process +versions. By contrast, we believe that TM can fit naturally into most +programs, because it only requires local changes to some dispatcher; the +rest of the program should work without changes. + + +Platforms other than the x86-64 Linux +------------------------------------- + +The current solution depends on having a +huge address space available. Porting to any 32-bit +architecture would quickly run into the limitation of a 2GB or 4GB of address space. +The way TM works right now would further divide this +limit by N+1, where N is the number of segments. It might be possible +to create partially different memory views for multiple threads that +each access the same range of addresses; but this would likely require +changes inside the OS. We didn't investigate so far. + +The current 64-bit version relies +heavily on Linux- and clang-only features. We believe it is a suitable +restriction: a lot of multi- and many-core servers commonly available +are nowadays x86-64 machines running Linux. Nevertheless, non-Linux +solutions appear to be possible as well. 
OS/X (and likely the various +BSDs) seems to handle ``mmap()`` better than Linux does, and can remap +individual pages of an existing mapping to various pages without hitting +a limit of 65536 like Linux. Windows might also have a solution, although we +didn't measure yet; but first we would need a 64-bit Windows PyPy, which has +not seen much active support. + +We will likely explore the OS/X path (as well as the Windows path if Win64 +support grows in PyPy), but this is not part of this current +donation proposal. + +It might be possible to adapt the work done on x86-64 to the 64-bit +ARMv8 as well. We have not investigated this so far. + + +More readings +------------- + +See `our blog posts about STM`__. + +.. __: http://morepypy.blogspot.com/search/label/stm + + + +Work plan +========= + +This is an very rough estimate of the amount of work it would take to +complete the steps for an experienced developer who is already familiar +with the PyPy codebase. As before, we cannot guarantee the time +estimates here, but we do agree to report regularly to the community, so +our progress can be followed publicly. We currently expect the duration +of the whole project to be up to two years starting from April 2014. + +Paid work will be at $60/hour, but at least one developer who will work +on the project – Armin Rigo – has committed to one hour of volunteer +work per paid hour; and another developer – Remi Meier – is a Ph.D. student +and gets paid from another source already. The total amount of money +that we ask below corresponds roughly to one half-time job. + +A 10% general donation will go to the `Software Freedom +Conservancy`_ itself, the non-profit organization of which the PyPy +project is a member and which manages all the issues related to +donations, payments, and tax-exempt status. +An extra fraction of the money collected will be entered into the +general PyPy pot, used for example to finance sprint travel costs to +students. 
This fraction is 10% maximum, unless more money than +requested is collected, in which case the whole excess will go to +the general PyPy pot. + +**Note** For donations higher than $1,000, we can arrange for an invoice +and a different payment method to avoid the high Paypal fees. Please +contact pypy at sfconservancy.org if you want to know details on how +to donate via other means. + +.. _`Software Freedom Conservancy`: http://sfconservancy.org/ + + +Goal 1 +------ + +The PyPy-TM that we have in the end of March 2014 is good enough in +some cases to run existing multithreaded code without a GIL, but not in +all of them. There are a number of caveats for the user and missing +optimizations. The goal #1 is to improve this case and address +the caveats. The current status is written down `in the docs`__ and +will evolve over time. + +.. __: https://pypy.readthedocs.org/en/latest/stm.html + +For future reference, at the end of March the main identified issues +are: + +* There are still a number of bugs. + +* The JIT warm-up time is abysmal. + +* The GC is missing a number of optimizations that are present in + a regular PyPy. + +* Destructors are not supported (``__del__()`` methods). + +* The STM bookkeeping logic could see more improvements. + +* Forking the process is slow. + +* We don't foresee particularly high conflict rates in regular + multithreaded programs, but this assertion needs to be checked + and possibly some heuristics improved. + +Fixing all these issues is required before we can confidently say that +PyPy-TM is an out-of-the-box replacement of a regular PyPy which gives +speed-ups over the regular PyPy independently of the Python program it +runs, as long as it is using at least two threads. + + +Goal 2 +------ + +This goal contains the various new features needed to use multiple cores +without explicitly using multithreading; in other words, the new APIs +and libraries accessible from Python programs that want to make use of +this benefit. 
+ +This goal requires good support for very-long-running transactions, +started with the ``with atomic`` construct documented here__. This +approach hides the notion of threads from the end programmer, including +all the hard multithreading-related issues. This is not the first +alternative approach to explicit threads; for example, OpenMP_ is one. +However, it is one of the first ones which does not require the code to +be organized in a particular fashion. Instead, it works on any Python +program which has got latent, imperfect parallelism. Ideally, it only +requires that the end programmer identifies where this parallelism is +likely to be found, and communicates it to the system, using some +lightweight library on top of ``with atomic``. + +However, this introduces new issues. The main one is that by forcing +transactions to be longer, "conflicts" will become more common, up to +the point of partially or completely offsetting the benefit of using +PyPy-TM in the first place. + +So the programmer using PyPy-TM needs a way to get +feedback about what conflicts we get in these long-running transactions, +and where they are produced. A first step will be to implement getting +"tracebacks" that point to the places where the most time is lost. This +could be later integrated into some "profiler"-like variant where we can +navigate the conflicts, either in a live program or based on data logs. + +Some of these conflicts can be solved by improving PyPy-TM directly. +The system works on the granularity of objects and doesn't generate +false conflicts, but some conflicts may be regarded as "false" anyway: +these involve most importantly the built-in dictionary type, for which +we would like accesses and writes using independent keys to be truly +independent. 
Other built-in data structures have a similar issue, like +lists: ideally, writes to different indexes should not cause conflicts; +but more generally, we would need a mechanism, possibly under the +control of the application, to do things like append an item to a list +in a "delayed" manner, to avoid conflicts. + +.. __: https://pypy.readthedocs.org/en/latest/stm.html +.. _OpenMP: http://en.wikipedia.org/wiki/OpenMP + +Similarly, we might need a way to delay some I/O: doing it only at the +end of the transaction rather than immediately, in order to prevent the +whole transaction from turning inevitable. + +The goal 2 is thus the development of tools to inspect and fix the +causes of conflicts, as well as fixing the ones that are apparent inside +PyPy-TM directly. + + +Goal 3 +------ + +The third goal is to look at some existing event-based frameworks (for +example Twisted, Tornado, Stackless, gevent, ...) and attempt to make +them use threads and atomic sections internally. We would appreciate +help and feedback from people more involved in these frameworks, of +course. + +The idea is to apply the techniques described in the `goal 2`_ until we +get a version of framework X which can transparently parallelize the +dispatching and execution of multiple events. This might require some slight +reorganization of the core in order to split the I/O and the actual +logic into separate transactions. + + +Funding +------- + +We forecast that goal 1 and a good chunk of goal 2 should be reached in +around 6 months of work. The remaining parts of goal 2 as well as goal +3 are likely to be more open-ended jobs. We will go with a total +estimate of two years in order to get a final, well-tested PyPy-STM with +stable performance. The amount sought by this fundraising campaign is +USD$80'000, corresponding to one half-time job for 16 months (1200 hours +at $60/hour plus 10% overhead). 
+ + +Benefits of This Work to the Python Community and the General Public +==================================================================== + +Python has become one of the most popular dynamic programming languages in +the world. Web developers, educators, and scientific programmers alike +all value Python because Python code is often more readable and because +Python often increases programmer productivity. + +Traditionally, languages like Python ran more slowly than static, compiled +languages; Python developers chose to sacrifice execution speed for ease +of programming. The PyPy project created a substantially improved Python +language implementation, including a fast Just-in-time (JIT) compiler. +The increased execution speed that PyPy provides has attracted many users, +who now find their Python code runs between 2 and 50 times faster under PyPy +than under the reference implementation written in C. + +However, in the presence of today's machines with multiple processors, +Python progress lags behind. The issue has been described in the +introduction: developers that really need to use multiple CPUs are +constrained to select and use one of the multi-process solutions that +are all in some way or another hacks requiring extra knowledge and +efforts to use. The focus of the work described in this proposal is to +offer an alternative in the core of the Python language — an +alternative that can naturally integrate with the rest of the program. +This alternative is implemented in PyPy. + +PyPy's developers make all PyPy software available to the public without +charge, under PyPy's Open Source copyright license, the permissive MIT +License. PyPy's license assures that PyPy is equally available to +everyone freely on terms that allow both non-commercial and commercial +activity. This license allows for academics, for-profit software +developers, volunteers and enthusiasts alike to collaborate together to +make a better Python implementation for everyone. 
+ +PyPy-TM is and continues to be available under the same license. Being +licensed freely to the general public means that opportunities to use, +improve and learn about how Transactional Memory works itself will be +generally available to everyone. diff --git a/tmdonate2.html b/tmdonate2.html new file mode 100644 --- /dev/null +++ b/tmdonate2.html @@ -0,0 +1,399 @@ + + + + PyPy - 2nd Call for donations - Transactional Memory in PyPy + + + + + + + + + + + + + + + + + +

+ +
+
+
+

2nd Call for donations - Transactional Memory in PyPy

+

This is the second call for donations on the topic of Transactional +Memory (TM) in PyPy, a way to run CPU-hungry Python programs in +multithreaded mode. It is a follow-up on our first call for +donations from two years ago. At that time, we suggested a +single-threaded slow-down of somewhere between 2x and 5x. The aim that +seems now within reach is rather closer to 1.25x, i.e. running only 25% +slower than the regular PyPy.

+

We achieved – or overachieved – most goals laid out in the first call by +a large margin, while at the same time raising only about half the +money. The result of this first step is described in the documentation +of PyPy.

+

The present proposal is about development of the second half: starting +from the various missing low-level optimizations, it will most +importantly focus on developing the Python-facing interface. This +includes both internal things (e.g. do dictionaries need to be more +TM-friendly in general?) as well as directly visible things (e.g. some +profiler-like interface to explore common conflicts in a program). It +also includes exploring and tweaking some existing libraries to improve +their TM-friendliness (e.g. Twisted and Stackless).

+

See also the update on HTM below.

+
+

Introduction

+

In the presence of today's machines with multiple processors, Python +progress is lagging behind: on any CPU-constrained program, developers +have a difficult choice to make. They can use in-process solutions that +do not offer multi-CPU usage. In this respect, the natural choice +nowadays is to use Twisted or other event-based paradigms, or systems +that hide events in the control flow, like Stackless; or alternatively, +they can use the existing threading module, with its associated GIL +and the complexities of real multi-threaded programming (locks, +deadlocks, races, etc.), which make this solution less attractive. The +most attractive alternative for most developers is to rely on one of various multi-process +solutions that are outside the scope of the core Python language. All of them require a +major restructuring of the program and often need extreme care and extra +knowledge to use them.

+

We propose an implementation of +Transactional Memory in PyPy. This is a technique that recently came to +the forefront of the multi-core scene. It promises to offer multi-core CPU +usage in a single process. +In particular, by modifying the core of the event systems +mentioned above, we will enable the use of multiple cores, without the +user needing to use explicitly the threading module.

+

The first proposal was launched near the start of 2012 and has covered +much of the fundamental research, up to the point of getting a first +version of PyPy working in a very roughly reasonable state (after +collecting about USD$27'000, which is little more than half of the money +that was sought; hence the present second call for donations).

+

We now propose fixing the remaining issues to obtain a +really good GIL-free PyPy (described in goal 1 below). We +will then focus on the various new features needed to actually use multiple +cores without explicitly using multithreading (goal 2 below), up to +and including adapting some existing framework libraries, for +example Twisted, Tornado, Stackless, or gevent (goal 3 below).

+
+
+

In more detail

+

This is a call for financial help in implementing a version of PyPy able +to use multiple processors in a single process, called PyPy-TM; and +developing the APIs and libraries needed as well as enhancing commonly +available frameworks to use the new feature. The developers will be +Armin Rigo and Remi Meier and possibly others.

+

We currently estimate the final performance goal to be a slow-down of +25% to 40% from the current non-TM PyPy; i.e. running a fully serial application would take between +1.25 and 1.40x the time it takes in a regular PyPy. This goal has +been reached already in some cases, but we need to make this result more +broadly applicable. We feel confident that we can reach this goal more +generally: the performance of PyPy-TM running any suitable +application should scale linearly or close-to-linearly with the number +of processors. This means that starting with two cores, such +applications should perform better than a non-TM PyPy. (All numbers +presented here are comparing different versions of PyPy which all have +the JIT enabled. A “suitable application” is one without many conflicts; +see goal 2.)

+

You will find below a sketch of the work plan. We start with a Q&A.

+
+

What is the Global Interpreter Lock?

+

The GIL, or Global Interpreter Lock, is a single lock in both CPython +and the regular PyPy. Every thread must acquire it in order to execute +Python bytecodes. This means that both with CPython and with the +regular PyPy, Python programs do not gain any benefit in terms of +multicore performance even if they are using threads.

+
+
+

What is Transactional Memory?

+

Transactional Memory (TM) is a technique imported from +databases: every time we want to do a change to the processors' main +memory, we do it in a “transaction”. Multiple transactions can be +executed in parallel by multiple cores. When a transaction is complete, +we try to commit it. This might either succeed, or (if another +transaction committed incompatible changes) fail. If it fails, which is +hopefully rare, we need to restart the transaction from scratch.

+

Transactional Memory research has progressed a lot since two years ago, +notably with the introduction of Intel's Haswell processors, which +offer Hardware Transactional Memory (HTM). We discuss below why we +think HTM is, so far, still not suitable for our goals.

+
+
+

Hardware vs Software Transactional Memory

+

The idea of Transactional Memory was recently made popular by Intel's +Haswell processor (released in 2013). We could replace most of the +Software Transactional Memory (STM) library currently used inside PyPy +with a much smaller Hardware Transactional Memory (HTM) library based on +hardware features and running on Haswell-generation processors. This +has been attempted by Remi Meier recently. However, it seems that it +fails to scale as we would expect it to: the current generation of HTM +processors is limited to run small-scale transactions. Even the default +transaction size used in PyPy-STM is often too much for HTM; and +reducing this size increases overhead without completely solving the +problem. Based on this experience, it seems safe to say that right now +HTM-enabled processors lack the support that we need.

+

Future processors might improve on various aspects. We are particularly +interested in Virtualizing Transactional Memory, a 2005 paper that +describes the limits that we're running into and how to solve them more +generally. A CPU with support for the virtual memory described in this +paper would certainly be better for running PyPy-HTM.

+

Another issue in HTM is sub-cache-line false conflicts (conflicts caused by two +independent objects that happen to live in the same cache line, which +is usually 64 bytes). This is in contrast with the current PyPy-STM, +which doesn't have false conflicts of this kind at all and might thus be +ultimately better for very-long-running transactions. We are not aware of +published research discussing issues of sub-cache-line false conflicts.

+

Note that right now PyPy-STM has false conflicts within the same object, +e.g. within a list or a dictionary; but we can easily do something +about it (see goal 2). Also, it might be possible in PyPy-HTM to +arrange objects in memory ahead of time so that such conflicts are very +rare; but we will never get a rate of exactly 0%, which might be +required for very-long-running transactions.

+
+
+

Why do TM with PyPy instead of CPython?

+

While there have been early experiments on Hardware Transactional Memory +with CPython (Riley and Zilles (2006), Tabba (2010)), there has +been none in the past few years. To the best of our knowledge, +the closest is an attempt using Haswell on the +Ruby interpreter. None of these attempts tries to do the same using +Software Transactional Memory. We would nowadays consider it possible +to adapt our stmgc-c7 library for CPython, but it would be a lot of +work, starting from changing the reference-counting garbage collection scheme. PyPy is +better designed to be open to this kind of research.

+

However, the best argument from an objective point of view is probably +that PyPy has already implemented a Just-in-Time compiler. It is thus +starting from a better position in terms of performance, particularly +for the long-running kind of programs that we target here.

+
+
+

Alternatives

+

PyPy-TM will be slower than judicious usage of existing alternatives, +based on multiple processes that communicate with each other in one way +or another. The counter-argument is that TM is not only a cleaner +solution: there are cases in which it is not really possible to organize (or +retrofit) an existing program into the particular format needed for the +alternatives. In particular, small quickly-written programs don't need +the additional baggage of cross-process communication; and large +programs can sometimes be almost impossible to turn into multi-process +versions. By contrast, we believe that TM can fit naturally into most +programs, because it only requires local changes to some dispatcher; the +rest of the program should work without changes.

+
+
+

Platforms other than the x86-64 Linux

+

The current solution depends on having a +huge address space available. Porting to any 32-bit +architecture would quickly run into the limitation of a 2GB or 4GB of address space. +The way TM works right now would further divide this +limit by N+1, where N is the number of segments. It might be possible +to create partially different memory views for multiple threads that +each access the same range of addresses; but this would likely require +changes inside the OS. We didn't investigate so far.

+

The current 64-bit version relies +heavily on Linux- and clang-only features. We believe it is a suitable +restriction: a lot of multi- and many-core servers commonly available +are nowadays x86-64 machines running Linux. Nevertheless, non-Linux +solutions appear to be possible as well. OS/X (and likely the various +BSDs) seems to handle mmap() better than Linux does, and can remap +individual pages of an existing mapping to various pages without hitting +a limit of 65536 like Linux. Windows might also have a solution, although we +didn't measure yet; but first we would need a 64-bit Windows PyPy, which has +not seen much active support.

+

We will likely explore the OS/X path (as well as the Windows path if Win64 +support grows in PyPy), but this is not part of this current +donation proposal.

+

It might be possible to adapt the work done on x86-64 to the 64-bit +ARMv8 as well. We have not investigated this so far.

+
+
+

More readings

+

See our blog posts about STM.

+
+
+
+

Work plan

+

This is a very rough estimate of the amount of work it would take to +complete the steps for an experienced developer who is already familiar +with the PyPy codebase. As before, we cannot guarantee the time +estimates here, but we do agree to report regularly to the community, so +our progress can be followed publicly. We currently expect the duration +of the whole project to be up to two years starting from April 2014.

+

Paid work will be at $60/hour, but at least one developer who will work +on the project – Armin Rigo – has committed to one hour of volunteer +work per paid hour; and another developer – Remi Meier – is a Ph.D. student +and gets paid from another source already. The total amount of money +that we ask below corresponds roughly to one half-time job.

+

A 10% general donation will go to the Software Freedom +Conservancy itself, the non-profit organization of which the PyPy +project is a member and which manages all the issues related to +donations, payments, and tax-exempt status. +An extra fraction of the money collected will be entered into the +general PyPy pot, used for example to finance sprint travel costs to +students. This fraction is 10% maximum, unless more money than +requested is collected, in which case the whole excess will go to +the general PyPy pot.

+

Note For donations higher than $1,000, we can arrange for an invoice +and a different payment method to avoid the high Paypal fees. Please +contact pypy at sfconservancy.org if you want to know details on how +to donate via other means.

+
+

Goal 1

+

The PyPy-TM that we have at the end of March 2014 is good enough in +some cases to run existing multithreaded code without a GIL, but not in +all of them. There are a number of caveats for the user and missing +optimizations. The goal #1 is to improve this case and address +the caveats. The current status is written down in the docs and +will evolve over time.

+

For future reference, at the end of March the main identified issues +are:

+
    +
  • There are still a number of bugs.
  • +
  • The JIT warm-up time is abysmal.
  • +
  • The GC is missing a number of optimizations that are present in +a regular PyPy.
  • +
  • Destructors are not supported (__del__() methods).
  • +
  • The STM bookkeeping logic could see more improvements.
  • +
  • Forking the process is slow.
  • +
  • We don't foresee particularly high conflict rates in regular +multithreaded programs, but this assertion needs to be checked +and possibly some heuristics improved.
  • +
+

Fixing all these issues is required before we can confidently say that +PyPy-TM is an out-of-the-box replacement of a regular PyPy which gives +speed-ups over the regular PyPy independently of the Python program it +runs, as long as it is using at least two threads.

+
+
+

Goal 2

+

This goal contains the various new features needed to use multiple cores +without explicitly using multithreading; in other words, the new APIs +and libraries accessible from Python programs that want to make use of +this benefit.

+

This goal requires good support for very-long-running transactions, +started with the with atomic construct documented here. This +approach hides the notion of threads from the end programmer, including +all the hard multithreading-related issues. This is not the first +alternative approach to explicit threads; for example, OpenMP is one. +However, it is one of the first ones which does not require the code to +be organized in a particular fashion. Instead, it works on any Python +program which has got latent, imperfect parallelism. Ideally, it only +requires that the end programmer identifies where this parallelism is +likely to be found, and communicates it to the system, using some +lightweight library on top of with atomic.

+

However, this introduces new issues. The main one is that by forcing +transactions to be longer, “conflicts” will become more common, up to +the point of partially or completely offsetting the benefit of using +PyPy-TM in the first place.

+

So the programmer using PyPy-TM needs a way to get +feedback about what conflicts we get in these long-running transactions, +and where they are produced. A first step will be to implement getting +“tracebacks” that point to the places where the most time is lost. This +could be later integrated into some “profiler”-like variant where we can +navigate the conflicts, either in a live program or based on data logs.

+

Some of these conflicts can be solved by improving PyPy-TM directly. +The system works on the granularity of objects and doesn't generate +false conflicts, but some conflicts may be regarded as “false” anyway: +these involve most importantly the built-in dictionary type, for which +we would like accesses and writes using independent keys to be truly +independent. Other built-in data structures have a similar issue, like +lists: ideally, writes to different indexes should not cause conflicts; +but more generally, we would need a mechanism, possibly under the +control of the application, to do things like append an item to a list +in a “delayed” manner, to avoid conflicts.

+

Similarly, we might need a way to delay some I/O: doing it only at the +end of the transaction rather than immediately, in order to prevent the +whole transaction from turning inevitable.

+

The goal 2 is thus the development of tools to inspect and fix the +causes of conflicts, as well as fixing the ones that are apparent inside +PyPy-TM directly.

+
+
+

Goal 3

+

The third goal is to look at some existing event-based frameworks (for +example Twisted, Tornado, Stackless, gevent, …) and attempt to make +them use threads and atomic sections internally. We would appreciate +help and feedback from people more involved in these frameworks, of +course.

+

The idea is to apply the techniques described in the goal 2 until we +get a version of framework X which can transparently parallelize the +dispatching and execution of multiple events. This might require some slight +reorganization of the core in order to split the I/O and the actual +logic into separate transactions.

+
+
+

Funding

+

We forecast that goal 1 and a good chunk of goal 2 should be reached in +around 6 months of work. The remaining parts of goal 2 as well as goal +3 are likely to be more open-ended jobs. We will go with a total +estimate of two years in order to get a final, well-tested PyPy-STM with +stable performance. The amount sought by this fundraising campaign is +USD$80'000, corresponding to one half-time job for 16 months (1200 hours +at $60/hour plus 10% overhead).

+
+
+
+

Benefits of This Work to the Python Community and the General Public

+

Python has become one of the most popular dynamic programming languages in +the world. Web developers, educators, and scientific programmers alike +all value Python because Python code is often more readable and because +Python often increases programmer productivity.

+

Traditionally, languages like Python ran more slowly than static, compiled +languages; Python developers chose to sacrifice execution speed for ease +of programming. The PyPy project created a substantially improved Python +language implementation, including a fast Just-in-time (JIT) compiler. +The increased execution speed that PyPy provides has attracted many users, +who now find their Python code runs between 2 and 50 times faster under PyPy +than under the reference implementation written in C.

+

However, in the presence of today's machines with multiple processors, +Python progress lags behind. The issue has been described in the +introduction: developers that really need to use multiple CPUs are +constrained to select and use one of the multi-process solutions that +are all in some way or another hacks requiring extra knowledge and +efforts to use. The focus of the work described in this proposal is to +offer an alternative in the core of the Python language — an +alternative that can naturally integrate with the rest of the program. +This alternative is implemented in PyPy.

+

PyPy's developers make all PyPy software available to the public without +charge, under PyPy's Open Source copyright license, the permissive MIT +License. PyPy's license assures that PyPy is equally available to +everyone freely on terms that allow both non-commercial and commercial +activity. This license allows for academics, for-profit software +developers, volunteers and enthusiasts alike to collaborate together to +make a better Python implementation for everyone.

+

PyPy-TM is and continues to be available under the same license. Being +licensed freely to the general public means that opportunities to use, +improve and learn about how Transactional Memory works itself will be +generally available to everyone.

+
+
+ +
+
+
+ + \ No newline at end of file From noreply at buildbot.pypy.org Tue Apr 8 15:01:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 15:01:34 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Add a 'draft' mention Message-ID: <20140408130134.6C6D31D241B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r479:79029a9ec90b Date: 2014-04-08 15:01 +0200 http://bitbucket.org/pypy/pypy.org/changeset/79029a9ec90b/ Log: Add a 'draft' mention diff --git a/source/tmdonate2.txt b/source/tmdonate2.txt --- a/source/tmdonate2.txt +++ b/source/tmdonate2.txt @@ -7,6 +7,8 @@ Transactional Memory, 2nd Call ============================== +**DRAFT** + This is the second call for donations on the topic of Transactional Memory (TM) in PyPy, a way to run CPU-hungry Python programs in diff --git a/tmdonate2.html b/tmdonate2.html --- a/tmdonate2.html +++ b/tmdonate2.html @@ -45,6 +45,7 @@

2nd Call for donations - Transactional Memory in PyPy

+

DRAFT

This is the second call for donations on the topic of Transactional Memory ™ in PyPy, a way to run CPU-hungry Python programs in multithreaded mode. It is a follow-up on our first call for From noreply at buildbot.pypy.org Tue Apr 8 15:40:01 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 8 Apr 2014 15:40:01 +0200 (CEST) Subject: [pypy-commit] benchmarks default: move "import Image" so that it is not required for the benchmark Message-ID: <20140408134001.16D591C0320@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r245:08c5d23c7221 Date: 2014-04-08 15:40 +0200 http://bitbucket.org/pypy/benchmarks/changeset/08c5d23c7221/ Log: move "import Image" so that it is not required for the benchmark diff --git a/multithread/mandelbrot/mandelbrot.py b/multithread/mandelbrot/mandelbrot.py --- a/multithread/mandelbrot/mandelbrot.py +++ b/multithread/mandelbrot/mandelbrot.py @@ -1,5 +1,5 @@ from common.abstract_threading import Future, atomic -import Image, sys +import sys def calculate(a, b, im_size, max_iter=255): @@ -28,6 +28,7 @@ return result def save_img(image, file_name='out.png'): + import Image im = Image.new("RGB", (len(image[0]), len(image))) out = im.load() From noreply at buildbot.pypy.org Tue Apr 8 16:54:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 16:54:49 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Update (hopefully) all links to tmdonate.html to go to tmdonate2.html, Message-ID: <20140408145449.5AC8C1C3549@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r480:1206e483bdc7 Date: 2014-04-08 16:43 +0200 http://bitbucket.org/pypy/pypy.org/changeset/1206e483bdc7/ Log: Update (hopefully) all links to tmdonate.html to go to tmdonate2.html, update the link names, the percentage we got, etc. diff --git a/archive.html b/archive.html --- a/archive.html +++ b/archive.html @@ -38,7 +38,7 @@


- +
diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -38,7 +38,7 @@

- +
diff --git a/contact.html b/contact.html --- a/contact.html +++ b/contact.html @@ -38,7 +38,7 @@

- +
diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -1,6 +1,6 @@

- +
diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -38,7 +38,7 @@

- +
diff --git a/howtohelp.html b/howtohelp.html --- a/howtohelp.html +++ b/howtohelp.html @@ -38,7 +38,7 @@

- +
diff --git a/index.html b/index.html --- a/index.html +++ b/index.html @@ -38,7 +38,7 @@

- +
diff --git a/numpydonate.html b/numpydonate.html --- a/numpydonate.html +++ b/numpydonate.html @@ -38,7 +38,7 @@

- +
diff --git a/people.html b/people.html --- a/people.html +++ b/people.html @@ -38,7 +38,7 @@

- +
diff --git a/performance.html b/performance.html --- a/performance.html +++ b/performance.html @@ -38,7 +38,7 @@

- +
diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -38,7 +38,7 @@

- +
diff --git a/source/_layouts/site.genshi b/source/_layouts/site.genshi --- a/source/_layouts/site.genshi +++ b/source/_layouts/site.genshi @@ -15,7 +15,7 @@ ('Contact', 'contact.html'), ('Py3k donations', 'py3donate.html'), ('NumPy donations', 'numpydonate.html'), - ('STM/AME donations', 'tmdonate.html'), + ('STM donations', 'tmdonate2.html'), ], } diff --git a/source/tmdonate.txt b/source/tmdonate.txt --- a/source/tmdonate.txt +++ b/source/tmdonate.txt @@ -7,17 +7,13 @@ Transactional Memory / Automatic Mutual Exclusion ================================================= -UPDATE (February 2014): +.. class:: download_menu - *Thanks to our donors, we have raised 52% of - the total so far. Work on this topic has been happening, and - continues to happen, within the budget --- even if not within the - timeline described below. We have simply not found enough time to - work on it as much as we wanted, and thus did not consume the money as - quickly as predicted. The ratio "progress / $ used" so far - corresponds roughly to what we expected. The document below is the - original call for proposal, and we still accept donations for this - topic.* + **UPDATE (April 2014): this is the old Call for Donations about + Transactional Memory**, kept around for historical purposes. A `new Call + for Donations`__ is available. + +.. __: tmdonate2.html Introduction diff --git a/source/tmdonate2.txt b/source/tmdonate2.txt --- a/source/tmdonate2.txt +++ b/source/tmdonate2.txt @@ -7,8 +7,36 @@ Transactional Memory, 2nd Call ============================== -**DRAFT** +.. 
class:: download_menu + **--- DRAFT VERSION ---** + + * `Preamble`_ + + * `Introduction`_ + + * `In more detail`_ + + - `What is the Global Interpreter Lock?`_ + - `What is Transactional Memory?`_ + - `Hardware vs Software Transactional Memory`_ + - `Why do TM with PyPy instead of CPython?`_ + - `Alternatives`_ + - `Platforms other than the x86-64 Linux`_ + - `More readings`_ + + * `Work plan`_ + + - `Goal 1`_ + - `Goal 2`_ + - `Goal 3`_ + - `Funding`_ + + * `Benefits of This Work to the Python Community and the General Public`_ + + +Preamble +======== This is the second call for donations on the topic of Transactional Memory (TM) in PyPy, a way to run CPU-hungry Python programs in diff --git a/sponsor.html b/sponsor.html --- a/sponsor.html +++ b/sponsor.html @@ -38,7 +38,7 @@

- +
diff --git a/success.html b/success.html --- a/success.html +++ b/success.html @@ -38,7 +38,7 @@

- +
diff --git a/tmdonate.html b/tmdonate.html --- a/tmdonate.html +++ b/tmdonate.html @@ -38,24 +38,16 @@

- +

Call for donations - Transactional Memory / Automatic Mutual Exclusion in PyPy

-

UPDATE (February 2014):

-
-Thanks to our donors, we have raised 52% of -the total so far. Work on this topic has been happening, and -continues to happen, within the budget – even if not within the -timeline described below. We have simply not found enough time to -work on it as much as we wanted, and thus did not consume the money as -quickly as predicted. The ratio “progress / $ used” so far -corresponds roughly to what we expected. The document below is the -original call for proposal, and we still accept donations for this -topic.
+

UPDATE (April 2014): this is the old Call for Donations about +Transactional Memory, kept around for historical purposes. A new Call +for Donations is available.

Introduction

In the presence of today's machines with multiple processors, Python diff --git a/tmdonate2.html b/tmdonate2.html --- a/tmdonate2.html +++ b/tmdonate2.html @@ -38,14 +38,38 @@


- +

2nd Call for donations - Transactional Memory in PyPy

-

DRAFT

+

– DRAFT VERSION –

+ +
+

Preamble

This is the second call for donations on the topic of Transactional Memory ™ in PyPy, a way to run CPU-hungry Python programs in multithreaded mode. It is a follow-up on our first call for @@ -66,6 +90,7 @@ also includes exploring and tweaking some existing libraries to improve their TM-friendliness (e.g. Twisted and Stackless).

See also the update on HTM below.

+

Introduction

In the presence of today's machines with multiple processors, Python From noreply at buildbot.pypy.org Tue Apr 8 16:54:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 16:54:50 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Expand section title. Message-ID: <20140408145450.8F8391C3549@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r481:fb084b8b144e Date: 2014-04-08 16:54 +0200 http://bitbucket.org/pypy/pypy.org/changeset/fb084b8b144e/ Log: Expand section title. diff --git a/source/tmdonate2.txt b/source/tmdonate2.txt --- a/source/tmdonate2.txt +++ b/source/tmdonate2.txt @@ -25,7 +25,7 @@ - `Platforms other than the x86-64 Linux`_ - `More readings`_ - * `Work plan`_ + * `Work plan and funding details`_ - `Goal 1`_ - `Goal 2`_ @@ -282,8 +282,10 @@ -Work plan -========= +.. _`work plan`: + +Work plan and funding details +============================= This is an very rough estimate of the amount of work it would take to complete the steps for an experienced developer who is already familiar diff --git a/tmdonate2.html b/tmdonate2.html --- a/tmdonate2.html +++ b/tmdonate2.html @@ -59,7 +59,7 @@

  • More readings
  • -
  • Work plan
  • -
    -

    Work plan

    +
    +

    Work plan and funding details

    This is an very rough estimate of the amount of work it would take to complete the steps for an experienced developer who is already familiar with the PyPy codebase. As before, we cannot guarantee the time From noreply at buildbot.pypy.org Tue Apr 8 17:06:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 17:06:32 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Link to pypy/doc/stm from here too Message-ID: <20140408150632.B72B61D2386@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r482:a0ea9f9091f3 Date: 2014-04-08 17:06 +0200 http://bitbucket.org/pypy/pypy.org/changeset/a0ea9f9091f3/ Log: Link to pypy/doc/stm from here too diff --git a/source/tmdonate2.txt b/source/tmdonate2.txt --- a/source/tmdonate2.txt +++ b/source/tmdonate2.txt @@ -276,8 +276,9 @@ More readings ------------- -See `our blog posts about STM`__. +See the `STM page of our documentation`__ and `our blog posts about STM`__. +.. __: http://pypy.readthedocs.org/en/latest/stm.html .. __: http://morepypy.blogspot.com/search/label/stm @@ -328,7 +329,7 @@ the caveats. The current status is written down `in the docs`__ and will evolve over time. -.. __: https://pypy.readthedocs.org/en/latest/stm.html +.. __: http://pypy.readthedocs.org/en/latest/stm.html For future reference, at the end of March the main identified issues are: @@ -399,7 +400,7 @@ control of the application, to do things like append an item to a list in a "delayed" manner, to avoid conflicts. -.. __: https://pypy.readthedocs.org/en/latest/stm.html +.. __: http://pypy.readthedocs.org/en/latest/stm.html .. _OpenMP: http://en.wikipedia.org/wiki/OpenMP Similarly, we might need a way to delay some I/O: doing it only at the diff --git a/tmdonate2.html b/tmdonate2.html --- a/tmdonate2.html +++ b/tmdonate2.html @@ -257,7 +257,7 @@

    @@ -292,7 +292,7 @@ some cases to run existing multithreaded code without a GIL, but not in all of them. There are a number of caveats for the user and missing optimizations. The goal #1 is to improve this case and address -the caveats. The current status is written down in the docs and +the caveats. The current status is written down in the docs and will evolve over time.

    For future reference, at the end of March the main identified issues are:

    @@ -320,7 +320,7 @@ and libraries accessible from Python programs that want to make use of this benefit.

    This goal requires good support for very-long-running transactions, -started with the with atomic construct documented here. This +started with the with atomic construct documented here. This approach hides the notion of threads from the end programmer, including all the hard multithreading-related issues. This is not the first alternative approach to explicit threads; for example, OpenMP is one. From noreply at buildbot.pypy.org Tue Apr 8 17:14:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 17:14:27 +0200 (CEST) Subject: [pypy-commit] pypy default: Activate the link to tmdonate2 Message-ID: <20140408151427.64F721C10C2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70487:ff6ad0e106d9 Date: 2014-04-08 17:13 +0200 http://bitbucket.org/pypy/pypy/changeset/ff6ad0e106d9/ Log: Activate the link to tmdonate2 diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -15,11 +15,11 @@ user, describes work in progress, and finally gives references to more implementation details. -This work was done by Remi Meier and Armin Rigo. Thanks to all donors -for crowd-funding the work so far! Please have a look at the 2nd call -for donation (*not ready yet*) +This work was done mostly by Remi Meier and Armin Rigo. Thanks to all +donors for crowd-funding the work so far! Please have a look at the +`2nd call for donation`_. -.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html +.. 
_`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction From noreply at buildbot.pypy.org Tue Apr 8 17:21:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 17:21:05 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Fight against "™" Message-ID: <20140408152105.DB6871C3001@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r483:eea549ad85d2 Date: 2014-04-08 17:20 +0200 http://bitbucket.org/pypy/pypy.org/changeset/eea549ad85d2/ Log: Fight against "™" diff --git a/source/tmdonate2.txt b/source/tmdonate2.txt --- a/source/tmdonate2.txt +++ b/source/tmdonate2.txt @@ -39,7 +39,7 @@ ======== This is the second call for donations on the topic of Transactional -Memory (TM) in PyPy, a way to run CPU-hungry Python programs in +Memory (*TM*) in PyPy, a way to run CPU-hungry Python programs in multithreaded mode. It is a follow-up on our `first call for donations`_ from two years ago. At that time, we suggested a single-threaded slow-down of somewhere between 2x and 5x. The aim that @@ -145,7 +145,7 @@ What is Transactional Memory? ----------------------------- -`Transactional Memory`_ (TM) is a technique imported from +`Transactional Memory`_ (*TM*) is a technique imported from databases: every time we want to do a change to the processors' main memory, we do it in a "transaction". Multiple transactions can be executed in parallel by multiple cores. When a transaction is complete, diff --git a/tmdonate2.html b/tmdonate2.html --- a/tmdonate2.html +++ b/tmdonate2.html @@ -71,7 +71,7 @@

    Preamble

    This is the second call for donations on the topic of Transactional -Memory ™ in PyPy, a way to run CPU-hungry Python programs in +Memory (TM) in PyPy, a way to run CPU-hungry Python programs in multithreaded mode. It is a follow-up on our first call for donations from two years ago. At that time, we suggested a single-threaded slow-down of somewhere between 2x and 5x. The aim that @@ -155,7 +155,7 @@

    What is Transactional Memory?

    -

    Transactional Memory ™ is a technique imported from +

    Transactional Memory (TM) is a technique imported from databases: every time we want to do a change to the processors' main memory, we do it in a “transaction”. Multiple transactions can be executed in parallel by multiple cores. When a transaction is complete, From noreply at buildbot.pypy.org Tue Apr 8 18:10:08 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 8 Apr 2014 18:10:08 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: intermediate checkin Message-ID: <20140408161008.670CB1D2A89@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70488:8d213d6dda5c Date: 2014-04-08 17:49 +0200 http://bitbucket.org/pypy/pypy/changeset/8d213d6dda5c/ Log: intermediate checkin start refactoring generate_guards to no longer call generalization_of (which I ultimately want to deprecate). for now, carefully list all cases. intbounds are not supported yet. diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -31,6 +31,12 @@ def clone(self): return LenBound(self.mode, self.descr, self.bound.clone()) + def generalization_of(self, other): + return (other is not None and + self.mode == other.mode and + self.descr == other.descr and + self.lenbound.bound.contains_bound(other.lenbound.bound)) + class OptValue(object): __metaclass__ = extendabletype _attrs_ = ('box', 'known_class', 'last_guard', 'level', 'intbound', 'lenbound') diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -9,6 +9,7 @@ from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ equaloplists from 
rpython.jit.metainterp.optimizeopt.intutils import IntBound +from rpython.jit.metainterp.optimizeopt.virtualize import VirtualValue from rpython.jit.metainterp.history import TreeLoop, JitCellToken from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import ResOperation, rop @@ -144,13 +145,17 @@ class BaseTestGenerateGuards(BaseTest): - def guards(self, info1, info2, box_or_value, expected): + def _box_or_value(self, box_or_value): if isinstance(box_or_value, OptValue): value = box_or_value box = value.box else: box = box_or_value value = OptValue(box) + return value, box + + def guards(self, info1, info2, box_or_value, expected): + value, box = self._box_or_value(box_or_value) info1.position = info2.position = 0 guards = [] info1.generate_guards(info2, value, self.cpu, guards, {}) @@ -166,7 +171,139 @@ if op.is_guard(): op.setdescr(None) assert equaloplists(guards, loop.operations, False, - boxmap) + boxmap) + + def check_no_guards(self, info1, info2, box_or_value): + value, _ = self._box_or_value(box_or_value) + guards = [] + info1.generate_guards(info2, value, self.cpu, guards, {}) + assert not guards + + def check_invalid(self, info1, info2, box_or_value): + value, _ = self._box_or_value(box_or_value) + guards = [] + with py.test.raises(InvalidLoop): + info1.generate_guards(info2, value, self.cpu, guards, {}) + + def test_nonvirtual_all_combinations(self): + # set up infos + unknown_val = OptValue(self.nodebox) + unknownnull_val = OptValue(BoxPtr(self.nullptr)) + unknown_info = NotVirtualStateInfo(unknown_val) + + nonnull_val = OptValue(self.nodebox) + nonnull_val.make_nonnull(None) + nonnull_info = NotVirtualStateInfo(nonnull_val) + + knownclass_val = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + knownclass_val.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(knownclass_val) + knownclass2_val = OptValue(self.nodebox2) + 
classbox = self.cpu.ts.cls_of_box(self.nodebox2) + knownclass2_val.make_constant_class(classbox, -1) + knownclass2_info = NotVirtualStateInfo(knownclass2_val) + + constant_val = OptValue(BoxInt()) + constant_val.make_constant(ConstInt(1)) + constant_info = NotVirtualStateInfo(constant_val) + constclass_val = OptValue(self.nodebox) + constclass_val.make_constant(self.nodebox.constbox()) + constclass_info = NotVirtualStateInfo(constclass_val) + constclass2_val = OptValue(self.nodebox) + constclass2_val.make_constant(self.nodebox2.constbox()) + constclass2_info = NotVirtualStateInfo(constclass2_val) + constantnull_val = OptValue(ConstPtr(self.nullptr)) + constantnull_info = NotVirtualStateInfo(constantnull_val) + + # unknown unknown + self.check_no_guards(unknown_info, unknown_info, unknown_val) + + # unknown nonnull + self.check_no_guards(unknown_info, nonnull_info, nonnull_val) + + # unknown knownclass + self.check_no_guards(unknown_info, knownclass_info, knownclass_val) + + # unknown constant + self.check_no_guards(unknown_info, constant_info, constant_val) + + + # nonnull unknown + expected = """ + [p0] + guard_nonnull(p0) [] + """ + self.guards(nonnull_info, unknown_info, unknown_val, expected) + self.check_invalid(nonnull_info, unknown_info, unknownnull_val) + + # nonnull nonnull + self.check_no_guards(nonnull_info, nonnull_info, nonnull_val) + + # nonnull knownclass + self.check_no_guards(nonnull_info, knownclass_info, knownclass_val) + + # nonnull constant + self.check_no_guards(nonnull_info, constant_info, constant_val) + self.check_invalid(nonnull_info, constantnull_info, constantnull_val) + + + # knownclass unknown + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(knownclass_info, unknown_info, unknown_val, expected) + self.check_invalid(knownclass_info, unknown_info, unknownnull_val) + self.check_invalid(knownclass_info, unknown_info, knownclass2_val) + + # knownclass nonnull + expected = """ + 
[p0] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(knownclass_info, nonnull_info, knownclass_val, expected) + self.check_invalid(knownclass_info, nonnull_info, knownclass2_val) + + # knownclass knownclass + self.check_no_guards(knownclass_info, knownclass_info, knownclass_val) + self.check_invalid(knownclass_info, knownclass2_info, knownclass2_val) + + # knownclass constant + self.check_invalid(knownclass_info, constantnull_info, constantnull_val) + self.check_invalid(knownclass_info, constclass2_info, constclass2_val) + + + # constant unknown + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.guards(constant_info, unknown_info, constant_val, expected) + self.check_invalid(constant_info, unknown_info, unknownnull_val) + + # constant nonnull + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.guards(constant_info, nonnull_info, constant_val, expected) + self.check_invalid(constant_info, nonnull_info, constclass2_val) + + # constant knownclass + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.guards(constant_info, knownclass_info, constant_val, expected) + self.check_invalid(constant_info, knownclass_info, unknownnull_val) + + # constant constant + self.check_no_guards(constant_info, constant_info, constant_val) + self.check_invalid(constant_info, constantnull_info, constantnull_val) + + def test_intbounds(self): value1 = OptValue(BoxInt(15)) value1.intbound.make_ge(IntBound(0, 10)) @@ -280,6 +417,34 @@ vstate1.generate_guards(vstate2, [value2], self.cpu, guards) self.compare(guards, expected, [box2]) + def test_generate_guards_on_virtual_fields_matches(self): + py.test.skip("not yet") + innervalue1 = OptValue(self.nodebox) + constclassbox = self.cpu.ts.cls_of_box(self.nodebox) + innervalue1.make_constant_class(constclassbox, -1) + innerinfo1 = NotVirtualStateInfo(innervalue1) + innerinfo1.position = 1 + innerinfo2 = NotVirtualStateInfo(OptValue(self.nodebox)) + innerinfo2.position = 1 + + info1 = 
VirtualStateInfo(ConstInt(42), [1]) + info1.fieldstate = [innerinfo1] + + info2 = VirtualStateInfo(ConstInt(42), [1]) + info2.fieldstate = [innerinfo2] + + value1 = VirtualValue(self.cpu, constclassbox, BoxInt()) + value1._fields = {1: OptValue(self.nodebox)} + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, value1, expected) + + # _________________________________________________________________________ + # the below tests don't really have anything to do with guard generation def test_virtuals_with_equal_fields(self): info1 = VirtualStateInfo(ConstInt(42), [1, 2]) @@ -681,7 +846,7 @@ """ self.optimize_bridge(loop, bridge, expected, p0=self.myptr) - def test_virtual(self): + def test_simple_virtual(self): loops = """ [p0, p1] p2 = new_with_vtable(ConstClass(node_vtable)) diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -28,18 +28,23 @@ bad[self] = bad[other] = None return result - def generate_guards(self, other, value, cpu, extra_guards, renum): + def generate_guards(self, other, value, cpu, extra_guards, renum, bad=None): + if bad is None: + bad = {} assert isinstance(value, OptValue) - if self.generalization_of(other, renum, {}): - return - if renum[self.position] != other.position: - raise InvalidLoop('The numbering of the virtual states does not ' + - 'match. This means that two virtual fields ' + - 'have been set to the same Box in one of the ' + - 'virtual states but not in the other.') - self._generate_guards(other, value, cpu, extra_guards, renum) + if self.position in renum: + if renum[self.position] != other.position: + bad[self] = bad[other] = None + raise InvalidLoop('The numbering of the virtual states does not ' + + 'match. 
This means that two virtual fields ' + + 'have been set to the same Box in one of the ' + + 'virtual states but not in the other.') + else: + renum[self.position] = other.position + self._generate_guards(other, value, cpu, extra_guards, renum, bad) - def _generate_guards(self, other, value, cpu, extra_guards, renum): + def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): + bad[self] = bad[other] = None raise InvalidLoop('Generating guards for making the VirtualStates ' + 'at hand match have not been implemented') @@ -126,6 +131,7 @@ return (isinstance(other, VirtualStateInfo) and self.known_class.same_constant(other.known_class)) + def debug_header(self, indent): debug_print(indent + 'VirtualStateInfo(%d):' % self.position) @@ -271,55 +277,101 @@ if not self.intbound.contains_bound(other.intbound): return False - if self.lenbound and other.lenbound: - if self.lenbound.mode != other.lenbound.mode or \ - self.lenbound.descr != other.lenbound.descr or \ - not self.lenbound.bound.contains_bound(other.lenbound.bound): - return False - elif self.lenbound: - return False + if self.lenbound: + return self.lenbound.generalization_of(other.lenbound) return True - def _generate_guards(self, other, value, cpu, extra_guards, renum): + def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): box = value.box if not isinstance(other, NotVirtualStateInfo): + bad[self] = bad[other] = None raise InvalidLoop('The VirtualStates does not match as a ' + 'virtual appears where a pointer is needed ' + 'and it is too late to force it.') - if self.lenbound or other.lenbound: - raise InvalidLoop('The array length bounds does not match.') - if self.is_opaque: + bad[self] = bad[other] = None raise InvalidLoop('Generating guards for opaque pointers is not safe') - # the following conditions always peek into the runtime value that the + if self.lenbound and not self.lenbound.generalization_of(other.lenbound): + raise InvalidLoop() + + + if self.level == 
LEVEL_UNKNOWN: + return + + # the following conditions often peek into the runtime value that the # box had when tracing. This value is only used as an educated guess. # It is used here to choose between either emitting a guard and jumping # to an existing compiled loop or retracing the loop. Both alternatives # will always generate correct behaviour, but performance will differ. - if (self.level == LEVEL_CONSTANT and - self.constbox.same_constant(box.constbox())): - op = ResOperation(rop.GUARD_VALUE, [box, self.constbox], None) - extra_guards.append(op) - return + elif self.level == LEVEL_NONNULL: + if other.level == LEVEL_UNKNOWN: + if box.nonnull(): + op = ResOperation(rop.GUARD_NONNULL, [box], None) + extra_guards.append(op) + return + else: + raise InvalidLoop() + elif other.level == LEVEL_NONNULL: + return + elif other.level == LEVEL_KNOWNCLASS: + return # implies nonnull + else: + assert other.level == LEVEL_CONSTANT + assert other.constbox + if not other.constbox.nonnull(): + raise InvalidLoop("XXX") + return - if self.level == LEVEL_KNOWNCLASS and \ - box.nonnull() and \ - self.known_class.same_constant(cpu.ts.cls_of_box(box)): - op = ResOperation(rop.GUARD_NONNULL, [box], None) - extra_guards.append(op) - op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) - extra_guards.append(op) - return + elif self.level == LEVEL_KNOWNCLASS: + if other.level == LEVEL_UNKNOWN: + if (box.nonnull() and + self.known_class.same_constant(cpu.ts.cls_of_box(box))): + op = ResOperation(rop.GUARD_NONNULL, [box], None) + extra_guards.append(op) + op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) + extra_guards.append(op) + return + else: + raise InvalidLoop() + elif other.level == LEVEL_NONNULL: + if self.known_class.same_constant(cpu.ts.cls_of_box(box)): + op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) + extra_guards.append(op) + return + else: + raise InvalidLoop() + elif other.level == LEVEL_KNOWNCLASS: + if 
self.known_class.same_constant(other.known_class): + return + raise InvalidLoop() + else: + assert other.level == LEVEL_CONSTANT + if (other.constbox.nonnull() and + self.known_class.same_constant(cpu.ts.cls_of_box(other.constbox))): + return + else: + raise InvalidLoop() - if (self.level == LEVEL_NONNULL and - other.level == LEVEL_UNKNOWN and - isinstance(box, BoxPtr) and - box.nonnull()): - op = ResOperation(rop.GUARD_NONNULL, [box], None) - extra_guards.append(op) - return + else: + assert self.level == LEVEL_CONSTANT + if other.level == LEVEL_CONSTANT: + if self.constbox.same_constant(other.constbox): + return + raise InvalidLoop() + if self.constbox.same_constant(box.constbox()): + op = ResOperation(rop.GUARD_VALUE, [box, self.constbox], None) + extra_guards.append(op) + return + else: + raise InvalidLoop() + raise InvalidLoop("XXX") + + if self.lenbound or other.lenbound: + bad[self] = bad[other] = None + raise InvalidLoop('The array length bounds does not match.') + if (self.level == LEVEL_UNKNOWN and other.level == LEVEL_UNKNOWN and @@ -345,10 +397,6 @@ extra_guards.append(op) return - # Remaining cases are probably not interesting - raise InvalidLoop('Generating guards for making the VirtualStates ' + - 'at hand match have not been implemented') - def enum_forced_boxes(self, boxes, value, optimizer): if self.level == LEVEL_CONSTANT: return @@ -410,12 +458,14 @@ return False return True - def generate_guards(self, other, values, cpu, extra_guards): + def generate_guards(self, other, values, cpu, extra_guards, bad=None): + if bad is None: + bad = {} assert len(self.state) == len(other.state) == len(values) renum = {} for i in range(len(self.state)): self.state[i].generate_guards(other.state[i], values[i], - cpu, extra_guards, renum) + cpu, extra_guards, renum, bad) def make_inputargs(self, values, optimizer, keyboxes=False): if optimizer.optearlyforce: From noreply at buildbot.pypy.org Tue Apr 8 18:10:09 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: 
Tue, 8 Apr 2014 18:10:09 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: bad handling in one place Message-ID: <20140408161009.B4C401D2A89@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70489:de9db9c889d3 Date: 2014-04-08 17:50 +0200 http://bitbucket.org/pypy/pypy/changeset/de9db9c889d3/ Log: bad handling in one place diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -41,10 +41,13 @@ 'virtual states but not in the other.') else: renum[self.position] = other.position - self._generate_guards(other, value, cpu, extra_guards, renum, bad) + try: + self._generate_guards(other, value, cpu, extra_guards, renum, bad) + except InvalidLoop: + bad[self] = bad[other] = None + raise def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): - bad[self] = bad[other] = None raise InvalidLoop('Generating guards for making the VirtualStates ' + 'at hand match have not been implemented') @@ -284,13 +287,11 @@ def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): box = value.box if not isinstance(other, NotVirtualStateInfo): - bad[self] = bad[other] = None raise InvalidLoop('The VirtualStates does not match as a ' + 'virtual appears where a pointer is needed ' + 'and it is too late to force it.') if self.is_opaque: - bad[self] = bad[other] = None raise InvalidLoop('Generating guards for opaque pointers is not safe') if self.lenbound and not self.lenbound.generalization_of(other.lenbound): @@ -369,7 +370,6 @@ raise InvalidLoop("XXX") if self.lenbound or other.lenbound: - bad[self] = bad[other] = None raise InvalidLoop('The array length bounds does not match.') From noreply at buildbot.pypy.org Tue Apr 8 18:10:10 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 8 Apr 2014 18:10:10 +0200 
(CEST) Subject: [pypy-commit] pypy small-unroll-improvements: intbound support Message-ID: <20140408161010.F21FB1D2A89@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70490:3c6bd65ee1b3 Date: 2014-04-08 18:00 +0200 http://bitbucket.org/pypy/pypy/changeset/3c6bd65ee1b3/ Log: intbound support diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -35,7 +35,7 @@ return (other is not None and self.mode == other.mode and self.descr == other.descr and - self.lenbound.bound.contains_bound(other.lenbound.bound)) + self.bound.contains_bound(other.bound)) class OptValue(object): __metaclass__ = extendabletype diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -297,9 +297,11 @@ if self.lenbound and not self.lenbound.generalization_of(other.lenbound): raise InvalidLoop() - if self.level == LEVEL_UNKNOWN: - return + if other.level == LEVEL_UNKNOWN: + return self._generate_guards_intbounds(other, value, extra_guards) + else: + return # matches everything # the following conditions often peek into the runtime value that the # box had when tracing. This value is only used as an educated guess. 
@@ -369,14 +371,13 @@ raise InvalidLoop() raise InvalidLoop("XXX") - if self.lenbound or other.lenbound: - raise InvalidLoop('The array length bounds does not match.') - - if (self.level == LEVEL_UNKNOWN and - other.level == LEVEL_UNKNOWN and - isinstance(box, BoxInt) and - self.intbound.contains(box.getint())): + def _generate_guards_intbounds(self, other, value, extra_guards): + if self.intbound.contains_bound(other.intbound): + return + box = value.box + if (isinstance(box, BoxInt) and + self.intbound.contains(box.getint())): if self.intbound.has_lower: bound = self.intbound.lower if not (other.intbound.has_lower and @@ -396,6 +397,7 @@ op = ResOperation(rop.GUARD_TRUE, [res], None) extra_guards.append(op) return + raise InvalidLoop("intbounds don't match") def enum_forced_boxes(self, boxes, value, optimizer): if self.level == LEVEL_CONSTANT: From noreply at buildbot.pypy.org Tue Apr 8 18:10:12 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 8 Apr 2014 18:10:12 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: start recursively generating guards for virtuals Message-ID: <20140408161012.4AF151D2A93@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70491:3e921143ad82 Date: 2014-04-08 18:09 +0200 http://bitbucket.org/pypy/pypy/changeset/3e921143ad82/ Log: start recursively generating guards for virtuals diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -154,12 +154,14 @@ value = OptValue(box) return value, box - def guards(self, info1, info2, box_or_value, expected): + def guards(self, info1, info2, box_or_value, expected, inputargs=None): value, box = self._box_or_value(box_or_value) + if inputargs is None: + inputargs = [box] info1.position = info2.position = 0 
guards = [] info1.generate_guards(info2, value, self.cpu, guards, {}) - self.compare(guards, expected, [box]) + self.compare(guards, expected, inputargs) def compare(self, guards, expected, inputargs): loop = self.parse(expected) @@ -418,7 +420,6 @@ self.compare(guards, expected, [box2]) def test_generate_guards_on_virtual_fields_matches(self): - py.test.skip("not yet") innervalue1 = OptValue(self.nodebox) constclassbox = self.cpu.ts.cls_of_box(self.nodebox) innervalue1.make_constant_class(constclassbox, -1) @@ -433,7 +434,7 @@ info2 = VirtualStateInfo(ConstInt(42), [1]) info2.fieldstate = [innerinfo2] - value1 = VirtualValue(self.cpu, constclassbox, BoxInt()) + value1 = VirtualValue(self.cpu, constclassbox, self.nodebox) value1._fields = {1: OptValue(self.nodebox)} expected = """ @@ -441,7 +442,7 @@ guard_nonnull(p0) [] guard_class(p0, ConstClass(node_vtable)) [] """ - self.guards(info1, info2, value1, expected) + self.guards(info1, info2, value1, expected, [self.nodebox]) # _________________________________________________________________________ # the below tests don't really have anything to do with guard generation diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -103,6 +103,27 @@ return True + + def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): + if not self._generalization_of_structpart(other): + raise InvalidLoop("XXX") + + assert isinstance(other, AbstractVirtualStructStateInfo) + assert len(self.fielddescrs) == len(self.fieldstate) + assert len(other.fielddescrs) == len(other.fieldstate) + assert isinstance(value, virtualize.AbstractVirtualStructValue) + assert value.is_virtual() + + if len(self.fielddescrs) != len(other.fielddescrs): + raise InvalidLoop("XXX") + + for i in range(len(self.fielddescrs)): + if other.fielddescrs[i] is not 
self.fielddescrs[i]: + raise InvalidLoop("XXX") + v = value._fields[self.fielddescrs[i]] # must be there + self.fieldstate[i].generate_guards(other.fieldstate[i], v, cpu, extra_guards, renum) + + def _generalization_of_structpart(self, other): raise NotImplementedError From noreply at buildbot.pypy.org Tue Apr 8 18:53:28 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 8 Apr 2014 18:53:28 +0200 (CEST) Subject: [pypy-commit] benchmarks default: add a benchmark which runs some worms on a grid (should benefit greatly from Message-ID: <20140408165328.14FBB1C10C2@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r246:c5d6ed268f36 Date: 2014-04-08 18:54 +0200 http://bitbucket.org/pypy/benchmarks/changeset/c5d6ed268f36/ Log: add a benchmark which runs some worms on a grid (should benefit greatly from array-write barriers) diff --git a/multithread/threadworms/threadworms.py b/multithread/threadworms/threadworms.py new file mode 100644 --- /dev/null +++ b/multithread/threadworms/threadworms.py @@ -0,0 +1,193 @@ +# Threadworms (a Python/Pygame threading demonstration) +# By Al Sweigart al at inventwithpython.com +# http://inventwithpython.com/blog +# Released under a "Simplified BSD" license + +# This is meant to be an educational example of multithreaded programming, +# so I get kind of verbose in the comments. + +from common.abstract_threading import atomic, Future +import time +import random, sys, threading + +# Setting up constants +CELLS_WIDE = 1000 # how many cells wide the grid is +CELLS_HIGH = 1000 # how many cells high the grid is +GRID = [] +NUM_STEPS = 0 +MAX_WORM_SIZE = 5 + + +UP = 'up' +DOWN = 'down' +LEFT = 'left' +RIGHT = 'right' + +HEAD = 0 +BUTT = -1 # negative indexes count from the end, so -1 will always be the last index + + +class Worm(threading.Thread): # "Thread" is a class in the "threading" module. 
+ def __init__(self, name='Worm', maxsize=MAX_WORM_SIZE, color=None): + threading.Thread.__init__(self) + self.name = name + self.rnd = random.Random() + self.maxsize = maxsize + + if color is None: + self.color = (random.randint(60, 255), random.randint(60, 255), random.randint(60, 255)) + else: + self.color = color + + # GRID_LOCK.acquire() # block until this thread can acquire the lock + with atomic: + while True: + startx = random.randint(0, CELLS_WIDE - 1) + starty = random.randint(0, CELLS_HIGH - 1) + if GRID[startx][starty] is None: + break # we've found an unoccupied cell in the grid + + GRID[startx][starty] = self.color # modify the shared data structure + # GRID_LOCK.release() + + + self.body = [{'x': startx, 'y': starty}] + self.direction = random.choice((UP, DOWN, LEFT, RIGHT)) + + + def run(self): + for _ in xrange(NUM_STEPS): + if self.rnd.randint(0, 100) < 20: # 20% to change direction + self.direction = self.rnd.choice((UP, DOWN, LEFT, RIGHT)) + + with atomic: + # GRID_LOCK.acquire() # don't return (that is, block) until this thread can acquire the lock + + nextx, nexty = self.getNextPosition() + if nextx in (-1, CELLS_WIDE) or nexty in (-1, CELLS_HIGH) or GRID[nextx][nexty] is not None: + self.direction = self.getNewDirection() + if self.direction is None: + self.body.reverse() # Now the head is the butt and the butt is the head. Magic! 
+ self.direction = self.getNewDirection() + + if self.direction is not None: + nextx, nexty = self.getNextPosition() + + if self.direction is not None: + GRID[nextx][nexty] = self.color # update the GRID state + self.body.insert(0, {'x': nextx, 'y': nexty}) # update this worm's own state + + if len(self.body) > self.maxsize: + GRID[self.body[BUTT]['x']][self.body[BUTT]['y']] = None # update the GRID state + del self.body[BUTT] # update this worm's own state (heh heh, worm butt) + else: + self.direction = self.rnd.choice((UP, DOWN, LEFT, RIGHT)) # can't move, so just do nothing for now but set a new random direction + + # GRID_LOCK.release() + + + def getNextPosition(self): + if self.direction == UP: + nextx = self.body[HEAD]['x'] + nexty = self.body[HEAD]['y'] - 1 + elif self.direction == DOWN: + nextx = self.body[HEAD]['x'] + nexty = self.body[HEAD]['y'] + 1 + elif self.direction == LEFT: + nextx = self.body[HEAD]['x'] - 1 + nexty = self.body[HEAD]['y'] + elif self.direction == RIGHT: + nextx = self.body[HEAD]['x'] + 1 + nexty = self.body[HEAD]['y'] + else: + assert False, 'Bad value for self.direction: %s' % self.direction + + return nextx, nexty + + + def getNewDirection(self): + x = self.body[HEAD]['x'] # syntactic sugar, makes the code below more readable + y = self.body[HEAD]['y'] + + newDirection = [] + if y - 1 not in (-1, CELLS_HIGH) and GRID[x][y - 1] is None: + newDirection.append(UP) + if y + 1 not in (-1, CELLS_HIGH) and GRID[x][y + 1] is None: + newDirection.append(DOWN) + if x - 1 not in (-1, CELLS_WIDE) and GRID[x - 1][y] is None: + newDirection.append(LEFT) + if x + 1 not in (-1, CELLS_WIDE) and GRID[x + 1][y] is None: + newDirection.append(RIGHT) + + if newDirection == []: + return None # None is returned when there are no possible ways for the worm to move. 
+ + return self.rnd.choice(newDirection) + +def run(worms=2, steps=10000000): + global DISPLAYSURF, NUM_WORMS, NUM_STEPS, GRID + NUM_WORMS = int(worms) + NUM_STEPS = int(steps) / NUM_WORMS + + GRID = [] + for x in range(CELLS_WIDE): + GRID.append([None] * CELLS_HIGH) +#GRID_LOCK = threading.Lock() # pun was not intended + + # Draw some walls on the grid +# squares = """ +# ........................... +# ........................... +# ........................... +# .H..H..EEE..L....L.....OO.. +# .H..H..E....L....L....O..O. +# .HHHH..EE...L....L....O..O. +# .H..H..E....L....L....O..O. +# .H..H..EEE..LLL..LLL...OO.. +# ........................... +# .W.....W...OO...RRR..MM.MM. +# .W.....W..O..O..R.R..M.M.M. +# .W..W..W..O..O..RR...M.M.M. +# .W..W..W..O..O..R.R..M...M. +# ..WW.WW....OO...R.R..M...M. +# ........................... +# ........................... +# """ + #setGridSquares(squares) + + # Create the worm objects. + worms = [] # a list that contains all the worm objects + for i in range(NUM_WORMS): + worms.append(Worm()) + for w in worms: + w.start() # Start the worm code in its own thread. 
+ + for t in worms: + t.join() + + + +def setGridSquares(squares, color=(192, 192, 192)): + squares = squares.split('\n') + if squares[0] == '': + del squares[0] + if squares[-1] == '': + del squares[-1] + + with atomic: + # GRID_LOCK.acquire() + for y in range(min(len(squares), CELLS_HIGH)): + for x in range(min(len(squares[y]), CELLS_WIDE)): + if squares[y][x] == ' ': + GRID[x][y] = None + elif squares[y][x] == '.': + pass + else: + GRID[x][y] = color + # GRID_LOCK.release() + + + + +if __name__ == '__main__': + run() From noreply at buildbot.pypy.org Tue Apr 8 19:04:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 19:04:50 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add what we get from "download as text" from google docs: not good at Message-ID: <20140408170450.D28661C1C7F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5194:8fbca1504d5a Date: 2014-04-08 18:05 +0200 http://bitbucket.org/pypy/extradoc/changeset/8fbca1504d5a/ Log: Add what we get from "download as text" from google docs: not good at all, but better than nothing diff --git a/talk/wtm2014/WTM_Talk.txt b/talk/wtm2014/WTM_Talk.txt new file mode 100644 --- /dev/null +++ b/talk/wtm2014/WTM_Talk.txt @@ -0,0 +1,206 @@ +STMGC-C7 + +Fast Software Transactional Memory for Dynamic Languages +Remi Meier + +Department of Computer Science +ETH Zürich +Armin Rigo + +www.pypy.org + +Current Situation +Dynamic languages popular +(Python, Ruby, PHP, JavaScript) +Parallelization is a problem: GIL +Atomicity & isolation for bytecode instructions + +→ Transactional Memory +Concurrency, but no parallelism + +Background: Current TM systems +TM implemented in hardware: HTM +(e.g. Intel Haswell CPU) +Limited size of transactions +Not so flexible (e.g. 
less runtime feedback) +Fast +TM implemented in software: STM +No limits +Much more flexible +A lot of overhead (2-10x) ← we want to change that + +Background: STM Overhead +Major source of STM overhead in barriers +All over the place +Isolation (Copy-On-Write, Locking, …) +Validation +Reference resolution (for COW) + + + + +O = read(O) +return O +return find_right_version(O) +right version +slowpath + +Our Goal +We don’t want to resolve references: +no “right version” check +no find_right_version() +We want +Copy-on-write (easy & efficient) +An object has always only one unique reference +Threads automatically see their version of an obj +Not to lose the flexibility of STM +Big part of the STM overhead + +C7: Implementation +How can two copies of an object share the same reference? + +Or + +How can one reference point to two different locations in memory if used in different +threads? + +C7: Segmentation +Partition virtual memory into segments +1 segment per thread +Each segment is a copy → same contents in all segments +All copies of an object are at the same segment offset (SO) in each segment +Segment 0 +Segment 1 +Virtual Memory Pages +SO +SO + +C7: Memory segmentation +Use SO as object reference +Need to translate to linear address (LA): LA = segment address + SO +Hardware supported ⇒ fast! +%gs holds a thread’s segment address +%gs::SO translated to different LAs by CPU +SO +SO +%gs for a thread +%gs for another thread +LA: %gs::SO +LA: %gs::SO +LA: NULL + +C7: Segment Offset +One SO → multiple LAs +Extremely inefficient: +N-times the memory +1 allocation ⇒ N allocations +1 write ⇒ N writes +SO +SO +%gs for a thread +%gs for another thread +LA: %gs::SO +LA: %gs::SO +LA: NULL +✓ +How to share memory? 
+ +C7: Page Sharing +Partition virtual memory into segments: each segment is backed by different memory +a +b +c +d +e +f +a’ +b’ +c’ +d’ +e’ +f’ +Segment 0 +Segment 1 +Virtual Memory Pages +Virtual File Pages +1:1 mapping + +C7: Page Sharing +Remap segment 1: Both segments share the same memory + + +a +b +c +d +e +f +Segment 0 +Segment 1 +Virtual Memory Pages +Virtual File Pages +N:1 mapping + +C7: Page Sharing +We can unshare / privatize pages +a +b +c +d +e +f +c’ +Segment 0 +Segment 1 +Virtual Memory Pages +Virtual File Pages +copy… +mixed mapping + +C7: Copy-On-Write +2-step address translation: +%gs + SO → LA +LA → memory location +Memory location can be shared or private +Initially fully shared memory +Copy-on-write ⇒ switch to private memory: each thread has a private copy + +SO never changes + +C7: Barriers +SO always translates to the right version → no “right version” check → no find_right_version() +COW check for writing to non-local object + + +C7: Summary +Very cheap barriers +Hardware accelerated address translation +Page-level COW +Object-level conflict detection + +Limitations +Huge address space needed (64bit) +configurable static max. amount of memory +Optimized for low #segments + +Evaluation +PyPy Python interpreter +GIL version vs. STM version +Overhead compared to sequential execution +System: +Intel Core i7-4770, 3.4GHz, 4 cores & HT +16 GB RAM + +Evaluation +Some benchmarks (Richards, Raytrace, …?) +scaling to 4 cores +GIL vs. 
STM + +Evaluation: Overhead +Overhead-Breakdown + +Summary +Optimized for low #CPUs +Optimized for dynamic language VMs +Overhead < 50% +Still STM, not HTM → flexibility From noreply at buildbot.pypy.org Tue Apr 8 19:04:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 19:04:51 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add a draft, a slight refactoring of the existing talk Message-ID: <20140408170451.F0E411C1C7F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5195:600ae5c3c6a9 Date: 2014-04-08 19:04 +0200 http://bitbucket.org/pypy/extradoc/changeset/600ae5c3c6a9/ Log: Add a draft, a slight refactoring of the existing talk diff --git a/talk/wtm2014/draft.txt b/talk/wtm2014/draft.txt new file mode 100644 --- /dev/null +++ b/talk/wtm2014/draft.txt @@ -0,0 +1,153 @@ + + +Title page +========== + + + +Current Situation +================= + +Dynamic languages popular +(Python, Ruby, PHP, JavaScript...) + +Parallelization is a problem: + +GIL + Atomicity & isolation for bytecode instructions + No real concurrency + +Multi-process + Exchanging data explicitly + Only suitable for some kinds of applications + + +RPython +======= + +RPython: language to generate virtual machines + + Generational garbage collector + Just-in-Time meta-compiler + Software Transactional Memory <- new + +PyPy: Python implementation in RPython + +Topaz: Ruby implementation in RPython + +etc. + + +Transactional Memory +==================== + +Goal 1. A transaction executes N bytecodes + + Existing multithred programs use multiple cores + The whole program is doing only transactions + -> Good performance is essential + +Goal 2. 
Improved multithreading model + + Better programming model for the end user + Boundaries controlled by the program + Much longer transactions + -> HTM is far too limited for now + + +Background: STM Overhead +======================== + +Major source of STM overhead in barriers +All over the place +Isolation (Copy-On-Write, Locking, …) +Validation +Reference resolution (for COW) + +O = read(O) +return O +return find_right_version(O) +right version +slowpath + + +C7: It's Just a Nice Trick +========================== + +Can two copies of an object share the same +reference? + +Can one reference point to two different +locations in memory if used from two +different threads? + + +C7: Segmentation +================ + +... + +C7: Page Sharing +================ + +... + + +(C7: Copy-On-Write is merged with the next slide) + + +C7: Read Barriers +================= + +2-step address translation (all in hardware): +%gs + SO → LA +LA → memory location + +SO never changes + +SO always translates to the right version + no “right version” check + no find_right_version() + + +C7: Write Barriers +================== + +Write Barrier does Copy-On-Write + + By copying the whole page + Only on first access to this page + Pages shared again at major collections + +Low cost, page-level COW +Object-level conflict detection + + +C7: Total costs +=============== + +Extremely cheap read and write barriers + +Integrated with garbage collection + Most new objects die quickly + One write barrier for both STM and GC + No write barriers on objects from same transaction + +Commit-time costs + Detect write-read conflicts + Copy around the objects in non-shared pages + Reasonable + + +C7: Summary +=========== + +Total overhead < 50% + +Huge address space needed (64bit) + +Optimized for low #CPUs + +Optimized for dynamic language VMs + +Still STM, not HTM → flexibility From noreply at buildbot.pypy.org Tue Apr 8 19:07:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 19:07:50 
+0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Tweak Message-ID: <20140408170750.170381C1C7F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5196:04a7829062d6 Date: 2014-04-08 19:07 +0200 http://bitbucket.org/pypy/extradoc/changeset/04a7829062d6/ Log: Tweak diff --git a/talk/wtm2014/draft.txt b/talk/wtm2014/draft.txt --- a/talk/wtm2014/draft.txt +++ b/talk/wtm2014/draft.txt @@ -109,6 +109,8 @@ no “right version” check no find_right_version() +Still need to set a flag "this object was read" + C7: Write Barriers ================== From noreply at buildbot.pypy.org Tue Apr 8 20:31:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 20:31:25 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: A test specifically for the synchronization missing on small objs Message-ID: <20140408183125.AA5DD1C3288@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1141:390e71973114 Date: 2014-04-07 18:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/390e71973114/ Log: A test specifically for the synchronization missing on small objs diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -197,3 +197,30 @@ self.start_transaction() assert lib.stm_can_move(old) == 0 + + def test_synchronize_small_obj(self): + # make a shared page, and privatize it + self.start_transaction() + new = stm_allocate(16) + self.push_root(new) + self.commit_transaction() + new = self.pop_root() + self.push_root(new) + + self.start_transaction() + stm_set_char(new, 'A') + self.commit_transaction() + + # make a new object of the same size, which should end in the + # same page + self.start_transaction() + new2 = stm_allocate(16) + stm_set_char(new2, 'a') + self.push_root(new2) + self.commit_transaction() + new2 = self.pop_root() + + # check that this new object was correctly sychronized + self.switch(1) + self.start_transaction() + assert 
stm_get_char(new2) == 'a' From noreply at buildbot.pypy.org Tue Apr 8 20:31:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 20:31:26 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add extra asserts to check that we're not remapping random unrelated pages Message-ID: <20140408183126.DB1401C3288@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1142:9b285d8d47b0 Date: 2014-04-08 20:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/9b285d8d47b0/ Log: Add extra asserts to check that we're not remapping random unrelated pages diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -95,6 +95,17 @@ (void *)((addr - stm_object_pages) % (4096UL * NB_PAGES)), (long)pgoff / NB_PAGES, (void *)((pgoff % NB_PAGES) * 4096UL))); + assert(size % 4096 == 0); + assert(size <= TOTAL_MEMORY); + assert(((uintptr_t)addr) % 4096 == 0); + assert(addr >= stm_object_pages); + assert(addr <= stm_object_pages + TOTAL_MEMORY - size); + assert(pgoff >= 0); + assert(pgoff <= (TOTAL_MEMORY - size) / 4096UL); + + /* assert remappings follow the rule that page N in one segment + can only be remapped to page N in another segment */ + assert(((addr - stm_object_pages) / 4096UL - pgoff) % NB_PAGES == 0); int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) From noreply at buildbot.pypy.org Tue Apr 8 20:31:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Apr 2014 20:31:28 +0200 (CEST) Subject: [pypy-commit] stmgc default: merge heads Message-ID: <20140408183128.2EE5C1C3288@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1143:11188e659c6f Date: 2014-04-08 20:31 +0200 http://bitbucket.org/pypy/stmgc/changeset/11188e659c6f/ Log: merge heads diff --git a/c7/gdb/gdb_stm.py b/c7/gdb/gdb_stm.py new file mode 100644 --- /dev/null +++ b/c7/gdb/gdb_stm.py @@ -0,0 +1,49 @@ +""" Adds two built-in functions: $rfs(p=0) and $rgs(p=0). 
+ +Returns the number or the address 'p', offset with the value of +the %fs or %gs register in the current thread. + +Usage: you can for example add this line in your ~/.gdbinit: + + python execfile('/path/to/gdb_stm.py') +""" +import gdb + +def gdb_function(func): + class Func(gdb.Function): + __doc__ = func.__doc__ + invoke = staticmethod(func) + Func(func.__name__) + +# ------------------------------------------------------- + +SEG_FS = 0x1003 +SEG_GS = 0x1004 + +def get_segment_register(which): + v = gdb.parse_and_eval('(long*)malloc(8)') + L = gdb.lookup_type('long') + gdb.parse_and_eval('arch_prctl(%d, %d)' % (which, int(v.cast(L)))) + result = int(v.dereference()) + gdb.parse_and_eval('free(%d)' % (int(v.cast(L)),)) + return result + +def rfsrgs(name, which): + seg = get_segment_register(which) + if name is None: + return seg + tp = name.type + if tp.code == gdb.TYPE_CODE_INT: + return name + seg + assert tp.code == gdb.TYPE_CODE_PTR + L = gdb.lookup_type('long') + return (name.cast(L) + seg).cast(tp) + + at gdb_function +def rfs(name=None): + return rfsrgs(name, SEG_FS) + + at gdb_function +def rgs(name=None): + return rfsrgs(name, SEG_GS) + diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -80,7 +80,6 @@ /* thread-safe: use the lock of pages.c to prevent any remapping from occurring under our feet */ mutex_pages_lock(); - increment_total_allocated(size + LARGE_MALLOC_OVERHEAD); /* Allocate the object with largemalloc.c from the lower addresses. 
*/ char *addr = _stm_large_malloc(size); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -52,6 +52,7 @@ #define BOTH_CHUNKS_USED 0 #define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) #define END_MARKER 0xDEADBEEF +#define MIN_ALLOC_SIZE (sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) #define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) #define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) @@ -88,7 +89,7 @@ The additional chunks of a given size are linked "vertically" in the secondary 'u' doubly-linked list. - + +-----+ | 296 | +-----+ @@ -258,8 +259,8 @@ /* it can be very small, but we need to ensure a minimal size (currently 32 bytes) */ - if (request_size < sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) - request_size = sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE; + if (request_size < MIN_ALLOC_SIZE) + request_size = MIN_ALLOC_SIZE; size_t index = largebin_index(request_size); sort_bin(index); @@ -333,6 +334,7 @@ } mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; + increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); return (char *)&mscan->d; } @@ -343,6 +345,9 @@ assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); + /* 'size' is at least MIN_ALLOC_SIZE */ + increment_total_allocated(-(chunk->size + LARGE_MALLOC_OVERHEAD)); + #ifndef NDEBUG assert(chunk->size >= sizeof(dlist_t)); assert(chunk->size <= (((char *)last_chunk) - (char *)data)); @@ -554,7 +559,6 @@ chunk = next_chunk(chunk); /* go to the first non-free chunk */ while (chunk != last_chunk) { - /* here, the chunk we're pointing to is not free */ assert(chunk->prev_size != THIS_CHUNK_FREE); @@ -566,8 +570,6 @@ /* use the callback to know if 'chunk' contains an object that survives or dies */ if (!_largemalloc_sweep_keep(chunk)) { - size_t size = chunk->size; - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); 
_stm_large_free((char *)&chunk->d); /* dies */ } chunk = mnext; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -243,7 +243,6 @@ } char *realobj = REAL_ADDRESS(pseg->pub.segment_base, item->addr); ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; From noreply at buildbot.pypy.org Tue Apr 8 21:42:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 8 Apr 2014 21:42:13 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: backout ebd9a9125c8c: len shouldn't raise a ValueError here. refs cpython Message-ID: <20140408194213.1FC0A1C10C2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70492:030da16dc694 Date: 2014-04-08 12:40 -0700 http://bitbucket.org/pypy/pypy/changeset/030da16dc694/ Log: backout ebd9a9125c8c: len shouldn't raise a ValueError here. refs cpython issue21173 diff --git a/lib-python/3/test/test_weakref.py b/lib-python/3/test/test_weakref.py --- a/lib-python/3/test/test_weakref.py +++ b/lib-python/3/test/test_weakref.py @@ -1153,7 +1153,6 @@ self.assertEqual(dict, ddict) with testcontext() as (k, v): dict.clear() - gc_collect() self.assertEqual(len(dict), 0) def test_weak_keys_destroy_while_iterating(self): From noreply at buildbot.pypy.org Wed Apr 9 01:32:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 01:32:29 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: cpython issue12983: bytes literals with invalid \x escape now raise a Message-ID: <20140408233229.CF2BE1C1041@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70493:f8d19753723f Date: 2014-04-08 16:20 -0700 http://bitbucket.org/pypy/pypy/changeset/f8d19753723f/ Log: cpython issue12983: bytes literals with invalid \x escape now raise a SyntaxError diff --git 
a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -1122,9 +1122,10 @@ sub_strings_w = [parsestring.parsestr(space, encoding, s.value) for s in atom_node.children] except error.OperationError, e: - if not e.match(space, space.w_UnicodeError): + if not (e.match(space, space.w_UnicodeError) or + e.match(space, space.w_ValueError)): raise - # UnicodeError in literal: turn into SyntaxError + # Unicode/ValueError in literal: turn into SyntaxError self.error(e.errorstr(space), atom_node) sub_strings_w = [] # please annotator # Implement implicit string concatenation. diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -1307,3 +1307,7 @@ if1, if2 = comps[0].ifs assert isinstance(if1, ast.Name) assert isinstance(if2, ast.Name) + + def test_cpython_issue12983(self): + raises(SyntaxError, self.get_ast, r"""b'\x'""") + raises(SyntaxError, self.get_ast, r"""b'\x0'""") diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -191,7 +191,8 @@ ps += 2 else: if errors == 'strict': - raise_app_valueerror(space, 'invalid \\x escape') + raise_app_valueerror( + space, "invalid \\x escape at position %d" % (ps - 2)) elif errors == 'replace': builder.append('?') elif errors == 'ignore': From noreply at buildbot.pypy.org Wed Apr 9 01:32:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 01:32:31 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: workaround for staticmethod descriptors on PyPy Message-ID: <20140408233231.40FCC1C1041@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 
Changeset: r70494:52f0a7a337d7 Date: 2014-04-08 16:21 -0700 http://bitbucket.org/pypy/pypy/changeset/52f0a7a337d7/ Log: workaround for staticmethod descriptors on PyPy diff --git a/lib-python/3/test/test_pydoc.py b/lib-python/3/test/test_pydoc.py --- a/lib-python/3/test/test_pydoc.py +++ b/lib-python/3/test/test_pydoc.py @@ -417,6 +417,8 @@ # What we expect to get back: everything on object... expected = dict(vars(object)) + # __new__'s descriptor can be a staticmethod on PyPy + expected['__new__'] = object.__new__ # ...plus our unbound method... expected['method_returning_true'] = TestClass.method_returning_true # ...but not the non-methods on object. From noreply at buildbot.pypy.org Wed Apr 9 08:53:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 08:53:07 +0200 (CEST) Subject: [pypy-commit] stmgc default: Oooops. The shadow stack is configured to contain only 1000 entries. Message-ID: <20140409065307.122351D2380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1144:8077f97539fc Date: 2014-04-09 08:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/8077f97539fc/ Log: Oooops. The shadow stack is configured to contain only 1000 entries. That's the reason PyPy crashes randomly: it overflows and overwrites random other data structures. diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -35,8 +35,6 @@ #define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) #define WRITELOCK_END READMARKER_END -#define SHADOW_STACK_SIZE 1000 - enum /* stm_flags */ { /* This flag is set on non-nursery objects. It forces stm_write() to call _stm_write_slowpath(). 
diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -134,7 +134,7 @@ static void _init_shadow_stack(stm_thread_local_t *tl) { struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) - malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s)); + malloc(STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s)); assert(s); tl->shadowstack = s; tl->shadowstack_base = s; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -258,6 +258,10 @@ void stm_setup(void); void stm_teardown(void); +/* The size of each shadow stack, in number of entries. + Must be big enough to accomodate all STM_PUSH_ROOTs! */ +#define STM_SHADOW_STACK_DEPTH 163840 + /* Push and pop roots from/to the shadow stack. Only allowed inside transaction. */ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) From noreply at buildbot.pypy.org Wed Apr 9 08:53:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 08:53:08 +0200 (CEST) Subject: [pypy-commit] stmgc default: merge heads Message-ID: <20140409065308.5C7731D2380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1145:16bd0be67e44 Date: 2014-04-09 08:51 +0200 http://bitbucket.org/pypy/stmgc/changeset/16bd0be67e44/ Log: merge heads diff --git a/c7/gdb/gdb_stm.py b/c7/gdb/gdb_stm.py new file mode 100644 --- /dev/null +++ b/c7/gdb/gdb_stm.py @@ -0,0 +1,49 @@ +""" Adds two built-in functions: $rfs(p=0) and $rgs(p=0). + +Returns the number or the address 'p', offset with the value of +the %fs or %gs register in the current thread. 
+ +Usage: you can for example add this line in your ~/.gdbinit: + + python execfile('/path/to/gdb_stm.py') +""" +import gdb + +def gdb_function(func): + class Func(gdb.Function): + __doc__ = func.__doc__ + invoke = staticmethod(func) + Func(func.__name__) + +# ------------------------------------------------------- + +SEG_FS = 0x1003 +SEG_GS = 0x1004 + +def get_segment_register(which): + v = gdb.parse_and_eval('(long*)malloc(8)') + L = gdb.lookup_type('long') + gdb.parse_and_eval('arch_prctl(%d, %d)' % (which, int(v.cast(L)))) + result = int(v.dereference()) + gdb.parse_and_eval('free(%d)' % (int(v.cast(L)),)) + return result + +def rfsrgs(name, which): + seg = get_segment_register(which) + if name is None: + return seg + tp = name.type + if tp.code == gdb.TYPE_CODE_INT: + return name + seg + assert tp.code == gdb.TYPE_CODE_PTR + L = gdb.lookup_type('long') + return (name.cast(L) + seg).cast(tp) + + at gdb_function +def rfs(name=None): + return rfsrgs(name, SEG_FS) + + at gdb_function +def rgs(name=None): + return rfsrgs(name, SEG_GS) + diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -80,7 +80,6 @@ /* thread-safe: use the lock of pages.c to prevent any remapping from occurring under our feet */ mutex_pages_lock(); - increment_total_allocated(size + LARGE_MALLOC_OVERHEAD); /* Allocate the object with largemalloc.c from the lower addresses. 
*/ char *addr = _stm_large_malloc(size); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -52,6 +52,7 @@ #define BOTH_CHUNKS_USED 0 #define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) #define END_MARKER 0xDEADBEEF +#define MIN_ALLOC_SIZE (sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) #define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) #define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) @@ -88,7 +89,7 @@ The additional chunks of a given size are linked "vertically" in the secondary 'u' doubly-linked list. - + +-----+ | 296 | +-----+ @@ -258,8 +259,8 @@ /* it can be very small, but we need to ensure a minimal size (currently 32 bytes) */ - if (request_size < sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) - request_size = sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE; + if (request_size < MIN_ALLOC_SIZE) + request_size = MIN_ALLOC_SIZE; size_t index = largebin_index(request_size); sort_bin(index); @@ -333,6 +334,7 @@ } mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; + increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); return (char *)&mscan->d; } @@ -343,6 +345,9 @@ assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); + /* 'size' is at least MIN_ALLOC_SIZE */ + increment_total_allocated(-(chunk->size + LARGE_MALLOC_OVERHEAD)); + #ifndef NDEBUG assert(chunk->size >= sizeof(dlist_t)); assert(chunk->size <= (((char *)last_chunk) - (char *)data)); @@ -554,7 +559,6 @@ chunk = next_chunk(chunk); /* go to the first non-free chunk */ while (chunk != last_chunk) { - /* here, the chunk we're pointing to is not free */ assert(chunk->prev_size != THIS_CHUNK_FREE); @@ -566,8 +570,6 @@ /* use the callback to know if 'chunk' contains an object that survives or dies */ if (!_largemalloc_sweep_keep(chunk)) { - size_t size = chunk->size; - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); 
_stm_large_free((char *)&chunk->d); /* dies */ } chunk = mnext; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -243,7 +243,6 @@ } char *realobj = REAL_ADDRESS(pseg->pub.segment_base, item->addr); ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; From noreply at buildbot.pypy.org Wed Apr 9 08:53:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 08:53:09 +0200 (CEST) Subject: [pypy-commit] stmgc default: Bad Remi, no cookie :-) The tests don't run because these lines compute Message-ID: <20140409065309.865DA1D2380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1146:859b241ec058 Date: 2014-04-09 08:52 +0200 http://bitbucket.org/pypy/stmgc/changeset/859b241ec058/ Log: Bad Remi, no cookie :-) The tests don't run because these lines compute unused results now. 
diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -241,8 +241,6 @@ mutex_pages_lock(); locked = true; } - char *realobj = REAL_ADDRESS(pseg->pub.segment_base, item->addr); - ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; From noreply at buildbot.pypy.org Wed Apr 9 08:57:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 08:57:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/859b241ec058 Message-ID: <20140409065716.50A961D2380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70495:b71c564ec8ea Date: 2014-04-09 08:56 +0200 http://bitbucket.org/pypy/pypy/changeset/b71c564ec8ea/ Log: import stmgc/859b241ec058 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -cfd37feb0f23+ +859b241ec058 diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -36,8 +36,6 @@ #define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) #define WRITELOCK_END READMARKER_END -#define SHADOW_STACK_SIZE 1000 - enum /* stm_flags */ { /* This flag is set on non-nursery objects. It forces stm_write() to call _stm_write_slowpath(). 
diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -242,8 +242,6 @@ mutex_pages_lock(); locked = true; } - char *realobj = REAL_ADDRESS(pseg->pub.segment_base, item->addr); - ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -96,6 +96,17 @@ (void *)((addr - stm_object_pages) % (4096UL * NB_PAGES)), (long)pgoff / NB_PAGES, (void *)((pgoff % NB_PAGES) * 4096UL))); + assert(size % 4096 == 0); + assert(size <= TOTAL_MEMORY); + assert(((uintptr_t)addr) % 4096 == 0); + assert(addr >= stm_object_pages); + assert(addr <= stm_object_pages + TOTAL_MEMORY - size); + assert(pgoff >= 0); + assert(pgoff <= (TOTAL_MEMORY - size) / 4096UL); + + /* assert remappings follow the rule that page N in one segment + can only be remapped to page N in another segment */ + assert(((addr - stm_object_pages) / 4096UL - pgoff) % NB_PAGES == 0); int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -135,7 +135,7 @@ static void _init_shadow_stack(stm_thread_local_t *tl) { struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) - malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s)); + malloc(STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s)); assert(s); tl->shadowstack = s; tl->shadowstack_base = s; diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- 
a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -259,6 +259,10 @@ void stm_setup(void); void stm_teardown(void); +/* The size of each shadow stack, in number of entries. + Must be big enough to accomodate all STM_PUSH_ROOTs! */ +#define STM_SHADOW_STACK_DEPTH 163840 + /* Push and pop roots from/to the shadow stack. Only allowed inside transaction. */ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) From noreply at buildbot.pypy.org Wed Apr 9 09:23:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 09:23:15 +0200 (CEST) Subject: [pypy-commit] stmgc default: Use a trap page at the end of the shadow stack. Message-ID: <20140409072315.DCEA81D23C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1147:f77259e8fb8f Date: 2014-04-09 09:23 +0200 http://bitbucket.org/pypy/stmgc/changeset/f77259e8fb8f/ Log: Use a trap page at the end of the shadow stack. diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -9,7 +9,7 @@ PROT_READ | PROT_WRITE, MAP_PAGES_FLAGS, -1, 0); if (result == MAP_FAILED) - stm_fatalerror("%s failed: %m\n", reason); + stm_fatalerror("%s failed: %m", reason); return result; } @@ -131,17 +131,37 @@ teardown_pages(); } +static void _shadowstack_trap_page(char *start, int prot) +{ + size_t bsize = STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s); + char *end = start + bsize + 4095; + end -= (((uintptr_t)end) & 4095); + mprotect(end, 4096, prot); +} + static void _init_shadow_stack(stm_thread_local_t *tl) { - struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) - malloc(STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s)); - assert(s); + size_t bsize = STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s); + char *start = malloc(bsize + 8192); /* for the trap page, plus rounding */ + if (!start) + stm_fatalerror("can't allocate shadow stack"); + + /* set up a trap page: if the shadowstack 
overflows, it will + crash in a clean segfault */ + _shadowstack_trap_page(start, PROT_NONE); + + struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start; tl->shadowstack = s; tl->shadowstack_base = s; } static void _done_shadow_stack(stm_thread_local_t *tl) { + assert(tl->shadowstack >= tl->shadowstack_base); + + char *start = (char *)tl->shadowstack_base; + _shadowstack_trap_page(start, PROT_READ | PROT_WRITE); + free(tl->shadowstack_base); tl->shadowstack = NULL; tl->shadowstack_base = NULL; From noreply at buildbot.pypy.org Wed Apr 9 10:07:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 10:07:41 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Deactivate the submodule '__pypy__.thread' if we're translating without Message-ID: <20140409080741.B3E751D24E9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70496:64954c862970 Date: 2014-04-09 10:07 +0200 http://bitbucket.org/pypy/pypy/changeset/64954c862970/ Log: Deactivate the submodule '__pypy__.thread' if we're translating without threads. 
diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -24,7 +24,7 @@ self.submodules_w = [] def install(self): - """NOT_RPYTHON: install this module, and it's submodules into + """NOT_RPYTHON: install this module, and its submodules into space.builtin_modules""" Module.install(self) if hasattr(self, "submodules"): @@ -33,6 +33,8 @@ for sub_name, module_cls in self.submodules.iteritems(): module_name = space.wrap("%s.%s" % (name, sub_name)) m = module_cls(space, module_name) + if hasattr(m, 'activate') and not m.activate(space): + continue m.install() self.submodules_w.append(m) diff --git a/pypy/interpreter/test/test_mixedmodule.py b/pypy/interpreter/test/test_mixedmodule.py --- a/pypy/interpreter/test/test_mixedmodule.py +++ b/pypy/interpreter/test/test_mixedmodule.py @@ -18,11 +18,25 @@ interpleveldefs = {} appleveldefs = {} + class SubModule1(MixedModule): + interpleveldefs = {} + appleveldefs = {} + def activate(self, space): + return True + + class SubModule2(MixedModule): + interpleveldefs = {} + appleveldefs = {} + def activate(self, space): + return False + class Module(MixedModule): interpleveldefs = {} appleveldefs = {} submodules = { - "sub": SubModule + "sub": SubModule, + "sub1": SubModule1, + "sub2": SubModule2, } m = Module(self.space, self.space.wrap("test_module")) @@ -30,6 +44,8 @@ assert self.space.builtin_modules["test_module"] is m assert isinstance(self.space.builtin_modules["test_module.sub"], SubModule) + assert "test_module.sub1" in self.space.builtin_modules + assert "test_module.sub2" not in self.space.builtin_modules class AppTestMixedModule(object): pytestmark = py.test.mark.skipif("config.option.runappdirect") diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -43,6 +43,8 @@ 'discard_last_abort_info': 
'interp_atomic.discard_last_abort_info', 'getsegmentlimit': 'interp_atomic.getsegmentlimit', } + def activate(self, space): + return self.space.config.objspace.usemodules.thread class IntOpModule(MixedModule): From noreply at buildbot.pypy.org Wed Apr 9 13:05:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 13:05:46 +0200 (CEST) Subject: [pypy-commit] stmgc default: Just tweaks Message-ID: <20140409110546.28A8D1D23E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1148:6aed6a111e7a Date: 2014-04-09 12:50 +0200 http://bitbucket.org/pypy/stmgc/changeset/6aed6a111e7a/ Log: Just tweaks diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -57,11 +57,14 @@ a young outside nursery object. */ assert(_is_in_nursery(item)); object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; + ssize_t size = 16; - /* the following checks are done like in nursery.c: */ - if (!(item->stm_flags & GCFLAG_HAS_SHADOW) - || (pforwarded_array[0] != GCWORD_MOVED)) { - /* weakref dies */ + /* check if the weakref object was moved out of the nursery */ + if (pforwarded_array[0] != GCWORD_MOVED) { + /* no: weakref dies */ +#ifndef NDEBUG + *WEAKREF_PTR(item, size) = (object_t *)-99; +#endif continue; } @@ -69,15 +72,13 @@ assert(!_is_young(item)); - ssize_t size = 16; object_t *pointing_to = *WEAKREF_PTR(item, size); assert(pointing_to != NULL); if (_is_in_nursery(pointing_to)) { object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)pointing_to; - /* the following checks are done like in nursery.c: */ - if (!(pointing_to->stm_flags & GCFLAG_HAS_SHADOW) - || (pforwarded_array[0] != GCWORD_MOVED)) { + /* check if the target was moved out of the nursery */ + if (pforwarded_array[0] != GCWORD_MOVED) { /* pointing_to dies */ _set_weakref_in_all_segments(item, NULL); continue; /* no need to remember in old_weakrefs */ @@ -96,7 +97,9 @@ _set_weakref_in_all_segments(item, NULL); continue; /* no need to 
remember in old_weakrefs */ } - /* pointing_to was already old */ + /* pointing_to is either a surviving young object outside + the nursery, or it was already old; in both cases keeping + the currently stored pointer is what we need */ } LIST_APPEND(STM_PSEGMENT->old_weakrefs, item); })); @@ -128,7 +131,7 @@ stm_char *wr = (stm_char *)WEAKREF_PTR(weakref, size); char *real_wr = REAL_ADDRESS(pseg->pub.segment_base, wr); object_t *pointing_to = *(object_t **)real_wr; - assert(pointing_to != NULL); + assert((uintptr_t)pointing_to >= NURSERY_END); if (!mark_visited_test(pointing_to)) { //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); _set_weakref_in_all_segments(weakref, NULL); From noreply at buildbot.pypy.org Wed Apr 9 13:05:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 13:05:47 +0200 (CEST) Subject: [pypy-commit] stmgc default: A failing test Message-ID: <20140409110547.36E8D1D23E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1149:364decc0267b Date: 2014-04-09 13:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/364decc0267b/ Log: A failing test diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -360,3 +360,40 @@ self.switch(1) make_wr() stm_major_collect() + + +class TestManyThreads(BaseTest): + NB_THREADS = NB_SEGMENTS + + def test_weakref_bug3(self): + # make an object + self.start_transaction() + lp0 = stm_allocate(16) + self.push_root(lp0) + self.commit_transaction() + lp0 = self.pop_root() + self.push_root(lp0) + # + # privatize the page in all segments + for i in range(NB_SEGMENTS-1, -1, -1): + self.switch(i) + self.start_transaction() + stm_set_char(lp0, 'A') + self.commit_transaction() + # + self.start_transaction() + lp2 = stm_allocate(16) + self.push_root(lp2) + lp1 = stm_allocate_weakref(lp2) + self.push_root(lp1) + self.commit_transaction() + lp1 = self.pop_root() + lp2 = self.pop_root() + 
self.push_root(lp2) + self.push_root(lp1) + # the commit copies the weakref to all segments, but misses + # segment #0 + # + self.start_transaction() + stm_major_collect() # reshare all, keeping only segment #0 + assert stm_get_weakref(lp1) == lp2 From noreply at buildbot.pypy.org Wed Apr 9 13:05:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 13:05:48 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix Message-ID: <20140409110548.44B511D23E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1150:0492e398156b Date: 2014-04-09 13:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/0492e398156b/ Log: fix diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -34,7 +34,7 @@ stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); long i; - for (i = 1; i <= NB_SEGMENTS; i++) { + for (i = 0; i <= NB_SEGMENTS; i++) { char *base = get_segment_base(i); object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); *ref_loc = value; From noreply at buildbot.pypy.org Wed Apr 9 14:04:11 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 9 Apr 2014 14:04:11 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Organized the benchmark/util methods on SmallInteger in the minibluebookdebug.image Message-ID: <20140409120411.9541A1D2950@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r788:869b4098cc76 Date: 2014-04-07 21:43 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/869b4098cc76/ Log: Organized the benchmark/util methods on SmallInteger in the minibluebookdebug.image diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image index ca327f9e875f01098f8dbf0c54e3689b1294d771..1788c3b4bcb37fa0c965acf19424f42fb169e7c9 GIT binary patch [cut] From noreply at buildbot.pypy.org Wed Apr 9 14:04:12 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 9 Apr 2014 14:04:12 +0200 (CEST) Subject: 
[pypy-commit] lang-smalltalk storage: Added some more methods to SmallInteger. Message-ID: <20140409120412.DCCED1D2950@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r789:b8fbdd57cd26 Date: 2014-04-08 11:49 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b8fbdd57cd26/ Log: Added some more methods to SmallInteger. diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image index 1788c3b4bcb37fa0c965acf19424f42fb169e7c9..eefce23907af1a702f0b4e89b85c675a3dffe08b GIT binary patch [cut] From noreply at buildbot.pypy.org Wed Apr 9 14:04:13 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 9 Apr 2014 14:04:13 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Updated traces in jit tests. They basically stayed the same. Message-ID: <20140409120413.E66381D2950@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r790:63607df35291 Date: 2014-04-08 12:14 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/63607df35291/ Log: Updated traces in jit tests. They basically stayed the same. diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -8,38 +8,38 @@ 0 to: 1000000000 do: [:t|nil]. 
""") self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i57 = int_le(i50, 1000000000), - guard_true(i57, descr=), - i58 = int_add(i50, 1), - i59 = int_sub(i54, 1), - setfield_gc(ConstPtr(ptr51), i59, descr=), - i60 = int_le(i59, 0), - guard_false(i60, descr=), - jump(p0, p3, i58, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i43, i59, descr=TargetToken(48805440)) + i59 = int_le(i51, 1000000000), + guard_true(i59, descr=), + i60 = int_add(i51, 1), + i61 = int_sub(i55, 1), + setfield_gc(ConstPtr(ptr52), i61, descr=), + i62 = int_le(i61, 0), + guard_false(i62, descr=), + jump(p0, p3, i60, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i61, descr=TargetToken(53667152)) """) self.assert_matches(traces[0].bridges[0], """ + f18 = call(ConstClass(ll_time.ll_time_time), descr=), setfield_gc(ConstPtr(ptr19), 10000, descr=), - guard_no_exception(descr=), - f22 = float_sub(f18, 1395138051.488000), + guard_no_exception(descr=), + f22 = float_sub(f18, 1396948969.119000), f24 = float_mul(f22, 1000.000000), i25 = cast_float_to_int(f24), i27 = int_and(i25, 2147483647), i28 = getfield_gc(ConstPtr(ptr19), descr=), i29 = int_is_zero(i28), - guard_true(i29, descr=), - label(p0, p1, i16, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, descr=TargetToken(58860256)), - guard_class(p0, ConstClass(MethodContextShadow), descr=), + guard_true(i29, descr=), + label(p0, p1, i16, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, descr=TargetToken(48874112)), + guard_class(p0, 23085560, descr=), p31 = getfield_gc(p0, descr=), - guard_value(p31, ConstPtr(ptr32), descr=), - i33 = getfield_gc_pure(p0, descr=), - guard_not_invalidated(descr=), - i35 = int_le(i16, 1000000000), - guard_true(i35, descr=), - i37 = int_add(i16, 1), + p32 = getfield_gc(p31, descr=), + guard_value(p31, ConstPtr(ptr33), descr=), + guard_value(p32, ConstPtr(ptr34), descr=), + i36 = int_le(i16, 1000000000), + guard_true(i36, 
descr=), + i38 = int_add(i16, 1), setfield_gc(ConstPtr(ptr19), 9999, descr=), - jump(p0, p1, i37, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, i33, 9999, descr=TargetToken(58766912)) + jump(p0, p1, i38, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, 9999, descr=TargetToken(48817488)) """) def test_constant_string(self, spy, tmpdir): @@ -50,15 +50,16 @@ ^ i """) self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i61 = int_le(i55, 10000), - guard_true(i61, descr=), - i62 = int_add(i55, 1), - i63 = int_sub(i58, 1), - setfield_gc(ConstPtr(ptr52), i63, descr=), - i64 = int_le(i63, 0), - guard_false(i64, descr=), - jump(p0, p3, i62, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i43, i63, descr=TargetToken(55305792)) + i77 = int_le(i69, 10000), + guard_true(i77, descr=), + guard_not_invalidated(descr=), + i78 = int_add_ovf(i69, i68), + guard_no_overflow(descr=), + i79 = int_sub(i72, 1), + setfield_gc(ConstPtr(ptr66), i79, descr=), + i80 = int_le(i79, 0), + guard_false(i80, descr=), + jump(p0, p3, i78, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i68, i79, descr=TargetToken(16561632)) """) def test_constant_string_equal2(self, spy, tmpdir): @@ -74,17 +75,16 @@ ^ i """) self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i65 = int_le(i58, 100000), - guard_true(i65, descr=), - i66 = int_add(i58, 1), - i67 = int_sub(i62, 2), - setfield_gc(ConstPtr(ptr59), i67, descr=), - i68 = int_le(i67, 0), - guard_false(i68, descr=), - i70 = arraylen_gc(p50, descr=), - i71 = arraylen_gc(p54, descr=), - jump(p0, p3, i66, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i43, i67, p50, p54, descr=TargetToken(48301680)) + i79 = int_le(i71, 100000), + guard_true(i79, descr=), + i80 = int_add(i71, 1), + i81 = int_sub(i75, 1), + setfield_gc(ConstPtr(ptr72), i81, descr=), + i82 = int_le(i81, 0), + guard_false(i82, descr=), + i84 = arraylen_gc(p65, 
descr=), + i85 = arraylen_gc(p67, descr=), + jump(p0, p3, i80, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i81, p65, p67, descr=TargetToken(57534304)) """) def test_constant_string_var_equal(self, spy, tmpdir): @@ -104,15 +104,14 @@ ^ i """) self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i73 = int_le(i62, 100000), - guard_true(i73, descr=), - i74 = int_add(i62, 1), - i77 = int_sub(i70, 1), - setfield_gc(ConstPtr(ptr67), i77, descr=), - i78 = int_le(i77, 0), - guard_false(i78, descr=), - jump(p0, p3, i74, p8, p10, xx, p12, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, i77, descr=TargetToken(157713840)) + i72 = int_le(i64, 100000), + guard_true(i72, descr=), + i73 = int_add(i64, 1), + i74 = int_sub(i68, 1), + setfield_gc(ConstPtr(ptr65), i74, descr=), + i75 = int_le(i74, 0), + guard_false(i75, descr=), + jump(p0, p3, i73, p8, p10, p12, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, i74, descr=TargetToken(48821968)) """) def test_bitblt_fillWhite(self, spy, tmpdir): @@ -122,245 +121,244 @@ while len(traces) == 0 and retries > 0: retries -= 1 traces = self.run(spy, tmpdir, """ - Display beDisplay. - 1 to: 10000 do: [:i | Display fillWhite]. + Display beDisplay. 1 to: 10000 do: [:i | Display fillWhite]. 
""") self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i584 = int_le(2, i151), - guard_false(i584, descr=), - i585 = getfield_gc_pure(p576, descr=), - i586 = int_add_ovf(i585, i160), - guard_no_overflow(descr=), - i587 = getfield_gc_pure(p579, descr=), - i588 = int_add_ovf(i587, i169), - guard_no_overflow(descr=), - i589 = int_add_ovf(i174, 1), - guard_no_overflow(descr=), - i590 = int_sub(i572, 2), - setfield_gc(ConstPtr(ptr175), i590, descr=), - i591 = int_le(i590, 0), - guard_false(i591, descr=), - i592 = int_le(i589, i185), - guard_true(i592, descr=), - i593 = getfield_gc_pure(p350, descr=), - i594 = int_mod(i593, i219), - i595 = int_rshift(i594, 31), - i596 = int_and(i219, i595), - i597 = int_add(i594, i596), - i598 = int_add_ovf(1, i597), - guard_no_overflow(descr=), - i599 = int_ge(i597, 0), - guard_true(i599, descr=), - i600 = int_lt(i597, i219), - guard_true(i600, descr=), - i601 = getarrayitem_gc(p241, i597, descr=), - i602 = uint_lt(i601, 0), - guard_false(i602, descr=), - i603 = uint_lt(i601, 2147483647), - guard_true(i603, descr=), - i604 = int_add_ovf(i593, i250), - guard_no_overflow(descr=), - i605 = int_ge(i601, 0), - guard_true(i605, descr=), - i606 = int_and(i601, i601), - i607 = uint_lt(i606, 2147483647), - guard_true(i607, descr=), - i608 = int_add_ovf(i588, 1), - guard_no_overflow(descr=), - i609 = int_ge(i588, 0), - guard_true(i609, descr=), - i610 = int_lt(i588, i281), - guard_true(i610, descr=), - i611 = getarrayitem_raw(i283, i588, descr=), - i612 = uint_lt(i611, 0), - guard_false(i612, descr=), - i613 = uint_lt(i611, 2147483647), - guard_true(i613, descr=), - i614 = int_and(i318, i606), - i615 = uint_lt(i614, 2147483647), - guard_true(i615, descr=), - i616 = getarrayitem_raw(i283, i588, descr=), - i617 = uint_lt(i616, 0), - guard_false(i617, descr=), - i618 = uint_lt(i616, 2147483647), - guard_true(i618, descr=), - i619 = int_ge(i616, 0), - guard_true(i619, descr=), - i620 = int_and(i329, i616), - i621 = 
uint_lt(i620, 2147483647), - guard_true(i621, descr=), - i622 = int_ge(i614, 0), - guard_true(i622, descr=), - i623 = int_or(i614, i620), - i624 = uint_lt(i623, 2147483647), - guard_true(i624, descr=), - setarrayitem_raw(i283, i588, i623, descr=), - i626 = int_lshift(i588, 3), - i627 = int_ge(i626, i281), - guard_false(i627, descr=), - i628 = uint_rshift(i623, i373), - i629 = int_lshift(i623, i360), - i630 = uint_rshift(i629, i373), - i631 = int_lshift(i630, 8), - i632 = int_or(i628, i631), - i633 = int_lshift(i629, i360), - i634 = uint_rshift(i633, i373), - i635 = int_lshift(i634, 16), - i636 = int_or(i632, i635), - i637 = int_lshift(i633, i360), - i638 = uint_rshift(i637, i373), - i639 = int_lshift(i638, 24), - i640 = int_or(i636, i639), - i641 = int_lshift(i637, i360), - setarrayitem_raw(51118152, i626, i640, descr=), - i642 = int_add(i626, 1), - i643 = int_ge(i642, i281), - guard_false(i643, descr=), - i644 = uint_rshift(i641, i373), - i645 = int_lshift(i641, i360), - i646 = uint_rshift(i645, i373), - i647 = int_lshift(i646, 8), - i648 = int_or(i644, i647), - i649 = int_lshift(i645, i360), - i650 = uint_rshift(i649, i373), - i651 = int_lshift(i650, 16), - i652 = int_or(i648, i651), - i653 = int_lshift(i649, i360), - i654 = uint_rshift(i653, i373), - i655 = int_lshift(i654, 24), - i656 = int_or(i652, i655), - i657 = int_lshift(i653, i360), - setarrayitem_raw(51118152, i642, i656, descr=), - i658 = int_add(i642, 1), - i659 = int_ge(i658, i281), - guard_false(i659, descr=), - i660 = uint_rshift(i657, i373), - i661 = int_lshift(i657, i360), - i662 = uint_rshift(i661, i373), - i663 = int_lshift(i662, 8), - i664 = int_or(i660, i663), - i665 = int_lshift(i661, i360), - i666 = uint_rshift(i665, i373), - i667 = int_lshift(i666, 16), - i668 = int_or(i664, i667), - i669 = int_lshift(i665, i360), - i670 = uint_rshift(i669, i373), - i671 = int_lshift(i670, 24), - i672 = int_or(i668, i671), - i673 = int_lshift(i669, i360), - setarrayitem_raw(51118152, i658, i672, descr=), - 
i674 = int_add(i658, 1), - i675 = int_ge(i674, i281), - guard_false(i675, descr=), - i676 = uint_rshift(i673, i373), - i677 = int_lshift(i673, i360), - i678 = uint_rshift(i677, i373), - i679 = int_lshift(i678, 8), - i680 = int_or(i676, i679), - i681 = int_lshift(i677, i360), - i682 = uint_rshift(i681, i373), - i683 = int_lshift(i682, 16), - i684 = int_or(i680, i683), - i685 = int_lshift(i681, i360), - i686 = uint_rshift(i685, i373), - i687 = int_lshift(i686, 24), - i688 = int_or(i684, i687), - i689 = int_lshift(i685, i360), - setarrayitem_raw(51118152, i674, i688, descr=), - i690 = int_add(i674, 1), - i691 = int_ge(i690, i281), - guard_false(i691, descr=), - i692 = uint_rshift(i689, i373), - i693 = int_lshift(i689, i360), - i694 = uint_rshift(i693, i373), - i695 = int_lshift(i694, 8), - i696 = int_or(i692, i695), - i697 = int_lshift(i693, i360), - i698 = uint_rshift(i697, i373), - i699 = int_lshift(i698, 16), - i700 = int_or(i696, i699), - i701 = int_lshift(i697, i360), - i702 = uint_rshift(i701, i373), - i703 = int_lshift(i702, 24), - i704 = int_or(i700, i703), - i705 = int_lshift(i701, i360), - setarrayitem_raw(51118152, i690, i704, descr=), - i706 = int_add(i690, 1), - i707 = int_ge(i706, i281), - guard_false(i707, descr=), - i708 = uint_rshift(i705, i373), - i709 = int_lshift(i705, i360), - i710 = uint_rshift(i709, i373), - i711 = int_lshift(i710, 8), - i712 = int_or(i708, i711), - i713 = int_lshift(i709, i360), - i714 = uint_rshift(i713, i373), - i715 = int_lshift(i714, 16), - i716 = int_or(i712, i715), - i717 = int_lshift(i713, i360), - i718 = uint_rshift(i717, i373), - i719 = int_lshift(i718, 24), - i720 = int_or(i716, i719), - i721 = int_lshift(i717, i360), - setarrayitem_raw(51118152, i706, i720, descr=), - i722 = int_add(i706, 1), - i723 = int_ge(i722, i281), - guard_false(i723, descr=), - i724 = uint_rshift(i721, i373), - i725 = int_lshift(i721, i360), - i726 = uint_rshift(i725, i373), - i727 = int_lshift(i726, 8), - i728 = int_or(i724, i727), - i729 = 
int_lshift(i725, i360), - i730 = uint_rshift(i729, i373), - i731 = int_lshift(i730, 16), - i732 = int_or(i728, i731), - i733 = int_lshift(i729, i360), - i734 = uint_rshift(i733, i373), - i735 = int_lshift(i734, 24), - i736 = int_or(i732, i735), - i737 = int_lshift(i733, i360), - setarrayitem_raw(51118152, i722, i736, descr=), - i738 = int_add(i722, 1), - i739 = int_ge(i738, i281), - guard_false(i739, descr=), - i740 = uint_rshift(i737, i373), - i741 = int_lshift(i737, i360), - i742 = uint_rshift(i741, i373), - i743 = int_lshift(i742, 8), - i744 = int_or(i740, i743), - i745 = int_lshift(i741, i360), - i746 = uint_rshift(i745, i373), - i747 = int_lshift(i746, 16), - i748 = int_or(i744, i747), - i749 = int_lshift(i745, i360), - i750 = uint_rshift(i749, i373), - i751 = int_lshift(i750, 24), - i752 = int_or(i748, i751), - i753 = int_lshift(i749, i360), - setarrayitem_raw(51118152, i738, i752, descr=), - i754 = int_add(i738, 1), - i755 = int_add_ovf(i586, i558), - guard_no_overflow(descr=), - i756 = int_add_ovf(i588, i558), - guard_no_overflow(descr=), - i757 = int_sub(i590, 23), - setfield_gc(ConstPtr(ptr175), i757, descr=), - i758 = int_le(i757, 0), - guard_false(i758, descr=), - p759 = new_with_vtable(18295080), - setfield_gc(p759, i755, descr=), - setarrayitem_gc(p145, 34, p759, descr=), - p760 = new_with_vtable(18295080), - setfield_gc(p760, i756, descr=), - setarrayitem_gc(p145, 35, p760, descr=), - p761 = new_with_vtable(18295080), - setfield_gc(p761, i604, descr=), - setarrayitem_gc(p145, 20, p761, descr=), - i762 = arraylen_gc(p145, descr=), - i763 = arraylen_gc(p568, descr=), - jump(p0, p3, p8, i601, p582, i606, p18, i589, p38, p40, p42, p44, p46, p48, p50, p52, p54, p56, p58, p60, p62, p64, p66, p68, p70, p72, p74, p76, p78, p80, p82, p84, p86, p88, p90, p92, p94, p96, p98, p100, p102, p104, p106, p108, p110, p112, p114, p116, p118, p120, p122, p124, p126, p128, p130, p132, p134, i139, 1, p147, p759, i160, p156, p760, i169, p165, p145, i757, i185, p182, p761, 
i219, p195, p241, i250, p248, p257, p141, p272, i281, i283, i318, i329, i373, i360, i558, p556, p582, p568, descr=TargetToken(53262992)) + i598 = int_le(2, i153), + guard_false(i598, descr=), + i599 = getfield_gc_pure(p589, descr=), + i600 = int_add_ovf(i599, i162), + guard_no_overflow(descr=), + i601 = getfield_gc_pure(p592, descr=), + i602 = int_add_ovf(i601, i171), + guard_no_overflow(descr=), + i603 = int_add_ovf(i176, 1), + guard_no_overflow(descr=), + i604 = int_sub(i585, 1), + setfield_gc(ConstPtr(ptr177), i604, descr=), + i605 = int_le(i604, 0), + guard_false(i605, descr=), + i606 = int_le(i603, i187), + guard_true(i606, descr=), + guard_not_invalidated(descr=), + i607 = getfield_gc_pure(p364, descr=), + i608 = int_mod(i607, i224), + i609 = int_rshift(i608, 31), + i610 = int_and(i224, i609), + i611 = int_add(i608, i610), + i612 = int_add_ovf(1, i611), + guard_no_overflow(descr=), + i613 = int_ge(i611, 0), + guard_true(i613, descr=), + i614 = int_lt(i611, i224), + guard_true(i614, descr=), + i615 = getarrayitem_gc(p247, i611, descr=), + i616 = uint_lt(i615, 0) + guard_false(i616, descr=) + i617 = uint_lt(i615, 2147483647) + guard_true(i617, descr=) + i618 = int_add_ovf(i607, i256) + guard_no_overflow(descr=) + i619 = int_ge(i615, 0) + guard_true(i619, descr=) + i620 = int_and(i615, i615) + i621 = uint_lt(i620, 2147483647) + guard_true(i621, descr=) + i622 = int_add_ovf(i602, 1) + guard_no_overflow(descr=) + i623 = int_ge(i602, 0) + guard_true(i623, descr=) + i624 = int_lt(i602, i290) + guard_true(i624, descr=) + i625 = getarrayitem_raw(i292, i602, descr=) + i626 = uint_lt(i625, 0) + guard_false(i626, descr=) + i627 = uint_lt(i625, 2147483647) + guard_true(i627, descr=) + i628 = int_and(i328, i620) + i629 = uint_lt(i628, 2147483647) + guard_true(i629, descr=) + i630 = getarrayitem_raw(i292, i602, descr=) + i631 = uint_lt(i630, 0) + guard_false(i631, descr=) + i632 = uint_lt(i630, 2147483647) + guard_true(i632, descr=) + i633 = int_ge(i630, 0) + 
guard_true(i633, descr=) + i634 = int_and(i343, i630) + i635 = uint_lt(i634, 2147483647) + guard_true(i635, descr=) + i636 = int_ge(i628, 0) + guard_true(i636, descr=) + i637 = int_or(i628, i634) + i638 = uint_lt(i637, 2147483647) + guard_true(i638, descr=) + setarrayitem_raw(i292, i602, i637, descr=) + i640 = int_lshift(i602, 3) + i641 = int_ge(i640, i290) + guard_false(i641, descr=) + i642 = uint_rshift(i637, i386) + i643 = int_lshift(i637, i373) + i644 = uint_rshift(i643, i386) + i645 = int_lshift(i644, 8) + i646 = int_or(i642, i645) + i647 = int_lshift(i643, i373) + i648 = uint_rshift(i647, i386) + i649 = int_lshift(i648, 16) + i650 = int_or(i646, i649) + i651 = int_lshift(i647, i373) + i652 = uint_rshift(i651, i386) + i653 = int_lshift(i652, 24) + i654 = int_or(i650, i653) + i655 = int_lshift(i651, i373) + setarrayitem_raw(8650752, i640, i654, descr=) + i656 = int_add(i640, 1) + i657 = int_ge(i656, i290) + guard_false(i657, descr=) + i658 = uint_rshift(i655, i386) + i659 = int_lshift(i655, i373) + i660 = uint_rshift(i659, i386) + i661 = int_lshift(i660, 8) + i662 = int_or(i658, i661) + i663 = int_lshift(i659, i373) + i664 = uint_rshift(i663, i386) + i665 = int_lshift(i664, 16) + i666 = int_or(i662, i665) + i667 = int_lshift(i663, i373) + i668 = uint_rshift(i667, i386) + i669 = int_lshift(i668, 24) + i670 = int_or(i666, i669) + i671 = int_lshift(i667, i373) + setarrayitem_raw(8650752, i656, i670, descr=) + i672 = int_add(i656, 1) + i673 = int_ge(i672, i290) + guard_false(i673, descr=) + i674 = uint_rshift(i671, i386) + i675 = int_lshift(i671, i373) + i676 = uint_rshift(i675, i386) + i677 = int_lshift(i676, 8) + i678 = int_or(i674, i677) + i679 = int_lshift(i675, i373) + i680 = uint_rshift(i679, i386) + i681 = int_lshift(i680, 16) + i682 = int_or(i678, i681) + i683 = int_lshift(i679, i373) + i684 = uint_rshift(i683, i386) + i685 = int_lshift(i684, 24) + i686 = int_or(i682, i685) + i687 = int_lshift(i683, i373) + setarrayitem_raw(8650752, i672, i686, descr=) + 
i688 = int_add(i672, 1) + i689 = int_ge(i688, i290) + guard_false(i689, descr=) + i690 = uint_rshift(i687, i386) + i691 = int_lshift(i687, i373) + i692 = uint_rshift(i691, i386) + i693 = int_lshift(i692, 8) + i694 = int_or(i690, i693) + i695 = int_lshift(i691, i373) + i696 = uint_rshift(i695, i386) + i697 = int_lshift(i696, 16) + i698 = int_or(i694, i697) + i699 = int_lshift(i695, i373) + i700 = uint_rshift(i699, i386) + i701 = int_lshift(i700, 24) + i702 = int_or(i698, i701) + i703 = int_lshift(i699, i373) + setarrayitem_raw(8650752, i688, i702, descr=) + i704 = int_add(i688, 1) + i705 = int_ge(i704, i290) + guard_false(i705, descr=) + i706 = uint_rshift(i703, i386) + i707 = int_lshift(i703, i373) + i708 = uint_rshift(i707, i386) + i709 = int_lshift(i708, 8) + i710 = int_or(i706, i709) + i711 = int_lshift(i707, i373) + i712 = uint_rshift(i711, i386) + i713 = int_lshift(i712, 16) + i714 = int_or(i710, i713) + i715 = int_lshift(i711, i373) + i716 = uint_rshift(i715, i386) + i717 = int_lshift(i716, 24) + i718 = int_or(i714, i717) + i719 = int_lshift(i715, i373) + setarrayitem_raw(8650752, i704, i718, descr=) + i720 = int_add(i704, 1) + i721 = int_ge(i720, i290) + guard_false(i721, descr=) + i722 = uint_rshift(i719, i386) + i723 = int_lshift(i719, i373) + i724 = uint_rshift(i723, i386) + i725 = int_lshift(i724, 8) + i726 = int_or(i722, i725) + i727 = int_lshift(i723, i373) + i728 = uint_rshift(i727, i386) + i729 = int_lshift(i728, 16) + i730 = int_or(i726, i729) + i731 = int_lshift(i727, i373) + i732 = uint_rshift(i731, i386) + i733 = int_lshift(i732, 24) + i734 = int_or(i730, i733) + i735 = int_lshift(i731, i373) + setarrayitem_raw(8650752, i720, i734, descr=) + i736 = int_add(i720, 1) + i737 = int_ge(i736, i290) + guard_false(i737, descr=) + i738 = uint_rshift(i735, i386) + i739 = int_lshift(i735, i373) + i740 = uint_rshift(i739, i386) + i741 = int_lshift(i740, 8) + i742 = int_or(i738, i741) + i743 = int_lshift(i739, i373) + i744 = uint_rshift(i743, i386) + i745 = 
int_lshift(i744, 16) + i746 = int_or(i742, i745) + i747 = int_lshift(i743, i373) + i748 = uint_rshift(i747, i386) + i749 = int_lshift(i748, 24) + i750 = int_or(i746, i749) + i751 = int_lshift(i747, i373) + setarrayitem_raw(8650752, i736, i750, descr=) + i752 = int_add(i736, 1) + i753 = int_ge(i752, i290) + guard_false(i753, descr=) + i754 = uint_rshift(i751, i386) + i755 = int_lshift(i751, i373) + i756 = uint_rshift(i755, i386) + i757 = int_lshift(i756, 8) + i758 = int_or(i754, i757) + i759 = int_lshift(i755, i373) + i760 = uint_rshift(i759, i386) + i761 = int_lshift(i760, 16) + i762 = int_or(i758, i761) + i763 = int_lshift(i759, i373) + i764 = uint_rshift(i763, i386) + i765 = int_lshift(i764, 24) + i766 = int_or(i762, i765) + i767 = int_lshift(i763, i373) + setarrayitem_raw(8650752, i752, i766, descr=) + i768 = int_add(i752, 1) + i769 = int_add_ovf(i600, i571) + guard_no_overflow(descr=) + i770 = int_add_ovf(i602, i571) + guard_no_overflow(descr=) + i771 = int_sub(i604, 11) + setfield_gc(ConstPtr(ptr177), i771, descr=) + i772 = int_le(i771, 0) + guard_false(i772, descr=) + p773 = new_with_vtable(23083336) + setfield_gc(p773, i769, descr=) + setarrayitem_gc(p147, 34, p773, descr=) + p774 = new_with_vtable(23083336) + setfield_gc(p774, i770, descr=) + setarrayitem_gc(p147, 35, p774, descr=) + p775 = new_with_vtable(23083336) + setfield_gc(p775, i618, descr=) + setarrayitem_gc(p147, 20, p775, descr=) + i776 = arraylen_gc(p147, descr=) + i777 = arraylen_gc(p581, descr=) + jump(p0, p3, p8, i615, p596, i620, p18, i603, p38, p40, p42, p44, p46, p48, p50, p52, p54, p56, p58, p60, p62, p64, p66, p68, p70, p72, p74, p76, p78, p80, p82, p84, p86, p88, p90, p92, p94, p96, p98, p100, p102, p104, p106, p108, p110, p112, p114, p116, p118, p120, p122, p124, p126, p128, p130, p132, p134, 1, p149, p773, i162, p158, p774, i171, p167, p147, i771, i187, p184, p190, p775, i224, p200, p247, i256, p254, p263, p142, p281, i290, i292, i328, i343, i386, i373, i571, p569, p596, p581, 
descr=TargetToken(48932608)) """) @py.test.mark.skipif("'just dozens of long traces'") diff --git a/spyvm/test/jittest/test_strategies.py b/spyvm/test/jittest/test_strategies.py --- a/spyvm/test/jittest/test_strategies.py +++ b/spyvm/test/jittest/test_strategies.py @@ -12,82 +12,82 @@ (1 to: 10000) asOrderedCollection. """) self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i190 = getarrayitem_gc(p52, 1, descr=), - i191 = int_eq(i190, 2147483647), - guard_false(i191, descr=), - i192 = int_ge(i190, i184), - guard_true(i192, descr=), - cond_call(i74, 16971392, p66, descr=), - cond_call(i102, 16971392, p90, descr=), - cond_call(i102, 16971392, p90, descr=), - p193 = getarrayitem_gc(p104, 0, descr=), - cond_call(i102, 16971392, p90, descr=), - p195 = new_with_vtable(18295080), - setfield_gc(p195, i184, descr=), - setarrayitem_gc(p104, 1, p195, descr=), - setarrayitem_gc(p78, 0, p193, descr=), - setfield_gc(p66, 2, descr=), - setfield_gc(p66, 15, descr=), - setfield_gc(p66, p0, descr=), - setfield_gc(ConstPtr(ptr80), i87, descr=), - setarrayitem_gc(p78, 1, p195, descr=), - guard_class(p193, 18294904, descr=), - p196 = getfield_gc(p193, descr=), - p197 = getfield_gc(p196, descr=), - guard_value(p197, ConstPtr(ptr117), descr=), - p198 = getfield_gc(p193, descr=), - setarrayitem_gc(p78, 0, ConstPtr(null), descr=), - setfield_gc(p66, 0, descr=), - setfield_gc(ConstPtr(ptr80), i131, descr=), - setarrayitem_gc(p78, 1, ConstPtr(null), descr=), - guard_class(p198, ConstClass(ListStorageShadow), descr=), - p201 = getfield_gc_pure(p198, descr=), - p202 = getarrayitem_gc(p201, 2, descr=), - p203 = getarrayitem_gc(p201, 0, descr=), - guard_class(p203, 18294904, descr=), - p204 = getfield_gc(p203, descr=), - p205 = getfield_gc(p204, descr=), - guard_value(p205, ConstPtr(ptr149), descr=), - p206 = getfield_gc(p203, descr=), - guard_nonnull_class(p206, 18300088, descr=), - p207 = getfield_gc_pure(p206, descr=), - i208 = arraylen_gc(p207, descr=), - i209 = 
getfield_gc_pure(p206, descr=), - guard_nonnull_class(p202, 18295080, descr=), - i210 = getfield_gc_pure(p202, descr=), - i211 = int_eq(i210, i208), - guard_false(i211, descr=), - i212 = int_add_ovf(i210, 1), - guard_no_overflow(descr=), - i213 = int_ge(i210, 0), - guard_true(i213, descr=), - i214 = int_lt(i210, i208), - guard_true(i214, descr=), - i215 = int_eq(i184, 2147483647), - guard_false(i215, descr=), - setarrayitem_gc(p207, i210, i184, descr=), - i216 = getarrayitem_gc(p52, 2, descr=), - setfield_gc(p66, -1, descr=), - setfield_gc(p66, ConstPtr(null), descr=), - setfield_gc(ConstPtr(ptr80), i83, descr=), - i217 = int_eq(i216, 2147483647), - guard_false(i217, descr=), - i218 = int_add_ovf(i184, i216), - guard_no_overflow(descr=), - i219 = int_sub(i187, 7), - setfield_gc(ConstPtr(ptr80), i219, descr=), - i220 = int_le(i219, 0), - guard_false(i220, descr=), - p221 = new_with_vtable(18295080), - setfield_gc(p221, i212, descr=), - setarrayitem_gc(p201, 2, p221, descr=), - i222 = arraylen_gc(p52, descr=), - i223 = arraylen_gc(p78, descr=), - i224 = arraylen_gc(p104, descr=), - jump(p0, p3, p6, i218, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, i47, p52, i74, p66, i102, p90, p104, p78, i87, i89, i131, i119, p145, i83, i219, descr=TargetToken(53064608)) + i197 = getarrayitem_gc(p54, 1, descr=), + i198 = int_eq(i197, 2147483647), + guard_false(i198, descr=), + i199 = int_ge(i197, i190), + guard_true(i199, descr=), + cond_call(i76, 21753520, p68, descr=), + cond_call(i106, 21753520, p92, descr=), + cond_call(i106, 21753520, p92, descr=), + p200 = getarrayitem_gc(p108, 0, descr=), + cond_call(i106, 21753520, p92, descr=), + p202 = new_with_vtable(23083336), + setfield_gc(p202, i190, descr=), + setarrayitem_gc(p108, 1, p202, descr=), + setarrayitem_gc(p80, 0, p200, descr=), + setfield_gc(p68, 2, descr=), + setfield_gc(p68, 15, descr=), + setfield_gc(p68, p0, descr=), + setfield_gc(ConstPtr(ptr82), i89, descr=), + setarrayitem_gc(p80, 1, 
p202, descr=), + guard_class(p200, 23083152, descr=), + p203 = getfield_gc(p200, descr=), + p204 = getfield_gc(p203, descr=), + guard_value(p204, ConstPtr(ptr121), descr=), + guard_not_invalidated(descr=), + p205 = getfield_gc(p200, descr=), + setarrayitem_gc(p80, 0, ConstPtr(null), descr=), + setfield_gc(p68, 0, descr=), + setfield_gc(ConstPtr(ptr82), i136, descr=), + setarrayitem_gc(p80, 1, ConstPtr(null), descr=), + guard_class(p205, ConstClass(ListStorageShadow), descr=), + p208 = getfield_gc_pure(p205, descr=), + p209 = getarrayitem_gc(p208, 2, descr=), + p210 = getarrayitem_gc(p208, 0, descr=), + guard_class(p210, 23083152, descr=), + p211 = getfield_gc(p210, descr=), + p212 = getfield_gc(p211, descr=), + guard_value(p212, ConstPtr(ptr154), descr=), + p213 = getfield_gc(p210, descr=), + guard_nonnull_class(p213, 23088412, descr=), + p214 = getfield_gc_pure(p213, descr=), + i215 = arraylen_gc(p214, descr=), + i216 = getfield_gc_pure(p213, descr=), + guard_nonnull_class(p209, 23083336, descr=), + i217 = getfield_gc_pure(p209, descr=), + i218 = int_eq(i217, i215), + guard_false(i218, descr=), + i219 = int_add_ovf(i217, 1), + guard_no_overflow(descr=), + i220 = int_ge(i217, 0), + guard_true(i220, descr=), + i221 = int_lt(i217, i215), + guard_true(i221, descr=), + i222 = int_eq(i190, 2147483647), + guard_false(i222, descr=), + setarrayitem_gc(p214, i217, i190, descr=), + i223 = getarrayitem_gc(p54, 2, descr=), + setfield_gc(p68, -1, descr=), + setfield_gc(p68, ConstPtr(null), descr=), + setfield_gc(ConstPtr(ptr82), i85, descr=), + i224 = int_eq(i223, 2147483647), + guard_false(i224, descr=), + i225 = int_add_ovf(i190, i223), + guard_no_overflow(descr=), + i226 = int_sub(i193, 5), + setfield_gc(ConstPtr(ptr82), i226, descr=), + i227 = int_le(i226, 0), + guard_false(i227, descr=), + p228 = new_with_vtable(23083336), + setfield_gc(p228, i219, descr=), + setarrayitem_gc(p208, 2, p228, descr=), + i229 = arraylen_gc(p54, descr=), + i230 = arraylen_gc(p80, descr=), + 
i231 = arraylen_gc(p108, descr=), + jump(p0, p3, p6, i225, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p54, i76, p68, i106, p92, p108, p80, i89, i91, i136, i85, i226, descr=TargetToken(48645456)) """) - + def test_indexOf(self, spy, tmpdir): traces = self.run(spy, tmpdir, """ @@ -95,32 +95,32 @@ """) # First loop: asOrderedCollection, second loop: makeRoomAtLast self.assert_matches(traces[2].loop, """ - guard_not_invalidated(descr=), - i143 = int_le(i137, i62), - guard_true(i143, descr=), - setfield_gc(ConstPtr(ptr84), i91, descr=), - i144 = int_add_ovf(i137, i100), - guard_no_overflow(descr=), - i145 = int_sub(i144, 1), - i146 = int_gt(i145, i108), - guard_false(i146, descr=), - i147 = int_sub(i145, 1), - i148 = int_ge(i147, 0), - guard_true(i148, descr=), - i149 = int_lt(i147, i127), - guard_true(i149, descr=), - i150 = getarrayitem_gc(p126, i147, descr=), - i151 = int_eq(i150, 2147483647), - guard_false(i151, descr=), - setfield_gc(ConstPtr(ptr84), i87, descr=), - i152 = int_eq(i150, i134), - guard_false(i152, descr=), - i153 = int_add_ovf(i137, 1), - guard_no_overflow(descr=), - i154 = int_sub(i140, 5), - setfield_gc(ConstPtr(ptr84), i154, descr=), - i155 = int_le(i154, 0), - guard_false(i155, descr=), - i156 = arraylen_gc(p96, descr=), - jump(p0, p3, p6, p8, p10, i153, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, p48, p50, p52, i57, i62, i91, i100, p64, p98, i80, i108, p105, p111, i127, p126, i87, i134, i154, p96, descr=TargetToken(51324000)) + i144 = int_le(i137, i63), + guard_true(i144, descr=), + guard_not_invalidated(descr=), + setfield_gc(ConstPtr(ptr85), i92, descr=), + i145 = int_add_ovf(i137, i101), + guard_no_overflow(descr=), + i146 = int_sub(i145, 1), + i147 = int_gt(i146, i109), + guard_false(i147, descr=), + i148 = int_sub(i146, 1), + i149 = int_ge(i148, 0), + guard_true(i149, descr=), + i150 = int_lt(i148, i127), + guard_true(i150, descr=), + i151 = getarrayitem_gc(p126, i148, descr=), 
+ i152 = int_eq(i151, 2147483647), + guard_false(i152, descr=), + setfield_gc(ConstPtr(ptr85), i88, descr=), + i153 = int_eq(i151, i134), + guard_false(i153, descr=), + i154 = int_add_ovf(i137, 1), + guard_no_overflow(descr=), + i155 = int_sub(i140, 3), + setfield_gc(ConstPtr(ptr85), i155, descr=), + i156 = int_le(i155, 0), + guard_false(i156, descr=), + i157 = arraylen_gc(p97, descr=), + jump(p0, p3, p6, p8, p10, i154, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, p48, p50, p52, i63, p65, i92, i101, p99, i109, p106, p112, i127, p126, i88, i134, i155, p97, descr=TargetToken(53728496)) """) From noreply at buildbot.pypy.org Wed Apr 9 14:04:15 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 9 Apr 2014 14:04:15 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added code to jit.py Message-ID: <20140409120415.014A31D2950@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r791:e811ac7cd9d8 Date: 2014-04-09 14:03 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e811ac7cd9d8/ Log: Added code to jit.py diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -110,10 +110,15 @@ def main(): # ===== First define which image we are going to use. - imagename = "mini.image" + imagename = "minibluebookdebug.image" + # imagename = "mini.image" # imagename = "minitest.image" # imagename = "Squeak4.5-noBitBlt.image" + # ===== Define the code to be executed, if any. + # code = "^6+7" + code = "10000 timesRepeat: [ 0 makeStackDepth: 10 ]" + # ===== These entry-points pre-load the image and directly execute a single frame. 
# func = preload_perform(imagename, model.W_SmallInteger(1000), 'loopTest2') # func = preload_perform(imagename, model.W_SmallInteger(777), 'name') @@ -122,15 +127,15 @@ # ===== These execute the complete interpreter # ===== XXX These do not work because loading the image file while meta-interpreting always leads to # ===== a 'Bad file descriptor' error. - # func = full_vm_code(imagename, "^5+6") + # func = full_vm_code(imagename, code) # func = full_vm_method(imagename, "name", 33) # func = full_vm_image(imagename) # ==== These entry-points pre-load the image and then use methods from the entry-point module. # ==== This is very close to what actually happens in the VM, but with a pre-loaded image. # func = run_benchmark(imagename, "loopTest2", 10000) - # func = run_code(imagename, "^6+7", as_benchmark=True) - func = run_image(imagename) + func = run_code(imagename, code, as_benchmark=False) + # func = run_image(imagename) # ===== Now we can either simply execute the entry-point, or meta-interpret it (showing all encountered loops). # res = func() From noreply at buildbot.pypy.org Wed Apr 9 14:05:04 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 9 Apr 2014 14:05:04 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-ignoringStackOverflow: Commented out handling of stack-overflow and interrupts. Message-ID: <20140409120504.834721D2950@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-ignoringStackOverflow Changeset: r792:c0af48bf0025 Date: 2014-04-09 14:04 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/c0af48bf0025/ Log: Commented out handling of stack-overflow and interrupts. Just for experiments. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -78,12 +78,13 @@ self._loop = True s_new_context = w_active_context.as_context_get_shadow(self.space) while True: - assert self.remaining_stack_depth == self.max_stack_depth + #assert self.remaining_stack_depth == self.max_stack_depth # Need to save s_sender, c_loop will nil this on return s_sender = s_new_context.s_sender() try: s_new_context = self.c_loop(s_new_context) except StackOverflow, e: + print "Excepted StackOverflow" s_new_context = e.s_context except Return, nlr: s_new_context = s_sender @@ -101,15 +102,15 @@ def c_loop(self, s_context, may_context_switch=True): old_pc = 0 - if not jit.we_are_jitted() and may_context_switch: - self.quick_check_for_interrupt(s_context) + #if not jit.we_are_jitted() and may_context_switch: + # self.quick_check_for_interrupt(s_context) method = s_context.w_method() while True: pc = s_context.pc() if pc < old_pc: - if jit.we_are_jitted(): - self.quick_check_for_interrupt(s_context, - dec=self._get_adapted_tick_counter()) + #if jit.we_are_jitted(): + # self.quick_check_for_interrupt(s_context, + # dec=self._get_adapted_tick_counter()) self.jit_driver.can_enter_jit( pc=pc, self=self, method=method, s_context=s_context) @@ -140,14 +141,16 @@ if not self._loop: return s_new_frame # this test is done to not loop in test, # but rather step just once where wanted - if self.remaining_stack_depth <= 1: - raise StackOverflow(s_new_frame) + #if self.remaining_stack_depth <= 1: + # print "Raising StackOverflow due to remaining_stack_depth" + # raise StackOverflow(s_new_frame) - self.remaining_stack_depth -= 1 + #self.remaining_stack_depth -= 1 try: retval = self.c_loop(s_new_frame, may_context_switch) finally: - self.remaining_stack_depth += 1 + pass + # self.remaining_stack_depth += 1 return retval def perform(self, w_receiver, selector, *arguments_w): From noreply at buildbot.pypy.org Wed Apr 9 14:57:56 2014 From: 
noreply at buildbot.pypy.org (Raemi) Date: Wed, 9 Apr 2014 14:57:56 +0200 (CEST) Subject: [pypy-commit] benchmarks default: Add a benchmark performing inserts/finds/removes on a skip-list. We currently Message-ID: <20140409125756.C76D11C3282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r247:1100457d9161 Date: 2014-04-09 14:58 +0200 http://bitbucket.org/pypy/benchmarks/changeset/1100457d9161/ Log: Add a benchmark performing inserts/finds/removes on a skip-list. We currently get slower with more threads which we should try to avoid diff --git a/multithread/skiplist/skiplist.py b/multithread/skiplist/skiplist.py new file mode 100644 --- /dev/null +++ b/multithread/skiplist/skiplist.py @@ -0,0 +1,138 @@ +# https://github.com/kunigami/blog-examples/tree/master/2012-09-23-skip-list + +from common.abstract_threading import atomic, Future +import time, threading + +import random + +thread_local = threading.local() + +class SkipNode: + """A node from a skip list""" + def __init__(self, height = 0, elem = None): + self.elem = elem + self.next = [None]*height + +class SkipList: + def __init__(self): + self.head = SkipNode() + self.len = 0 + self.maxHeight = 0 + + def __len__(self): + return self.len + + def find(self, elem, update = None): + if update == None: + update = self.updateList(elem) + if len(update) > 0: + candidate = update[0].next[0] + if candidate != None and candidate.elem == elem: + return candidate + return None + + def contains(self, elem, update = None): + return self.find(elem, update) != None + + def randomHeight(self): + height = 1 + while thread_local.rnd.randint(1, 2) != 1: + height += 1 + return height + + def updateList(self, elem): + update = [None] * self.maxHeight + x = self.head + for i in reversed(xrange(self.maxHeight)): + while x.next[i] != None and x.next[i].elem < elem: + x = x.next[i] + update[i] = x + return update + + def insert(self, elem): + node = SkipNode(self.randomHeight(), elem) + + # conflicts with every 
find(): + self.maxHeight = max(self.maxHeight, len(node.next)) + + while len(self.head.next) < len(node.next): + self.head.next.append(None) + + update = self.updateList(elem) + if self.find(elem, update) == None: + for i in xrange(len(node.next)): + node.next[i] = update[i].next[i] + update[i].next[i] = node + self.len += 1 + + def remove(self, elem): + update = self.updateList(elem) + x = self.find(elem, update) + if x != None: + for i in reversed(range(len(x.next))): + update[i].next[i] = x.next[i] + if self.head.next[i] == None: + self.maxHeight -= 1 + self.len -= 1 + + def printList(self): + for i in range(len(self.head.next)-1, -1, -1): + x = self.head + while x.next[i] != None: + print x.next[i].elem, + x = x.next[i] + print '' + + + +OPS = [SkipList.find] * 98 + [SkipList.insert, SkipList.remove] + + +def task(id, slist, ops): + print "start task with %s ops" % ops + r = random.Random() + r.seed(id) + thread_local.rnd = r + + for _ in xrange(ops): + op = r.choice(OPS) + elem = r.randint(1, 10000) + with atomic: + op(slist, elem) + + print "task ended" + + +def chunks(l, n): + """ Yield successive n-sized chunks from l. 
""" + for i in xrange(0, len(l), n): + yield l[i:i+n] + + + +def run(threads=2, operations=2000000): + threads = int(threads) + operations = int(operations) + + thread_local.rnd = random + + slist = SkipList() + for _ in xrange(1000): + slist.insert(random.randint(1, 1000)) + + c_len = operations // threads + fs = [] + for i in xrange(threads): + fs.append(Future(task, i, slist, c_len)) + for f in fs: + f() + + # print "list:" + # slist.printList() + + + + + +if __name__ == '__main__': + run() diff --git a/multithread/threadworms/threadworms.py b/multithread/threadworms/threadworms.py --- a/multithread/threadworms/threadworms.py +++ b/multithread/threadworms/threadworms.py @@ -56,7 +56,7 @@ def run(self): - for _ in xrange(NUM_STEPS): + for _ in range(NUM_STEPS): if self.rnd.randint(0, 100) < 20: # 20% to change direction self.direction = self.rnd.choice((UP, DOWN, LEFT, RIGHT)) @@ -127,7 +127,7 @@ def run(worms=2, steps=10000000): global DISPLAYSURF, NUM_WORMS, NUM_STEPS, GRID NUM_WORMS = int(worms) - NUM_STEPS = int(steps) / NUM_WORMS + NUM_STEPS = int(steps) // NUM_WORMS GRID = [] for x in range(CELLS_WIDE): From noreply at buildbot.pypy.org Wed Apr 9 16:27:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Apr 2014 16:27:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a FAQ entry "translation swaps forever" Message-ID: <20140409142738.65B481D2351@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70499:69cafd5dfbfa Date: 2014-04-09 16:26 +0200 http://bitbucket.org/pypy/pypy/changeset/69cafd5dfbfa/ Log: Add a FAQ entry "translation swaps forever" diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -459,6 +459,19 @@ .. _`getting-started`: getting-started-python.html +------------------------------------------ +Compiling PyPy swaps or runs out of memory +------------------------------------------ + +This is documented (here__ and here__). 
It needs 4 GB of RAM to run +"rpython targetpypystandalone" on top of PyPy, a bit more when running +on CPython. If you have less than 4 GB it will just swap forever (or +fail if you don't have enough swap). On 32-bit, divide the numbers by +two. + +.. __: http://pypy.org/download.html#building-from-source +.. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter + .. _`how do I compile my own interpreters`: ------------------------------------- From noreply at buildbot.pypy.org Wed Apr 9 16:37:02 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 9 Apr 2014 16:37:02 +0200 (CEST) Subject: [pypy-commit] benchmarks default: add a btree benchmark with similar problems as skip-list (a bit better though) Message-ID: <20140409143702.6D8251C3282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r248:a7f0f6c6e15e Date: 2014-04-09 16:37 +0200 http://bitbucket.org/pypy/benchmarks/changeset/a7f0f6c6e15e/ Log: add a btree benchmark with similar problems as skip-list (a bit better though) diff --git a/multithread/btree/btree.py b/multithread/btree/btree.py new file mode 100644 --- /dev/null +++ b/multithread/btree/btree.py @@ -0,0 +1,354 @@ +# https://github.com/MartinThoma/algorithms/tree/master/datastructures + +from common.abstract_threading import atomic, Future +import time, threading + +import random + +thread_local = threading.local() + +import bisect + + +class _BNode(object): + __slots__ = ["tree", "contents", "children"] + + def __init__(self, tree, contents=None, children=None): + self.tree = tree + self.contents = contents or [] + self.children = children or [] + if self.children: + assert len(self.contents) + 1 == len(self.children), \ + "one more child than data item required" + + def __repr__(self): + name = getattr(self, "children", 0) and "Branch" or "Leaf" + return "<%s %s>" % (name, ", ".join(map(str, self.contents))) + + def lateral(self, parent, parent_index, dest, 
dest_index): + if parent_index > dest_index: + dest.contents.append(parent.contents[dest_index]) + parent.contents[dest_index] = self.contents.pop(0) + if self.children: + dest.children.append(self.children.pop(0)) + else: + dest.contents.insert(0, parent.contents[parent_index]) + parent.contents[parent_index] = self.contents.pop() + if self.children: + dest.children.insert(0, self.children.pop()) + + def shrink(self, ancestors): + parent = None + + if ancestors: + parent, parent_index = ancestors.pop() + # try to lend to the left neighboring sibling + if parent_index: + left_sib = parent.children[parent_index - 1] + if len(left_sib.contents) < self.tree.order: + self.lateral( + parent, parent_index, left_sib, parent_index - 1) + return + + # try the right neighbor + if parent_index + 1 < len(parent.children): + right_sib = parent.children[parent_index + 1] + if len(right_sib.contents) < self.tree.order: + self.lateral( + parent, parent_index, right_sib, parent_index + 1) + return + + sibling, push = self.split() + + if not parent: + parent, parent_index = self.tree.BRANCH( + self.tree, children=[self]), 0 + self.tree._root = parent + + # pass the median up to the parent + parent.contents.insert(parent_index, push) + parent.children.insert(parent_index + 1, sibling) + if len(parent.contents) > parent.tree.order: + parent.shrink(ancestors) + + def grow(self, ancestors): + parent, parent_index = ancestors.pop() + + minimum = self.tree.order // 2 + left_sib = right_sib = None + + # try to borrow from the right sibling + if parent_index + 1 < len(parent.children): + right_sib = parent.children[parent_index + 1] + if len(right_sib.contents) > minimum: + right_sib.lateral(parent, parent_index + 1, self, parent_index) + return + + # try to borrow from the left sibling + if parent_index: + left_sib = parent.children[parent_index - 1] + if len(left_sib.contents) > minimum: + left_sib.lateral(parent, parent_index - 1, self, parent_index) + return + + # consolidate with a 
sibling - try left first + if left_sib: + left_sib.contents.append(parent.contents[parent_index - 1]) + left_sib.contents.extend(self.contents) + if self.children: + left_sib.children.extend(self.children) + parent.contents.pop(parent_index - 1) + parent.children.pop(parent_index) + else: + self.contents.append(parent.contents[parent_index]) + self.contents.extend(right_sib.contents) + if self.children: + self.children.extend(right_sib.children) + parent.contents.pop(parent_index) + parent.children.pop(parent_index + 1) + + if len(parent.contents) < minimum: + if ancestors: + # parent is not the root + parent.grow(ancestors) + elif not parent.contents: + # parent is root, and its now empty + self.tree._root = left_sib or self + + def split(self): + center = len(self.contents) // 2 + median = self.contents[center] + sibling = type(self)( + self.tree, + self.contents[center + 1:], + self.children[center + 1:]) + self.contents = self.contents[:center] + self.children = self.children[:center + 1] + return sibling, median + + def insert(self, index, item, ancestors): + self.contents.insert(index, item) + if len(self.contents) > self.tree.order: + self.shrink(ancestors) + + def remove(self, index, ancestors): + minimum = self.tree.order // 2 + + if self.children: + # try promoting from the right subtree first, + # but only if it won't have to resize + additional_ancestors = [(self, index + 1)] + descendent = self.children[index + 1] + while descendent.children: + additional_ancestors.append((descendent, 0)) + descendent = descendent.children[0] + if len(descendent.contents) > minimum: + ancestors.extend(additional_ancestors) + self.contents[index] = descendent.contents[0] + descendent.remove(0, ancestors) + return + + # fall back to the left child + additional_ancestors = [(self, index)] + descendent = self.children[index] + while descendent.children: + additional_ancestors.append( + (descendent, len(descendent.children) - 1)) + descendent = descendent.children[-1] + 
ancestors.extend(additional_ancestors) + self.contents[index] = descendent.contents[-1] + descendent.remove(len(descendent.children) - 1, ancestors) + else: + self.contents.pop(index) + if len(self.contents) < minimum and ancestors: + self.grow(ancestors) + +class BTree(object): + BRANCH = LEAF = _BNode + + def __init__(self, order): + self.order = order + self._root = self._bottom = self.LEAF(self) + + def _path_to(self, item): + current = self._root + ancestry = [] + + while getattr(current, "children", None): + index = bisect.bisect_left(current.contents, item) + ancestry.append((current, index)) + if index < len(current.contents) \ + and current.contents[index] == item: + return ancestry + current = current.children[index] + + index = bisect.bisect_left(current.contents, item) + ancestry.append((current, index)) + present = index < len(current.contents) + present = present and current.contents[index] == item + + return ancestry + + def _present(self, item, ancestors): + last, index = ancestors[-1] + return index < len(last.contents) and last.contents[index] == item + + def insert(self, item): + ancestors = self._path_to(item) + node, index = ancestors[-1] + while getattr(node, "children", None): + node = node.children[index] + index = bisect.bisect_left(node.contents, item) + ancestors.append((node, index)) + node, index = ancestors.pop() + node.insert(index, item, ancestors) + + def remove(self, item): + ancestors = self._path_to(item) + + if self._present(item, ancestors): + node, index = ancestors.pop() + node.remove(index, ancestors) + # else: + # raise ValueError("%r not in %s" % (item, self.__class__.__name__)) + + def __contains__(self, item): + return self._present(item, self._path_to(item)) + + def __iter__(self): + def _recurse(node): + if node.children: + for child, item in zip(node.children, node.contents): + for child_item in _recurse(child): + yield child_item + yield item + for child_item in _recurse(node.children[-1]): + yield child_item + else: 
+ for item in node.contents: + yield item + + for item in _recurse(self._root): + yield item + + def __repr__(self): + def recurse(node, accum, depth): + accum.append((" " * depth) + repr(node)) + for node in getattr(node, "children", []): + recurse(node, accum, depth + 1) + + accum = [] + recurse(self._root, accum, 0) + return "\n".join(accum) + + @classmethod + def bulkload(cls, items, order): + tree = object.__new__(cls) + tree.order = order + + leaves = tree._build_bulkloaded_leaves(items) + tree._build_bulkloaded_branches(leaves) + + return tree + + def _build_bulkloaded_leaves(self, items): + minimum = self.order // 2 + leaves, seps = [[]], [] + + for item in items: + if len(leaves[-1]) < self.order: + leaves[-1].append(item) + else: + seps.append(item) + leaves.append([]) + + if len(leaves[-1]) < minimum and seps: + last_two = leaves[-2] + [seps.pop()] + leaves[-1] + leaves[-2] = last_two[:minimum] + leaves[-1] = last_two[minimum + 1:] + seps.append(last_two[minimum]) + + return [self.LEAF(self, contents=node) for node in leaves], seps + + def _build_bulkloaded_branches(self, (leaves, seps)): + minimum = self.order // 2 + levels = [leaves] + + while len(seps) > self.order + 1: + items, nodes, seps = seps, [[]], [] + + for item in items: + if len(nodes[-1]) < self.order: + nodes[-1].append(item) + else: + seps.append(item) + nodes.append([]) + + if len(nodes[-1]) < minimum and seps: + last_two = nodes[-2] + [seps.pop()] + nodes[-1] + nodes[-2] = last_two[:minimum] + nodes[-1] = last_two[minimum + 1:] + seps.append(last_two[minimum]) + + offset = 0 + for i, node in enumerate(nodes): + children = levels[-1][offset:offset + len(node) + 1] + nodes[i] = self.BRANCH(self, contents=node, children=children) + offset += len(node) + 1 + + levels.append(nodes) + + self._root = self.BRANCH(self, contents=seps, children=levels[-1]) + + +OPS = [BTree.__contains__] * 98 + [BTree.insert, BTree.remove] + + +def task(id, tree, ops): + print "start task with %s ops" % ops + r = 
random.Random() + r.seed(id) + thread_local.rnd = r + + for _ in xrange(ops): + op = r.choice(OPS) + elem = r.randint(1, 10000) + with atomic: + op(tree, elem) + + print "task ended" + + +def chunks(l, n): + """ Yield successive n-sized chunks from l. """ + for i in xrange(0, len(l), n): + yield l[i:i+n] + + + +def run(threads=2, operations=2000000): + threads = int(threads) + operations = int(operations) + + thread_local.rnd = random + + tree = BTree(20) + for _ in xrange(1000): + tree.insert(random.randint(1, 1000)) + + c_len = operations // threads + fs = [] + for i in xrange(threads): + fs.append(Future(task, i, tree, c_len)) + for f in fs: + f() + + # print "tree:" + # print tree + + + + + +if __name__ == '__main__': + run() From noreply at buildbot.pypy.org Wed Apr 9 20:33:24 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 20:33:24 +0200 (CEST) Subject: [pypy-commit] pypy default: minor cleanup/pep8 Message-ID: <20140409183324.088B41D2351@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70500:c54befdbeb4b Date: 2014-04-09 11:30 -0700 http://bitbucket.org/pypy/pypy/changeset/c54befdbeb4b/ Log: minor cleanup/pep8 diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -2,27 +2,31 @@ Logic to find sys.executable and the initial sys.path containing the stdlib """ -import sys +import errno import os import stat -import errno +import sys + from rpython.rlib import rpath from rpython.rlib.objectmodel import we_are_translated + from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state -platform = sys.platform IS_WINDOWS = sys.platform == 'win32' + def find_executable(executable): """ - Return the absolute path of the executable, by looking into PATH and the - current directory. If it cannot be found, return ''. 
+ Return the absolute path of the executable, by looking into PATH and + the current directory. If it cannot be found, return ''. """ - if we_are_translated() and IS_WINDOWS and not executable.lower().endswith('.exe'): + if (we_are_translated() and IS_WINDOWS and + not executable.lower().endswith('.exe')): executable += '.exe' if os.sep in executable or (IS_WINDOWS and ':' in executable): - pass # the path is already more than just an executable name + # the path is already more than just an executable name + pass else: path = os.environ.get('PATH') if path: @@ -35,15 +39,15 @@ # 'sys.executable' should not end up being an non-existing file; # just use '' in this case. (CPython issue #7774) - if not os.path.isfile(executable): - executable = '' - return executable + return executable if os.path.isfile(executable) else '' + def _readlink_maybe(filename): if not IS_WINDOWS: return os.readlink(filename) raise NotImplementedError + def resolvedirof(filename): filename = rpath.rabspath(filename) dirname = rpath.rabspath(os.path.join(filename, '..')) @@ -56,36 +60,37 @@ return resolvedirof(os.path.join(dirname, link)) return dirname + def find_stdlib(state, executable): """ Find and compute the stdlib path, starting from the directory where - ``executable`` is and going one level up until we find it. Return a tuple - (path, prefix), where ``prefix`` is the root directory which contains the - stdlib. - If it cannot be found, return (None, None). + ``executable`` is and going one level up until we find it. Return a + tuple (path, prefix), where ``prefix`` is the root directory which + contains the stdlib. If it cannot be found, return (None, None). 
""" - if executable == '': - executable = 'pypy-c' - search = executable + search = 'pypy-c' if executable == '' else executable while True: dirname = resolvedirof(search) if dirname == search: - return None, None # not found :-( + return None, None # not found :-( newpath = compute_stdlib_path_maybe(state, dirname) if newpath is not None: return newpath, dirname search = dirname # walk to the parent directory + def _checkdir(path): st = os.stat(path) if not stat.S_ISDIR(st[0]): raise OSError(errno.ENOTDIR, path) + def compute_stdlib_path(state, prefix): """ - Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` must at - least contain a directory called ``lib-python/X.Y`` and another one called - ``lib_pypy``. If they cannot be found, it raises OSError. + Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` + must at least contain a directory called ``lib-python/X.Y`` and + another one called ``lib_pypy``. If they cannot be found, it raises + OSError. """ from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d.%d' % (CPYTHON_VERSION[0], @@ -111,41 +116,42 @@ importlist.append(lib_tk) # List here the extra platform-specific paths. - if platform != 'win32': - importlist.append(os.path.join(python_std_lib, 'plat-'+platform)) - if platform == 'darwin': + if not IS_WINDOWS: + importlist.append(os.path.join(python_std_lib, 'plat-' + sys.platform)) + if sys.platform == 'darwin': platmac = os.path.join(python_std_lib, 'plat-mac') importlist.append(platmac) importlist.append(os.path.join(platmac, 'lib-scriptpackages')) return importlist + def compute_stdlib_path_maybe(state, prefix): - """ - Return the stdlib path rooted at ``prefix``, or None if it cannot be - found. + """Return the stdlib path rooted at ``prefix``, or None if it cannot + be found. 
""" try: return compute_stdlib_path(state, prefix) except OSError: return None + @unwrap_spec(executable='str0') def pypy_find_executable(space, executable): return space.wrap(find_executable(executable)) + @unwrap_spec(filename='str0') def pypy_resolvedirof(space, filename): return space.wrap(resolvedirof(filename)) + @unwrap_spec(executable='str0') def pypy_find_stdlib(space, executable): path, prefix = find_stdlib(get_state(space), executable) if path is None: return space.w_None - else: - space.setitem(space.sys.w_dict, space.wrap('prefix'), - space.wrap(prefix)) - space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), - space.wrap(prefix)) - return space.newlist([space.wrap(p) for p in path]) + space.setitem(space.sys.w_dict, space.wrap('prefix'), space.wrap(prefix)) + space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), + space.wrap(prefix)) + return space.newlist([space.wrap(p) for p in path]) From noreply at buildbot.pypy.org Wed Apr 9 20:33:25 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 20:33:25 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: use cpython 3.3's test which we already adhere to. 3.2.5's test seems bogus Message-ID: <20140409183325.38EBC1D2351@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70501:455e6ff76097 Date: 2014-04-08 17:14 -0700 http://bitbucket.org/pypy/pypy/changeset/455e6ff76097/ Log: use cpython 3.3's test which we already adhere to. 3.2.5's test seems bogus (cpython issue15533) diff --git a/lib-python/3/test/test_subprocess.py b/lib-python/3/test/test_subprocess.py --- a/lib-python/3/test/test_subprocess.py +++ b/lib-python/3/test/test_subprocess.py @@ -311,7 +311,8 @@ def test_executable_without_cwd(self): # For a normal installation, it should work without 'cwd' # argument. For test runs in the build directory, see #7774. 
- self._assert_cwd('', "somethingyoudonthave", executable=sys.executable) + self._assert_cwd(os.getcwd(), "somethingyoudonthave", + executable=sys.executable) def test_stdin_pipe(self): # stdin redirection From noreply at buildbot.pypy.org Wed Apr 9 20:33:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 20:33:26 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140409183326.663FD1D2351@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70502:b7063430977e Date: 2014-04-09 11:32 -0700 http://bitbucket.org/pypy/pypy/changeset/b7063430977e/ Log: merge default diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -459,6 +459,19 @@ .. _`getting-started`: getting-started-python.html +------------------------------------------ +Compiling PyPy swaps or runs out of memory +------------------------------------------ + +This is documented (here__ and here__). It needs 4 GB of RAM to run +"rpython targetpypystandalone" on top of PyPy, a bit more when running +on CPython. If you have less than 4 GB it will just swap forever (or +fail if you don't have enough swap). On 32-bit, divide the numbers by +two. + +.. __: http://pypy.org/download.html#building-from-source +.. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter + .. _`how do I compile my own interpreters`: ------------------------------------- diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -15,11 +15,11 @@ user, describes work in progress, and finally gives references to more implementation details. -This work was done by Remi Meier and Armin Rigo. Thanks to all donors -for crowd-funding the work so far! Please have a look at the 2nd call -for donation (*not ready yet*) +This work was done mostly by Remi Meier and Armin Rigo. Thanks to all +donors for crowd-funding the work so far! 
Please have a look at the +`2nd call for donation`_. -.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html +.. _`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -2,27 +2,31 @@ Logic to find sys.executable and the initial sys.path containing the stdlib """ -import sys +import errno import os import stat -import errno +import sys + from rpython.rlib import rpath from rpython.rlib.objectmodel import we_are_translated + from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state -platform = sys.platform IS_WINDOWS = sys.platform == 'win32' + def find_executable(executable): """ - Return the absolute path of the executable, by looking into PATH and the - current directory. If it cannot be found, return ''. + Return the absolute path of the executable, by looking into PATH and + the current directory. If it cannot be found, return ''. """ - if we_are_translated() and IS_WINDOWS and not executable.lower().endswith('.exe'): + if (we_are_translated() and IS_WINDOWS and + not executable.lower().endswith('.exe')): executable += '.exe' if os.sep in executable or (IS_WINDOWS and ':' in executable): - pass # the path is already more than just an executable name + # the path is already more than just an executable name + pass else: path = os.environ.get('PATH') if path: @@ -35,15 +39,15 @@ # 'sys.executable' should not end up being an non-existing file; # just use '' in this case. 
(CPython issue #7774) - if not os.path.isfile(executable): - executable = '' - return executable + return executable if os.path.isfile(executable) else '' + def _readlink_maybe(filename): if not IS_WINDOWS: return os.readlink(filename) raise NotImplementedError + def resolvedirof(filename): filename = rpath.rabspath(filename) dirname = rpath.rabspath(os.path.join(filename, '..')) @@ -56,36 +60,37 @@ return resolvedirof(os.path.join(dirname, link)) return dirname + def find_stdlib(state, executable): """ Find and compute the stdlib path, starting from the directory where - ``executable`` is and going one level up until we find it. Return a tuple - (path, prefix), where ``prefix`` is the root directory which contains the - stdlib. - If it cannot be found, return (None, None). + ``executable`` is and going one level up until we find it. Return a + tuple (path, prefix), where ``prefix`` is the root directory which + contains the stdlib. If it cannot be found, return (None, None). """ - if executable == '': - executable = 'pypy-c' - search = executable + search = 'pypy-c' if executable == '' else executable while True: dirname = resolvedirof(search) if dirname == search: - return None, None # not found :-( + return None, None # not found :-( newpath = compute_stdlib_path_maybe(state, dirname) if newpath is not None: return newpath, dirname search = dirname # walk to the parent directory + def _checkdir(path): st = os.stat(path) if not stat.S_ISDIR(st[0]): raise OSError(errno.ENOTDIR, path) + def compute_stdlib_path(state, prefix): """ - Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` must at - least contain a directory called ``lib-python/X.Y`` and another one called - ``lib_pypy``. If they cannot be found, it raises OSError. + Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` + must at least contain a directory called ``lib-python/X.Y`` and + another one called ``lib_pypy``. If they cannot be found, it raises + OSError. 
""" from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d' % CPYTHON_VERSION[0] @@ -110,41 +115,42 @@ importlist.append(lib_tk) # List here the extra platform-specific paths. - if platform != 'win32': - importlist.append(os.path.join(python_std_lib, 'plat-'+platform)) - if platform == 'darwin': + if not IS_WINDOWS: + importlist.append(os.path.join(python_std_lib, 'plat-' + sys.platform)) + if sys.platform == 'darwin': platmac = os.path.join(python_std_lib, 'plat-mac') importlist.append(platmac) importlist.append(os.path.join(platmac, 'lib-scriptpackages')) return importlist + def compute_stdlib_path_maybe(state, prefix): - """ - Return the stdlib path rooted at ``prefix``, or None if it cannot be - found. + """Return the stdlib path rooted at ``prefix``, or None if it cannot + be found. """ try: return compute_stdlib_path(state, prefix) except OSError: return None + @unwrap_spec(executable='str0') def pypy_find_executable(space, executable): return space.wrap(find_executable(executable)) + @unwrap_spec(filename='str0') def pypy_resolvedirof(space, filename): return space.wrap(resolvedirof(filename)) + @unwrap_spec(executable='str0') def pypy_find_stdlib(space, executable): path, prefix = find_stdlib(get_state(space), executable) if path is None: return space.w_None - else: - space.setitem(space.sys.w_dict, space.wrap('prefix'), - space.wrap(prefix)) - space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), - space.wrap(prefix)) - return space.newlist([space.wrap(p) for p in path]) + space.setitem(space.sys.w_dict, space.wrap('prefix'), space.wrap(prefix)) + space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), + space.wrap(prefix)) + return space.newlist([space.wrap(p) for p in path]) From noreply at buildbot.pypy.org Wed Apr 9 20:33:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 20:33:27 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: merge py3k Message-ID: 
<20140409183327.966561D2351@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70503:712bbcb26dce Date: 2014-04-09 11:32 -0700 http://bitbucket.org/pypy/pypy/changeset/712bbcb26dce/ Log: merge py3k diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -459,6 +459,19 @@ .. _`getting-started`: getting-started-python.html +------------------------------------------ +Compiling PyPy swaps or runs out of memory +------------------------------------------ + +This is documented (here__ and here__). It needs 4 GB of RAM to run +"rpython targetpypystandalone" on top of PyPy, a bit more when running +on CPython. If you have less than 4 GB it will just swap forever (or +fail if you don't have enough swap). On 32-bit, divide the numbers by +two. + +.. __: http://pypy.org/download.html#building-from-source +.. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter + .. _`how do I compile my own interpreters`: ------------------------------------- diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -15,11 +15,11 @@ user, describes work in progress, and finally gives references to more implementation details. -This work was done by Remi Meier and Armin Rigo. Thanks to all donors -for crowd-funding the work so far! Please have a look at the 2nd call -for donation (*not ready yet*) +This work was done mostly by Remi Meier and Armin Rigo. Thanks to all +donors for crowd-funding the work so far! Please have a look at the +`2nd call for donation`_. -.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html +.. 
_`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -2,27 +2,31 @@ Logic to find sys.executable and the initial sys.path containing the stdlib """ -import sys +import errno import os import stat -import errno +import sys + from rpython.rlib import rpath from rpython.rlib.objectmodel import we_are_translated + from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state -platform = sys.platform IS_WINDOWS = sys.platform == 'win32' + def find_executable(executable): """ - Return the absolute path of the executable, by looking into PATH and the - current directory. If it cannot be found, return ''. + Return the absolute path of the executable, by looking into PATH and + the current directory. If it cannot be found, return ''. """ - if we_are_translated() and IS_WINDOWS and not executable.lower().endswith('.exe'): + if (we_are_translated() and IS_WINDOWS and + not executable.lower().endswith('.exe')): executable += '.exe' if os.sep in executable or (IS_WINDOWS and ':' in executable): - pass # the path is already more than just an executable name + # the path is already more than just an executable name + pass else: path = os.environ.get('PATH') if path: @@ -35,15 +39,15 @@ # 'sys.executable' should not end up being an non-existing file; # just use '' in this case. 
(CPython issue #7774) - if not os.path.isfile(executable): - executable = '' - return executable + return executable if os.path.isfile(executable) else '' + def _readlink_maybe(filename): if not IS_WINDOWS: return os.readlink(filename) raise NotImplementedError + def resolvedirof(filename): filename = rpath.rabspath(filename) dirname = rpath.rabspath(os.path.join(filename, '..')) @@ -56,36 +60,37 @@ return resolvedirof(os.path.join(dirname, link)) return dirname + def find_stdlib(state, executable): """ Find and compute the stdlib path, starting from the directory where - ``executable`` is and going one level up until we find it. Return a tuple - (path, prefix), where ``prefix`` is the root directory which contains the - stdlib. - If it cannot be found, return (None, None). + ``executable`` is and going one level up until we find it. Return a + tuple (path, prefix), where ``prefix`` is the root directory which + contains the stdlib. If it cannot be found, return (None, None). """ - if executable == '': - executable = 'pypy-c' - search = executable + search = 'pypy-c' if executable == '' else executable while True: dirname = resolvedirof(search) if dirname == search: - return None, None # not found :-( + return None, None # not found :-( newpath = compute_stdlib_path_maybe(state, dirname) if newpath is not None: return newpath, dirname search = dirname # walk to the parent directory + def _checkdir(path): st = os.stat(path) if not stat.S_ISDIR(st[0]): raise OSError(errno.ENOTDIR, path) + def compute_stdlib_path(state, prefix): """ - Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` must at - least contain a directory called ``lib-python/X.Y`` and another one called - ``lib_pypy``. If they cannot be found, it raises OSError. + Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` + must at least contain a directory called ``lib-python/X.Y`` and + another one called ``lib_pypy``. If they cannot be found, it raises + OSError. 
""" from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d' % CPYTHON_VERSION[0] @@ -110,41 +115,42 @@ importlist.append(lib_tk) # List here the extra platform-specific paths. - if platform != 'win32': - importlist.append(os.path.join(python_std_lib, 'plat-'+platform)) - if platform == 'darwin': + if not IS_WINDOWS: + importlist.append(os.path.join(python_std_lib, 'plat-' + sys.platform)) + if sys.platform == 'darwin': platmac = os.path.join(python_std_lib, 'plat-mac') importlist.append(platmac) importlist.append(os.path.join(platmac, 'lib-scriptpackages')) return importlist + def compute_stdlib_path_maybe(state, prefix): - """ - Return the stdlib path rooted at ``prefix``, or None if it cannot be - found. + """Return the stdlib path rooted at ``prefix``, or None if it cannot + be found. """ try: return compute_stdlib_path(state, prefix) except OSError: return None + @unwrap_spec(executable='str0') def pypy_find_executable(space, executable): return space.wrap(find_executable(executable)) + @unwrap_spec(filename='str0') def pypy_resolvedirof(space, filename): return space.wrap(resolvedirof(filename)) + @unwrap_spec(executable='str0') def pypy_find_stdlib(space, executable): path, prefix = find_stdlib(get_state(space), executable) if path is None: return space.w_None - else: - space.setitem(space.sys.w_dict, space.wrap('prefix'), - space.wrap(prefix)) - space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), - space.wrap(prefix)) - return space.newlist([space.wrap(p) for p in path]) + space.setitem(space.sys.w_dict, space.wrap('prefix'), space.wrap(prefix)) + space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), + space.wrap(prefix)) + return space.newlist([space.wrap(p) for p in path]) From noreply at buildbot.pypy.org Wed Apr 9 20:40:55 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 20:40:55 +0200 (CEST) Subject: [pypy-commit] pypy default: nitpicking Message-ID: <20140409184055.475371C022D@cobra.cs.uni-duesseldorf.de> 
Author: Philip Jenvey Branch: Changeset: r70504:e5b9edc29efe Date: 2014-04-09 11:39 -0700 http://bitbucket.org/pypy/pypy/changeset/e5b9edc29efe/ Log: nitpicking diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -151,7 +151,7 @@ path, prefix = find_stdlib(get_state(space), executable) if path is None: return space.w_None - space.setitem(space.sys.w_dict, space.wrap('prefix'), space.wrap(prefix)) - space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), - space.wrap(prefix)) + w_prefix = space.wrap(prefix) + space.setitem(space.sys.w_dict, space.wrap('prefix'), w_prefix) + space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), w_prefix) return space.newlist([space.wrap(p) for p in path]) From noreply at buildbot.pypy.org Wed Apr 9 20:49:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 9 Apr 2014 20:49:31 +0200 (CEST) Subject: [pypy-commit] pypy default: call check_not_dir in fdopen Message-ID: <20140409184931.65F401D23C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70505:fcb0695ec986 Date: 2014-04-09 14:47 -0400 http://bitbucket.org/pypy/pypy/changeset/fcb0695ec986/ Log: call check_not_dir in fdopen diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -137,6 +137,7 @@ self.check_mode_ok(mode) stream = streamio.fdopen_as_stream(fd, mode, buffering, signal_checker(self.space)) + self.check_not_dir(fd) self.fdopenstream(stream, fd, mode) def direct_close(self): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -305,6 +305,13 @@ finally: __builtins__.file = _file + def test_fdopen_directory(self): + import errno + os = self.posix + fd = os.open('/', os.O_RDONLY) + exc = raises(IOError, os.fdopen, 
fd, 'r') + assert exc.value.errno == errno.EISDIR + def test_getcwd(self): assert isinstance(self.posix.getcwd(), str) assert isinstance(self.posix.getcwdu(), unicode) @@ -340,7 +347,6 @@ else: assert (unicode, u) in typed_result - def test_access(self): pdir = self.pdir + '/file1' posix = self.posix @@ -351,7 +357,6 @@ if sys.platform != "win32": assert not posix.access(pdir, posix.X_OK) - def test_times(self): """ posix.times() should return a five-tuple giving float-representations @@ -1156,8 +1161,8 @@ res = os.system(cmd) assert res == 0 + class AppTestPosixUnicode: - def setup_class(cls): cls.space = space cls.w_posix = space.appexec([], GET_POSIX) @@ -1198,6 +1203,7 @@ except OSError: pass + class AppTestUnicodeFilename: def setup_class(cls): ufilename = (unicode(udir.join('test_unicode_filename_')) + From noreply at buildbot.pypy.org Wed Apr 9 20:52:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 20:52:57 +0200 (CEST) Subject: [pypy-commit] pypy default: fix translation Message-ID: <20140409185257.791211C022D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70506:da3975c63a3e Date: 2014-04-09 11:52 -0700 http://bitbucket.org/pypy/pypy/changeset/da3975c63a3e/ Log: fix translation diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -13,7 +13,9 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state -IS_WINDOWS = sys.platform == 'win32' +PLATFORM = sys.platform +_MACOSX = sys.platform == 'darwin' +_WIN32 = sys.platform == 'win32' def find_executable(executable): @@ -21,10 +23,10 @@ Return the absolute path of the executable, by looking into PATH and the current directory. If it cannot be found, return ''. 
""" - if (we_are_translated() and IS_WINDOWS and + if (we_are_translated() and _WIN32 and not executable.lower().endswith('.exe')): executable += '.exe' - if os.sep in executable or (IS_WINDOWS and ':' in executable): + if os.sep in executable or (_WIN32 and ':' in executable): # the path is already more than just an executable name pass else: @@ -43,7 +45,7 @@ def _readlink_maybe(filename): - if not IS_WINDOWS: + if not _WIN32: return os.readlink(filename) raise NotImplementedError @@ -116,9 +118,9 @@ importlist.append(lib_tk) # List here the extra platform-specific paths. - if not IS_WINDOWS: - importlist.append(os.path.join(python_std_lib, 'plat-' + sys.platform)) - if sys.platform == 'darwin': + if not _WIN32: + importlist.append(os.path.join(python_std_lib, 'plat-' + PLATFORM)) + if _MACOSX: platmac = os.path.join(python_std_lib, 'plat-mac') importlist.append(platmac) importlist.append(os.path.join(platmac, 'lib-scriptpackages')) diff --git a/pypy/module/sys/test/test_initpath.py b/pypy/module/sys/test/test_initpath.py --- a/pypy/module/sys/test/test_initpath.py +++ b/pypy/module/sys/test/test_initpath.py @@ -84,7 +84,7 @@ assert find_executable('pypy') == a.join('pypy') # monkeypatch.setattr(initpath, 'we_are_translated', lambda: True) - monkeypatch.setattr(initpath, 'IS_WINDOWS', True) + monkeypatch.setattr(initpath, '_WIN32', True) monkeypatch.setenv('PATH', str(a)) a.join('pypy.exe').ensure(file=True) assert find_executable('pypy') == a.join('pypy.exe') From noreply at buildbot.pypy.org Wed Apr 9 21:22:50 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:22:50 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: start using generate_guards to implement generalization_of Message-ID: <20140409192250.719F01C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70507:605961f3e5de Date: 2014-04-09 16:34 +0200 http://bitbucket.org/pypy/pypy/changeset/605961f3e5de/ Log: 
start using generate_guards to implement generalization_of diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -57,7 +57,7 @@ info1.position = 0 info2 = NotVirtualStateInfo(value2) info2.position = 0 - return info1.generalization_of(info2, {}, {}) + return info1.generalization_of(info2, {}, {}, LLtypeMixin.cpu) assert isgeneral(OptValue(BoxInt()), OptValue(ConstInt(7))) assert not isgeneral(OptValue(ConstInt(7)), OptValue(BoxInt())) @@ -66,9 +66,10 @@ nonnull = OptValue(BoxPtr()) nonnull.make_nonnull(0) knownclass = OptValue(BoxPtr()) - knownclass.make_constant_class(ConstPtr(self.someptr1), 0) + clsbox = LLtypeMixin.cpu.ts.cls_of_box(BoxPtr(LLtypeMixin.myptr)) + knownclass.make_constant_class(clsbox, 0) const = OptValue(BoxPtr) - const.make_constant_class(ConstPtr(self.someptr1), 0) + const.make_constant_class(clsbox, 0) const.make_constant(ConstPtr(self.someptr1)) inorder = [ptr, nonnull, knownclass, const] for i in range(len(inorder)): @@ -179,12 +180,14 @@ def check_no_guards(self, info1, info2, box_or_value=None): value, _ = self._box_or_value(box_or_value) + info1.position = info2.position = 0 guards = [] info1.generate_guards(info2, value, self.cpu, guards, {}) assert not guards def check_invalid(self, info1, info2, box_or_value=None): value, _ = self._box_or_value(box_or_value) + info1.position = info2.position = 0 guards = [] with py.test.raises(InvalidLoop): info1.generate_guards(info2, value, self.cpu, guards, {}) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -388,7 +388,8 @@ #debug_start('jit-log-virtualstate') #virtual_state.debug_print('Closed loop with ') bad = {} - if 
not virtual_state.generalization_of(final_virtual_state, bad): + if not virtual_state.generalization_of(final_virtual_state, bad, + cpu=self.optimizer.cpu): # We ended up with a virtual state that is not compatible # and we are thus unable to jump to the start of the loop #final_virtual_state.debug_print("Bad virtual state at end of loop, ", @@ -558,7 +559,9 @@ bad = {} debugmsg = 'Did not match ' - if target.virtual_state.generalization_of(virtual_state, bad): + if target.virtual_state.generalization_of( + virtual_state, bad, + cpu = self.optimizer.cpu): ok = True debugmsg = 'Matched ' else: diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -17,16 +17,15 @@ class AbstractVirtualStateInfo(resume.AbstractVirtualInfo): position = -1 - def generalization_of(self, other, renum, bad): - assert self.position != -1 - if self.position in renum: - result = renum[self.position] == other.position - else: - renum[self.position] = other.position - result = self.generalization_of_renumbering_done(other, renum, bad) - if not result: - bad[self] = bad[other] = None - return result + def generalization_of(self, other, renum, bad, cpu=None): + # cpu can be None for testing only + guards = [] + try: + self.generate_guards(other, None, cpu, guards, renum, bad) + assert not guards + return True + except InvalidLoop: + return False def generate_guards(self, other, value, cpu, extra_guards, renum, bad=None): """ generate guards (output in the list extra_guards) that make runtime @@ -38,6 +37,7 @@ the value) as a guiding heuristic whether making such guards makes sense. 
if None is passed in for value, no guard is ever generated, and this function degenerates to a generalization check.""" + assert self.position != -1 if bad is None: bad = {} assert value is None or isinstance(value, OptValue) @@ -93,43 +93,27 @@ def __init__(self, fielddescrs): self.fielddescrs = fielddescrs - def generalization_of_renumbering_done(self, other, renum, bad): + def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): if not self._generalization_of_structpart(other): - return False + raise InvalidLoop("different kinds of structs") assert isinstance(other, AbstractVirtualStructStateInfo) assert len(self.fielddescrs) == len(self.fieldstate) assert len(other.fielddescrs) == len(other.fieldstate) + if value is not None: + assert isinstance(value, virtualize.AbstractVirtualStructValue) + assert value.is_virtual() + if len(self.fielddescrs) != len(other.fielddescrs): - return False + raise InvalidLoop("field descrs don't match") for i in range(len(self.fielddescrs)): if other.fielddescrs[i] is not self.fielddescrs[i]: - return False - if not self.fieldstate[i].generalization_of(other.fieldstate[i], - renum, bad): - return False - - return True - - - def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): - if not self._generalization_of_structpart(other): - raise InvalidLoop("XXX") - - assert isinstance(other, AbstractVirtualStructStateInfo) - assert len(self.fielddescrs) == len(self.fieldstate) - assert len(other.fielddescrs) == len(other.fieldstate) - assert isinstance(value, virtualize.AbstractVirtualStructValue) - assert value.is_virtual() - - if len(self.fielddescrs) != len(other.fielddescrs): - raise InvalidLoop("XXX") - - for i in range(len(self.fielddescrs)): - if other.fielddescrs[i] is not self.fielddescrs[i]: - raise InvalidLoop("XXX") - v = value._fields[self.fielddescrs[i]] # must be there + raise InvalidLoop("field descrs don't match") + if value is not None: + v = value._fields[self.fielddescrs[i]] # must be 
there + else: + v = None self.fieldstate[i].generate_guards(other.fieldstate[i], v, cpu, extra_guards, renum) @@ -187,18 +171,18 @@ def __init__(self, arraydescr): self.arraydescr = arraydescr - def generalization_of_renumbering_done(self, other, renum, bad): + def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): if not isinstance(other, VArrayStateInfo): - return False + raise InvalidLoop("other is not an array") if self.arraydescr is not other.arraydescr: - return False + raise InvalidLoop("other is a different kind of array") if len(self.fieldstate) != len(other.fieldstate): - return False + raise InvalidLoop("other has a different length") for i in range(len(self.fieldstate)): - if not self.fieldstate[i].generalization_of(other.fieldstate[i], - renum, bad): - return False - return True + # XXX value + self.fieldstate[i].generate_guards(other.fieldstate[i], + None, cpu, extra_guards, + renum, bad) def enum_forced_boxes(self, boxes, value, optimizer): if not isinstance(value, virtualize.VArrayValue): @@ -226,27 +210,29 @@ self.arraydescr = arraydescr self.fielddescrs = fielddescrs - def generalization_of_renumbering_done(self, other, renum, bad): + def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): + # XXX this needs a test in test_virtualstate!!! 
if not isinstance(other, VArrayStructStateInfo): - return False + raise InvalidLoop("other is not an VArrayStructStateInfo") if not self.arraydescr is not other.arraydescr: - return False + raise InvalidLoop("other is a different kind of array") if len(self.fielddescrs) != len(other.fielddescrs): - return False + raise InvalidLoop("other has a different length") p = 0 for i in range(len(self.fielddescrs)): if len(self.fielddescrs[i]) != len(other.fielddescrs[i]): - return False + raise InvalidLoop("other has a different length") for j in range(len(self.fielddescrs[i])): if self.fielddescrs[i][j] is not other.fielddescrs[i][j]: - return False - if not self.fieldstate[p].generalization_of(other.fieldstate[p], - renum, bad): - return False + raise InvalidLoop("other has a different length") + self.fieldstate[p].generate_guards(other.fieldstate[p], + None, # XXX + cpu, + extra_guards, + renum, bad) p += 1 - return True def _enum(self, virtual_state): for s in self.fieldstate: @@ -315,17 +301,15 @@ return True def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): - if value is not None: + if value is None or self.is_opaque: + box = None # generating guards for opaque pointers isn't safe + else: box = value.box - else: - box = None if not isinstance(other, NotVirtualStateInfo): raise InvalidLoop('The VirtualStates does not match as a ' + 'virtual appears where a pointer is needed ' + 'and it is too late to force it.') - if self.is_opaque: - raise InvalidLoop('Generating guards for opaque pointers is not safe') if self.lenbound and not self.lenbound.generalization_of(other.lenbound): raise InvalidLoop() From noreply at buildbot.pypy.org Wed Apr 9 21:22:51 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:22:51 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: oops Message-ID: <20140409192251.99DBE1C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: 
r70508:2c1378852b5c Date: 2014-04-09 16:39 +0200 http://bitbucket.org/pypy/pypy/changeset/2c1378852b5c/ Log: oops diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -450,13 +450,13 @@ for s in state: s.enum(self) - def generalization_of(self, other, bad=None): + def generalization_of(self, other, bad=None, cpu=None): if bad is None: bad = {} assert len(self.state) == len(other.state) renum = {} for i in range(len(self.state)): - if not self.state[i].generalization_of(other.state[i], renum, bad): + if not self.state[i].generalization_of(other.state[i], renum, bad, cpu=None): return False return True From noreply at buildbot.pypy.org Wed Apr 9 21:22:52 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:22:52 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: this should no longer be necessary Message-ID: <20140409192252.B2FF31C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70509:e4ad4a4796df Date: 2014-04-09 16:42 +0200 http://bitbucket.org/pypy/pypy/changeset/e4ad4a4796df/ Log: this should no longer be necessary diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -576,18 +576,7 @@ debugmsg = 'Guarded to match ' except InvalidLoop: pass - if ok and not patchguardop: - # if we can't patch the guards to go to a good target, no use - # in jumping to this label - for guard in extra_guards: - if guard.is_guard(): - ok = False - break - else: - for shop in target.short_preamble[1:]: - if shop.is_guard(): - ok = False - break + assert patchguardop is not None or (extra_guards == [] and len(target.short_preamble) == 1) 
target.virtual_state.debug_print(debugmsg, bad) From noreply at buildbot.pypy.org Wed Apr 9 21:22:53 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:22:53 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: no need to call generalization_of any more Message-ID: <20140409192253.C69461C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70510:fbac02bbbb90 Date: 2014-04-09 16:45 +0200 http://bitbucket.org/pypy/pypy/changeset/fbac02bbbb90/ Log: no need to call generalization_of any more diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -559,23 +559,21 @@ bad = {} debugmsg = 'Did not match ' - if target.virtual_state.generalization_of( - virtual_state, bad, - cpu = self.optimizer.cpu): + try: + cpu = self.optimizer.cpu + target.virtual_state.generate_guards(virtual_state, + values, + cpu, + extra_guards) + ok = True - debugmsg = 'Matched ' - else: - try: - cpu = self.optimizer.cpu - target.virtual_state.generate_guards(virtual_state, - values, - cpu, - extra_guards) + if extra_guards: + debugmsg = 'Guarded to match ' + else: + debugmsg = 'Matched ' + except InvalidLoop: + continue - ok = True - debugmsg = 'Guarded to match ' - except InvalidLoop: - pass assert patchguardop is not None or (extra_guards == [] and len(target.short_preamble) == 1) target.virtual_state.debug_print(debugmsg, bad) From noreply at buildbot.pypy.org Wed Apr 9 21:22:54 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:22:54 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: finally kill this method Message-ID: <20140409192254.D9F9B1C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70511:fb14e13b500c Date: 2014-04-09 16:52 +0200 
http://bitbucket.org/pypy/pypy/changeset/fb14e13b500c/ Log: finally kill this method diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -277,34 +277,14 @@ self.position_in_notvirtuals = -1 self.lenbound = value.lenbound - def generalization_of_renumbering_done(self, other, renum, bad): - # XXX This will always retrace instead of forcing anything which - # might be what we want sometimes? - if not isinstance(other, NotVirtualStateInfo): - return False - if other.level < self.level: - return False - if self.level == LEVEL_CONSTANT: - if not self.constbox.same_constant(other.constbox): - return False - elif self.level == LEVEL_KNOWNCLASS: - if not self.known_class.same_constant(other.known_class): - return False - elif self.level == LEVEL_NONNULL: - if other.constbox and not other.constbox.nonnull(): - return False - - if not self.intbound.contains_bound(other.intbound): - return False - if self.lenbound: - return self.lenbound.generalization_of(other.lenbound) - return True def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): if value is None or self.is_opaque: box = None # generating guards for opaque pointers isn't safe else: box = value.box + # XXX This will always retrace instead of forcing anything which + # might be what we want sometimes? 
if not isinstance(other, NotVirtualStateInfo): raise InvalidLoop('The VirtualStates does not match as a ' + 'virtual appears where a pointer is needed ' + From noreply at buildbot.pypy.org Wed Apr 9 21:22:55 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:22:55 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: there's an equivalent test in test_nonvirtual_all_combinations Message-ID: <20140409192255.EC8031C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70512:9d79b10afe1e Date: 2014-04-09 17:24 +0200 http://bitbucket.org/pypy/pypy/changeset/9d79b10afe1e/ Log: there's an equivalent test in test_nonvirtual_all_combinations diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -435,21 +435,6 @@ vstate2.generate_guards(vstate3, [value, value], self.cpu, guards) - def test_known_value_virtualstate(self): - box1 = BoxInt(1) - box2 = BoxInt(1) - value1 = OptValue(box1) - value2 = OptValue(box2) - value1.make_constant(ConstInt(1)) - vstate1 = VirtualState([NotVirtualStateInfo(value1)]) - vstate2 = VirtualState([NotVirtualStateInfo(value2)]) - expected = """ - [i0] - guard_value(i0, 1) [] - """ - guards = [] - vstate1.generate_guards(vstate2, [value2], self.cpu, guards) - self.compare(guards, expected, [box2]) def test_generate_guards_on_virtual_fields_matches(self): innervalue1 = OptValue(self.nodebox) From noreply at buildbot.pypy.org Wed Apr 9 21:22:57 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:22:57 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: change the generate_guards interface to not have to pass around four variables Message-ID: <20140409192257.0BD1C1C12F3@cobra.cs.uni-duesseldorf.de> Author: 
Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70513:25dfebb4f119 Date: 2014-04-09 17:31 +0200 http://bitbucket.org/pypy/pypy/changeset/25dfebb4f119/ Log: change the generate_guards interface to not have to pass around four variables (not quite happy with the result) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -2,7 +2,7 @@ import py from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \ - VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes + VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes, GenerateGuardState from rpython.jit.metainterp.optimizeopt.optimizer import OptValue from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr from rpython.rtyper.lltypesystem import lltype, llmemory @@ -162,9 +162,9 @@ if inputargs is None: inputargs = [box] info1.position = info2.position = 0 - guards = [] - info1.generate_guards(info2, value, self.cpu, guards, {}) - self.compare(guards, expected, inputargs) + state = GenerateGuardState(self.cpu) + info1.generate_guards(info2, value, state) + self.compare(state.extra_guards, expected, inputargs) def compare(self, guards, expected, inputargs): loop = self.parse(expected) @@ -181,16 +181,17 @@ def check_no_guards(self, info1, info2, box_or_value=None): value, _ = self._box_or_value(box_or_value) info1.position = info2.position = 0 - guards = [] - info1.generate_guards(info2, value, self.cpu, guards, {}) - assert not guards + state = GenerateGuardState(self.cpu) + info1.generate_guards(info2, value, state) + assert not state.extra_guards def check_invalid(self, info1, info2, box_or_value=None): value, _ = 
self._box_or_value(box_or_value) info1.position = info2.position = 0 guards = [] with py.test.raises(InvalidLoop): - info1.generate_guards(info2, value, self.cpu, guards, {}) + state = GenerateGuardState(self.cpu) + info1.generate_guards(info2, value, state) def test_nonvirtual_all_combinations(self): # set up infos @@ -422,18 +423,15 @@ guard_nonnull(p0) [] guard_class(p0, ConstClass(node_vtable)) [] """ - guards = [] - vstate1.generate_guards(vstate2, [value, value], self.cpu, guards) - self.compare(guards, expected, [self.nodebox]) + state = vstate1.generate_guards(vstate2, [value, value], self.cpu) + self.compare(state.extra_guards, expected, [self.nodebox]) with py.test.raises(InvalidLoop): - guards = [] vstate1.generate_guards(vstate3, [value, value], - self.cpu, guards) + self.cpu) with py.test.raises(InvalidLoop): - guards = [] vstate2.generate_guards(vstate3, [value, value], - self.cpu, guards) + self.cpu) def test_generate_guards_on_virtual_fields_matches(self): diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -557,26 +557,26 @@ ok = False extra_guards = [] - bad = {} debugmsg = 'Did not match ' try: cpu = self.optimizer.cpu - target.virtual_state.generate_guards(virtual_state, - values, - cpu, - extra_guards) + state = target.virtual_state.generate_guards(virtual_state, + values, + cpu) ok = True + extra_guards = state.extra_guards if extra_guards: debugmsg = 'Guarded to match ' else: debugmsg = 'Matched ' except InvalidLoop: + target.virtual_state.debug_print(debugmsg, {}) # XXX continue assert patchguardop is not None or (extra_guards == [] and len(target.short_preamble) == 1) - target.virtual_state.debug_print(debugmsg, bad) + target.virtual_state.debug_print(debugmsg, {}) if ok: debug_stop('jit-log-virtualstate') diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py 
b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -13,6 +13,18 @@ class BadVirtualState(Exception): pass +class GenerateGuardState(object): + def __init__(self, cpu=None, guards=None, renum=None, bad=None): + self.cpu = cpu + if guards is None: + guards = [] + self.extra_guards = guards + if renum is None: + renum = {} + self.renum = renum + if bad is None: + bad = {} + self.bad = bad class AbstractVirtualStateInfo(resume.AbstractVirtualInfo): position = -1 @@ -20,14 +32,15 @@ def generalization_of(self, other, renum, bad, cpu=None): # cpu can be None for testing only guards = [] + state = GenerateGuardState(cpu, guards, renum, bad) try: - self.generate_guards(other, None, cpu, guards, renum, bad) + self.generate_guards(other, None, state) assert not guards return True except InvalidLoop: return False - def generate_guards(self, other, value, cpu, extra_guards, renum, bad=None): + def generate_guards(self, other, value, state): """ generate guards (output in the list extra_guards) that make runtime values of the shape other match the shape of self. if that's not possible, InvalidLoop is thrown and bad gets keys set which parts of @@ -37,26 +50,24 @@ the value) as a guiding heuristic whether making such guards makes sense. if None is passed in for value, no guard is ever generated, and this function degenerates to a generalization check.""" + assert value is None or isinstance(value, OptValue) assert self.position != -1 - if bad is None: - bad = {} - assert value is None or isinstance(value, OptValue) - if self.position in renum: - if renum[self.position] != other.position: - bad[self] = bad[other] = None + if self.position in state.renum: + if state.renum[self.position] != other.position: + state.bad[self] = state.bad[other] = None raise InvalidLoop('The numbering of the virtual states does not ' + 'match. 
This means that two virtual fields ' + 'have been set to the same Box in one of the ' + 'virtual states but not in the other.') else: - renum[self.position] = other.position + state.renum[self.position] = other.position try: - self._generate_guards(other, value, cpu, extra_guards, renum, bad) + self._generate_guards(other, value, state) except InvalidLoop: - bad[self] = bad[other] = None + state.bad[self] = state.bad[other] = None raise - def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): + def _generate_guards(self, other, value, state): raise InvalidLoop('Generating guards for making the VirtualStates ' + 'at hand match have not been implemented') @@ -93,7 +104,7 @@ def __init__(self, fielddescrs): self.fielddescrs = fielddescrs - def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): + def _generate_guards(self, other, value, state): if not self._generalization_of_structpart(other): raise InvalidLoop("different kinds of structs") @@ -114,7 +125,7 @@ v = value._fields[self.fielddescrs[i]] # must be there else: v = None - self.fieldstate[i].generate_guards(other.fieldstate[i], v, cpu, extra_guards, renum) + self.fieldstate[i].generate_guards(other.fieldstate[i], v, state) def _generalization_of_structpart(self, other): @@ -171,7 +182,7 @@ def __init__(self, arraydescr): self.arraydescr = arraydescr - def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): + def _generate_guards(self, other, value, state): if not isinstance(other, VArrayStateInfo): raise InvalidLoop("other is not an array") if self.arraydescr is not other.arraydescr: @@ -181,8 +192,7 @@ for i in range(len(self.fieldstate)): # XXX value self.fieldstate[i].generate_guards(other.fieldstate[i], - None, cpu, extra_guards, - renum, bad) + None, state) def enum_forced_boxes(self, boxes, value, optimizer): if not isinstance(value, virtualize.VArrayValue): @@ -210,7 +220,7 @@ self.arraydescr = arraydescr self.fielddescrs = fielddescrs - def 
_generate_guards(self, other, value, cpu, extra_guards, renum, bad): + def _generate_guards(self, other, value, state): # XXX this needs a test in test_virtualstate!!! if not isinstance(other, VArrayStructStateInfo): raise InvalidLoop("other is not an VArrayStructStateInfo") @@ -229,9 +239,7 @@ raise InvalidLoop("other has a different length") self.fieldstate[p].generate_guards(other.fieldstate[p], None, # XXX - cpu, - extra_guards, - renum, bad) + state) p += 1 def _enum(self, virtual_state): @@ -278,7 +286,7 @@ self.lenbound = value.lenbound - def _generate_guards(self, other, value, cpu, extra_guards, renum, bad): + def _generate_guards(self, other, value, state): if value is None or self.is_opaque: box = None # generating guards for opaque pointers isn't safe else: @@ -291,6 +299,8 @@ 'and it is too late to force it.') + extra_guards = state.extra_guards + cpu = state.cpu if self.lenbound and not self.lenbound.generalization_of(other.lenbound): raise InvalidLoop() @@ -440,14 +450,13 @@ return False return True - def generate_guards(self, other, values, cpu, extra_guards, bad=None): - if bad is None: - bad = {} + def generate_guards(self, other, values, cpu): assert len(self.state) == len(other.state) == len(values) - renum = {} + state = GenerateGuardState(cpu) for i in range(len(self.state)): self.state[i].generate_guards(other.state[i], values[i], - cpu, extra_guards, renum, bad) + state) + return state def make_inputargs(self, values, optimizer, keyboxes=False): if optimizer.optearlyforce: From noreply at buildbot.pypy.org Wed Apr 9 21:22:58 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:22:58 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: fully remove generalization_of, only leave in on VirtualState Message-ID: <20140409192258.166F81C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70514:f6b8d4578f9b Date: 2014-04-09 18:05 +0200 
http://bitbucket.org/pypy/pypy/changeset/f6b8d4578f9b/ Log: fully remove generalization_of, only leave in on VirtualState diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -14,138 +14,8 @@ from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import ResOperation, rop -class TestBasic: - someptr1 = LLtypeMixin.myptr - someptr2 = LLtypeMixin.myptr2 +class BaseTestGenerateGuards(BaseTest): - def test_position_generalization(self): - def postest(info1, info2): - info1.position = 0 - assert info1.generalization_of(info1, {}, {}) - info2.position = 0 - assert info1.generalization_of(info2, {}, {}) - info2.position = 1 - renum = {} - assert info1.generalization_of(info2, renum, {}) - assert renum == {0:1} - assert info1.generalization_of(info2, {0:1}, {}) - assert info1.generalization_of(info2, {1:1}, {}) - bad = {} - assert not info1.generalization_of(info2, {0:0}, bad) - assert info1 in bad and info2 in bad - - for BoxType in (BoxInt, BoxFloat, BoxPtr): - info1 = NotVirtualStateInfo(OptValue(BoxType())) - info2 = NotVirtualStateInfo(OptValue(BoxType())) - postest(info1, info2) - - info1, info2 = VArrayStateInfo(42), VArrayStateInfo(42) - info1.fieldstate = info2.fieldstate = [] - postest(info1, info2) - - info1, info2 = VStructStateInfo(42, []), VStructStateInfo(42, []) - info1.fieldstate = info2.fieldstate = [] - postest(info1, info2) - - info1, info2 = VirtualStateInfo(ConstInt(42), []), VirtualStateInfo(ConstInt(42), []) - info1.fieldstate = info2.fieldstate = [] - postest(info1, info2) - - def test_NotVirtualStateInfo_generalization(self): - def isgeneral(value1, value2): - info1 = NotVirtualStateInfo(value1) - info1.position = 0 - info2 = NotVirtualStateInfo(value2) 
- info2.position = 0 - return info1.generalization_of(info2, {}, {}, LLtypeMixin.cpu) - - assert isgeneral(OptValue(BoxInt()), OptValue(ConstInt(7))) - assert not isgeneral(OptValue(ConstInt(7)), OptValue(BoxInt())) - - ptr = OptValue(BoxPtr()) - nonnull = OptValue(BoxPtr()) - nonnull.make_nonnull(0) - knownclass = OptValue(BoxPtr()) - clsbox = LLtypeMixin.cpu.ts.cls_of_box(BoxPtr(LLtypeMixin.myptr)) - knownclass.make_constant_class(clsbox, 0) - const = OptValue(BoxPtr) - const.make_constant_class(clsbox, 0) - const.make_constant(ConstPtr(self.someptr1)) - inorder = [ptr, nonnull, knownclass, const] - for i in range(len(inorder)): - for j in range(i, len(inorder)): - assert isgeneral(inorder[i], inorder[j]) - if i != j: - assert not isgeneral(inorder[j], inorder[i]) - - value1 = OptValue(BoxInt()) - value2 = OptValue(BoxInt()) - value2.intbound.make_lt(IntBound(10, 10)) - assert isgeneral(value1, value2) - assert not isgeneral(value2, value1) - - assert isgeneral(OptValue(ConstInt(7)), OptValue(ConstInt(7))) - S = lltype.GcStruct('S') - foo = lltype.malloc(S) - fooref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) - assert isgeneral(OptValue(ConstPtr(fooref)), - OptValue(ConstPtr(fooref))) - - value1 = OptValue(BoxPtr()) - value1.make_nonnull(None) - value2 = OptValue(ConstPtr(LLtypeMixin.nullptr)) - assert not isgeneral(value1, value2) - - def test_field_matching_generalization(self): - const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) - const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) - const1.position = const2.position = 1 - assert not const1.generalization_of(const2, {}, {}) - assert not const2.generalization_of(const1, {}, {}) - - def fldtst(info1, info2): - info1.position = info2.position = 0 - info1.fieldstate = [const1] - info2.fieldstate = [const2] - assert not info1.generalization_of(info2, {}, {}) - assert not info2.generalization_of(info1, {}, {}) - assert info1.generalization_of(info1, {}, {}) - assert info2.generalization_of(info2, {}, {}) - 
fldtst(VArrayStateInfo(42), VArrayStateInfo(42)) - fldtst(VStructStateInfo(42, [7]), VStructStateInfo(42, [7])) - fldtst(VirtualStateInfo(ConstInt(42), [7]), VirtualStateInfo(ConstInt(42), [7])) - - def test_known_class_generalization(self): - knownclass1 = OptValue(BoxPtr()) - knownclass1.make_constant_class(ConstPtr(self.someptr1), 0) - info1 = NotVirtualStateInfo(knownclass1) - info1.position = 0 - knownclass2 = OptValue(BoxPtr()) - knownclass2.make_constant_class(ConstPtr(self.someptr1), 0) - info2 = NotVirtualStateInfo(knownclass2) - info2.position = 0 - assert info1.generalization_of(info2, {}, {}) - assert info2.generalization_of(info1, {}, {}) - - knownclass3 = OptValue(BoxPtr()) - knownclass3.make_constant_class(ConstPtr(self.someptr2), 0) - info3 = NotVirtualStateInfo(knownclass3) - info3.position = 0 - assert not info1.generalization_of(info3, {}, {}) - assert not info2.generalization_of(info3, {}, {}) - assert not info3.generalization_of(info2, {}, {}) - assert not info3.generalization_of(info1, {}, {}) - - - def test_circular_generalization(self): - for info in (VArrayStateInfo(42), VStructStateInfo(42, [7]), - VirtualStateInfo(ConstInt(42), [7])): - info.position = 0 - info.fieldstate = [info] - assert info.generalization_of(info, {}, {}) - - -class BaseTestGenerateGuards(BaseTest): def _box_or_value(self, box_or_value=None): if box_or_value is None: return None, None @@ -178,22 +48,162 @@ assert equaloplists(guards, loop.operations, False, boxmap) - def check_no_guards(self, info1, info2, box_or_value=None): + def check_no_guards(self, info1, info2, box_or_value=None, state=None): value, _ = self._box_or_value(box_or_value) - info1.position = info2.position = 0 - state = GenerateGuardState(self.cpu) + if info1.position == -1: + info1.position = 0 + if info2.position == -1: + info2.position = 0 + if state is None: + state = GenerateGuardState(self.cpu) info1.generate_guards(info2, value, state) assert not state.extra_guards + return state - def 
check_invalid(self, info1, info2, box_or_value=None): + def check_invalid(self, info1, info2, box_or_value=None, state=None): value, _ = self._box_or_value(box_or_value) - info1.position = info2.position = 0 - guards = [] + if info1.position == -1: + info1.position = 0 + if info2.position == -1: + info2.position = 0 + if state is None: + state = GenerateGuardState(self.cpu) with py.test.raises(InvalidLoop): - state = GenerateGuardState(self.cpu) info1.generate_guards(info2, value, state) - def test_nonvirtual_all_combinations(self): + + def test_position_generalization(self): + def postest(info1, info2): + info1.position = 0 + self.check_no_guards(info1, info1) + info2.position = 0 + self.check_no_guards(info1, info2) + info2.position = 1 + state = self.check_no_guards(info1, info2) + assert state.renum == {0:1} + + assert self.check_no_guards(info1, info2, state=state) + + # feed fake renums + state.renum = {1: 1} + self.check_no_guards(info1, info2, state=state) + + state.renum = {0: 0} + self.check_invalid(info1, info2, state=state) + assert info1 in state.bad and info2 in state.bad + + for BoxType in (BoxInt, BoxFloat, BoxPtr): + info1 = NotVirtualStateInfo(OptValue(BoxType())) + info2 = NotVirtualStateInfo(OptValue(BoxType())) + postest(info1, info2) + + info1, info2 = VArrayStateInfo(42), VArrayStateInfo(42) + info1.fieldstate = info2.fieldstate = [] + postest(info1, info2) + + info1, info2 = VStructStateInfo(42, []), VStructStateInfo(42, []) + info1.fieldstate = info2.fieldstate = [] + postest(info1, info2) + + info1, info2 = VirtualStateInfo(ConstInt(42), []), VirtualStateInfo(ConstInt(42), []) + info1.fieldstate = info2.fieldstate = [] + postest(info1, info2) + + def test_NotVirtualStateInfo_generalization(self): + def isgeneral(value1, value2): + info1 = NotVirtualStateInfo(value1) + info1.position = 0 + info2 = NotVirtualStateInfo(value2) + info2.position = 0 + return VirtualState([info1]).generalization_of(VirtualState([info2]), cpu=self.cpu) + + assert 
isgeneral(OptValue(BoxInt()), OptValue(ConstInt(7))) + assert not isgeneral(OptValue(ConstInt(7)), OptValue(BoxInt())) + + ptr = OptValue(BoxPtr()) + nonnull = OptValue(BoxPtr()) + nonnull.make_nonnull(0) + knownclass = OptValue(BoxPtr()) + clsbox = self.cpu.ts.cls_of_box(BoxPtr(self.myptr)) + knownclass.make_constant_class(clsbox, 0) + const = OptValue(BoxPtr) + const.make_constant_class(clsbox, 0) + const.make_constant(ConstPtr(self.myptr)) + inorder = [ptr, nonnull, knownclass, const] + for i in range(len(inorder)): + for j in range(i, len(inorder)): + assert isgeneral(inorder[i], inorder[j]) + if i != j: + assert not isgeneral(inorder[j], inorder[i]) + + value1 = OptValue(BoxInt()) + value2 = OptValue(BoxInt()) + value2.intbound.make_lt(IntBound(10, 10)) + assert isgeneral(value1, value2) + assert not isgeneral(value2, value1) + + assert isgeneral(OptValue(ConstInt(7)), OptValue(ConstInt(7))) + S = lltype.GcStruct('S') + foo = lltype.malloc(S) + fooref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) + assert isgeneral(OptValue(ConstPtr(fooref)), + OptValue(ConstPtr(fooref))) + + value1 = OptValue(BoxPtr()) + value1.make_nonnull(None) + value2 = OptValue(ConstPtr(self.nullptr)) + assert not isgeneral(value1, value2) + + def test_field_matching_generalization(self): + const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) + const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) + const1.position = const2.position = 1 + self.check_invalid(const1, const2) + self.check_invalid(const2, const1) + + def fldtst(info1, info2): + info1.position = info2.position = 0 + info1.fieldstate = [const1] + info2.fieldstate = [const2] + self.check_invalid(info1, info2) + self.check_invalid(info2, info1) + self.check_no_guards(info1, info1) + self.check_no_guards(info2, info2) + fldtst(VArrayStateInfo(42), VArrayStateInfo(42)) + fldtst(VStructStateInfo(42, [7]), VStructStateInfo(42, [7])) + fldtst(VirtualStateInfo(ConstInt(42), [7]), VirtualStateInfo(ConstInt(42), [7])) + + def 
test_known_class_generalization(self): + knownclass1 = OptValue(BoxPtr()) + knownclass1.make_constant_class(ConstPtr(self.myptr), 0) + info1 = NotVirtualStateInfo(knownclass1) + info1.position = 0 + knownclass2 = OptValue(BoxPtr()) + knownclass2.make_constant_class(ConstPtr(self.myptr), 0) + info2 = NotVirtualStateInfo(knownclass2) + info2.position = 0 + self.check_no_guards(info1, info2) + self.check_no_guards(info2, info1) + + knownclass3 = OptValue(BoxPtr()) + knownclass3.make_constant_class(ConstPtr(self.myptr2), 0) + info3 = NotVirtualStateInfo(knownclass3) + info3.position = 0 + self.check_invalid(info1, info3) + self.check_invalid(info2, info3) + self.check_invalid(info3, info2) + self.check_invalid(info3, info1) + + + def test_circular_generalization(self): + for info in (VArrayStateInfo(42), VStructStateInfo(42, [7]), + VirtualStateInfo(ConstInt(42), [7])): + info.position = 0 + info.fieldstate = [info] + self.check_no_guards(info, info) + + + def test_generate_guards_nonvirtual_all_combinations(self): # set up infos unknown_val = OptValue(self.nodebox) unknownnull_val = OptValue(BoxPtr(self.nullptr)) diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -29,17 +29,6 @@ class AbstractVirtualStateInfo(resume.AbstractVirtualInfo): position = -1 - def generalization_of(self, other, renum, bad, cpu=None): - # cpu can be None for testing only - guards = [] - state = GenerateGuardState(cpu, guards, renum, bad) - try: - self.generate_guards(other, None, state) - assert not guards - return True - except InvalidLoop: - return False - def generate_guards(self, other, value, state): """ generate guards (output in the list extra_guards) that make runtime values of the shape other match the shape of self. 
if that's not @@ -441,13 +430,13 @@ s.enum(self) def generalization_of(self, other, bad=None, cpu=None): - if bad is None: - bad = {} + state = GenerateGuardState(cpu=cpu, bad=bad) assert len(self.state) == len(other.state) - renum = {} - for i in range(len(self.state)): - if not self.state[i].generalization_of(other.state[i], renum, bad, cpu=None): - return False + try: + for i in range(len(self.state)): + self.state[i].generate_guards(other.state[i], None, state) + except InvalidLoop: + return False return True def generate_guards(self, other, values, cpu): From noreply at buildbot.pypy.org Wed Apr 9 21:22:59 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:22:59 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: use our own exception Message-ID: <20140409192259.378241C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70515:51fe2237fbdc Date: 2014-04-09 18:24 +0200 http://bitbucket.org/pypy/pypy/changeset/51fe2237fbdc/ Log: use our own exception diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -1,8 +1,8 @@ from __future__ import with_statement import py -from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \ - VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes, GenerateGuardState + VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes, GenerateGuardState, \ + VirtualStatesCantMatch from rpython.jit.metainterp.optimizeopt.optimizer import OptValue from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr from rpython.rtyper.lltypesystem import lltype, llmemory @@ -68,7 +68,7 @@ info2.position = 0 
if state is None: state = GenerateGuardState(self.cpu) - with py.test.raises(InvalidLoop): + with py.test.raises(VirtualStatesCantMatch): info1.generate_guards(info2, value, state) @@ -363,7 +363,7 @@ guard_true(i2) [] """ self.guards(info1, info2, value1, expected) - py.test.raises(InvalidLoop, self.guards, + py.test.raises(VirtualStatesCantMatch, self.guards, info1, info2, BoxInt(50), expected) @@ -379,7 +379,7 @@ guard_class(p0, ConstClass(node_vtable)) [] """ self.guards(info1, info2, self.nodebox, expected) - py.test.raises(InvalidLoop, self.guards, + py.test.raises(VirtualStatesCantMatch, self.guards, info1, info2, BoxPtr(), expected) def test_known_class_value(self): @@ -436,10 +436,10 @@ state = vstate1.generate_guards(vstate2, [value, value], self.cpu) self.compare(state.extra_guards, expected, [self.nodebox]) - with py.test.raises(InvalidLoop): + with py.test.raises(VirtualStatesCantMatch): vstate1.generate_guards(vstate3, [value, value], self.cpu) - with py.test.raises(InvalidLoop): + with py.test.raises(VirtualStatesCantMatch): vstate2.generate_guards(vstate3, [value, value], self.cpu) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -6,7 +6,8 @@ from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer, Optimization -from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState +from rpython.jit.metainterp.optimizeopt.virtualstate import (VirtualStateAdder, + ShortBoxes, BadVirtualState, VirtualStatesCantMatch) from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.resume import Snapshot from rpython.rlib.debug import debug_print, debug_start, debug_stop @@ -570,8 +571,8 @@ 
debugmsg = 'Guarded to match ' else: debugmsg = 'Matched ' - except InvalidLoop: - target.virtual_state.debug_print(debugmsg, {}) # XXX + except VirtualStatesCantMatch, e: + target.virtual_state.debug_print(debugmsg, e.state.bad) continue assert patchguardop is not None or (extra_guards == [] and len(target.short_preamble) == 1) diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -1,6 +1,5 @@ from rpython.jit.metainterp import resume from rpython.jit.metainterp.history import BoxInt, ConstInt, BoxPtr, Const -from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.optimizeopt import virtualize from rpython.jit.metainterp.optimizeopt.intutils import IntUnbounded from rpython.jit.metainterp.optimizeopt.optimizer import (LEVEL_CONSTANT, @@ -13,6 +12,11 @@ class BadVirtualState(Exception): pass +class VirtualStatesCantMatch(Exception): + def __init__(self, msg='?', state=None): + self.msg = msg + self.state = state + class GenerateGuardState(object): def __init__(self, cpu=None, guards=None, renum=None, bad=None): self.cpu = cpu @@ -32,8 +36,8 @@ def generate_guards(self, other, value, state): """ generate guards (output in the list extra_guards) that make runtime values of the shape other match the shape of self. if that's not - possible, InvalidLoop is thrown and bad gets keys set which parts of - the state are the problem. + possible, VirtualStatesCantMatch is thrown and bad gets keys set which + parts of the state are the problem. 
the function can peek into value (and particularly also the boxes in the value) as a guiding heuristic whether making such guards makes @@ -44,21 +48,26 @@ if self.position in state.renum: if state.renum[self.position] != other.position: state.bad[self] = state.bad[other] = None - raise InvalidLoop('The numbering of the virtual states does not ' + - 'match. This means that two virtual fields ' + - 'have been set to the same Box in one of the ' + - 'virtual states but not in the other.') + raise VirtualStatesCantMatch( + 'The numbering of the virtual states does not ' + + 'match. This means that two virtual fields ' + + 'have been set to the same Box in one of the ' + + 'virtual states but not in the other.', + state) else: state.renum[self.position] = other.position try: self._generate_guards(other, value, state) - except InvalidLoop: + except VirtualStatesCantMatch, e: state.bad[self] = state.bad[other] = None - raise + if e.state is None: + e.state = state + raise e def _generate_guards(self, other, value, state): - raise InvalidLoop('Generating guards for making the VirtualStates ' + - 'at hand match have not been implemented') + raise VirtualStatesCantMatch( + 'Generating guards for making the VirtualStates ' + + 'at hand match have not been implemented') def enum_forced_boxes(self, boxes, value, optimizer): raise NotImplementedError @@ -95,7 +104,7 @@ def _generate_guards(self, other, value, state): if not self._generalization_of_structpart(other): - raise InvalidLoop("different kinds of structs") + raise VirtualStatesCantMatch("different kinds of structs") assert isinstance(other, AbstractVirtualStructStateInfo) assert len(self.fielddescrs) == len(self.fieldstate) @@ -105,11 +114,11 @@ assert value.is_virtual() if len(self.fielddescrs) != len(other.fielddescrs): - raise InvalidLoop("field descrs don't match") + raise VirtualStatesCantMatch("field descrs don't match") for i in range(len(self.fielddescrs)): if other.fielddescrs[i] is not self.fielddescrs[i]: - 
raise InvalidLoop("field descrs don't match") + raise VirtualStatesCantMatch("field descrs don't match") if value is not None: v = value._fields[self.fielddescrs[i]] # must be there else: @@ -173,11 +182,11 @@ def _generate_guards(self, other, value, state): if not isinstance(other, VArrayStateInfo): - raise InvalidLoop("other is not an array") + raise VirtualStatesCantMatch("other is not an array") if self.arraydescr is not other.arraydescr: - raise InvalidLoop("other is a different kind of array") + raise VirtualStatesCantMatch("other is a different kind of array") if len(self.fieldstate) != len(other.fieldstate): - raise InvalidLoop("other has a different length") + raise VirtualStatesCantMatch("other has a different length") for i in range(len(self.fieldstate)): # XXX value self.fieldstate[i].generate_guards(other.fieldstate[i], @@ -212,20 +221,20 @@ def _generate_guards(self, other, value, state): # XXX this needs a test in test_virtualstate!!! if not isinstance(other, VArrayStructStateInfo): - raise InvalidLoop("other is not an VArrayStructStateInfo") + raise VirtualStatesCantMatch("other is not an VArrayStructStateInfo") if not self.arraydescr is not other.arraydescr: - raise InvalidLoop("other is a different kind of array") + raise VirtualStatesCantMatch("other is a different kind of array") if len(self.fielddescrs) != len(other.fielddescrs): - raise InvalidLoop("other has a different length") + raise VirtualStatesCantMatch("other has a different length") p = 0 for i in range(len(self.fielddescrs)): if len(self.fielddescrs[i]) != len(other.fielddescrs[i]): - raise InvalidLoop("other has a different length") + raise VirtualStatesCantMatch("other has a different length") for j in range(len(self.fielddescrs[i])): if self.fielddescrs[i][j] is not other.fielddescrs[i][j]: - raise InvalidLoop("other has a different length") + raise VirtualStatesCantMatch("other is a different kind of array") self.fieldstate[p].generate_guards(other.fieldstate[p], None, # XXX 
state) @@ -283,15 +292,16 @@ # XXX This will always retrace instead of forcing anything which # might be what we want sometimes? if not isinstance(other, NotVirtualStateInfo): - raise InvalidLoop('The VirtualStates does not match as a ' + - 'virtual appears where a pointer is needed ' + - 'and it is too late to force it.') + raise VirtualStatesCantMatch( + 'The VirtualStates does not match as a ' + + 'virtual appears where a pointer is needed ' + + 'and it is too late to force it.') extra_guards = state.extra_guards cpu = state.cpu if self.lenbound and not self.lenbound.generalization_of(other.lenbound): - raise InvalidLoop() + raise VirtualStatesCantMatch("length bound does not match") if self.level == LEVEL_UNKNOWN: if other.level == LEVEL_UNKNOWN: @@ -311,7 +321,7 @@ extra_guards.append(op) return else: - raise InvalidLoop() + raise VirtualStatesCantMatch("other not known to be nonnull") elif other.level == LEVEL_NONNULL: return elif other.level == LEVEL_KNOWNCLASS: @@ -320,7 +330,7 @@ assert other.level == LEVEL_CONSTANT assert other.constbox if not other.constbox.nonnull(): - raise InvalidLoop("XXX") + raise VirtualStatesCantMatch("constant is null") return elif self.level == LEVEL_KNOWNCLASS: @@ -333,39 +343,39 @@ extra_guards.append(op) return else: - raise InvalidLoop() + raise VirtualStatesCantMatch("other's class is unknown") elif other.level == LEVEL_NONNULL: if box and self.known_class.same_constant(cpu.ts.cls_of_box(box)): op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) extra_guards.append(op) return else: - raise InvalidLoop() + raise VirtualStatesCantMatch("other's class is unknown") elif other.level == LEVEL_KNOWNCLASS: if self.known_class.same_constant(other.known_class): return - raise InvalidLoop() + raise VirtualStatesCantMatch("classes don't match") else: assert other.level == LEVEL_CONSTANT if (other.constbox.nonnull() and self.known_class.same_constant(cpu.ts.cls_of_box(other.constbox))): return else: - raise InvalidLoop() + 
raise VirtualStatesCantMatch("classes don't match") else: assert self.level == LEVEL_CONSTANT if other.level == LEVEL_CONSTANT: if self.constbox.same_constant(other.constbox): return - raise InvalidLoop() + raise VirtualStatesCantMatch("different constants") if box is not None and self.constbox.same_constant(box.constbox()): op = ResOperation(rop.GUARD_VALUE, [box, self.constbox], None) extra_guards.append(op) return else: - raise InvalidLoop() - raise InvalidLoop("XXX") + raise VirtualStatesCantMatch("other not constant") + assert 0, "unreachable" def _generate_guards_intbounds(self, other, box, extra_guards): if self.intbound.contains_bound(other.intbound): @@ -376,7 +386,7 @@ # optimized away when emitting them self.intbound.make_guards(box, extra_guards) return - raise InvalidLoop("intbounds don't match") + raise VirtualStatesCantMatch("intbounds don't match") def enum_forced_boxes(self, boxes, value, optimizer): if self.level == LEVEL_CONSTANT: @@ -435,7 +445,7 @@ try: for i in range(len(self.state)): self.state[i].generate_guards(other.state[i], None, state) - except InvalidLoop: + except VirtualStatesCantMatch: return False return True From noreply at buildbot.pypy.org Wed Apr 9 21:23:00 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:23:00 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: use helper Message-ID: <20140409192300.4A2331C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70516:aef86000e35a Date: 2014-04-09 19:31 +0200 http://bitbucket.org/pypy/pypy/changeset/aef86000e35a/ Log: use helper diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -363,8 +363,7 @@ guard_true(i2) [] """ self.guards(info1, info2, value1, expected) - 
py.test.raises(VirtualStatesCantMatch, self.guards, - info1, info2, BoxInt(50), expected) + self.check_invalid(info1, info2, BoxInt(50)) def test_known_class(self): @@ -379,8 +378,7 @@ guard_class(p0, ConstClass(node_vtable)) [] """ self.guards(info1, info2, self.nodebox, expected) - py.test.raises(VirtualStatesCantMatch, self.guards, - info1, info2, BoxPtr(), expected) + self.check_invalid(info1, info2, BoxPtr()) def test_known_class_value(self): value1 = OptValue(self.nodebox) From noreply at buildbot.pypy.org Wed Apr 9 21:23:01 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:23:01 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: yay, another completely untested case: Message-ID: <20140409192301.5AA651C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70517:e43da5c8b730 Date: 2014-04-09 19:36 +0200 http://bitbucket.org/pypy/pypy/changeset/e43da5c8b730/ Log: yay, another completely untested case: match a value with an intbound against a constant diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -365,6 +365,16 @@ self.guards(info1, info2, value1, expected) self.check_invalid(info1, info2, BoxInt(50)) + def test_intbounds_constant(self): + value1 = OptValue(BoxInt(15)) + value1.intbound.make_ge(IntBound(0, 10)) + value1.intbound.make_le(IntBound(20, 30)) + info1 = NotVirtualStateInfo(value1) + info2 = NotVirtualStateInfo(OptValue(ConstInt(10000))) + self.check_invalid(info1, info2) + info1 = NotVirtualStateInfo(value1) + info2 = NotVirtualStateInfo(OptValue(ConstInt(11))) + self.check_no_guards(info1, info2) def test_known_class(self): value1 = OptValue(self.nodebox) diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py 
b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -304,10 +304,9 @@ raise VirtualStatesCantMatch("length bound does not match") if self.level == LEVEL_UNKNOWN: - if other.level == LEVEL_UNKNOWN: - return self._generate_guards_intbounds(other, box, extra_guards) - else: - return # matches everything + # confusingly enough, this is done also for pointers + # which have the full range as the "bound", so it always works + return self._generate_guards_intbounds(other, box, extra_guards) # the following conditions often peek into the runtime value that the # box had when tracing. This value is only used as an educated guess. @@ -380,7 +379,7 @@ def _generate_guards_intbounds(self, other, box, extra_guards): if self.intbound.contains_bound(other.intbound): return - if (isinstance(box, BoxInt) and + if (box is not None and isinstance(box, BoxInt) and self.intbound.contains(box.getint())): # this may generate a few more guards than needed, but they are # optimized away when emitting them From noreply at buildbot.pypy.org Wed Apr 9 21:23:02 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 21:23:02 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: dedent Message-ID: <20140409192302.833641C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70518:e5b4fa9311f9 Date: 2014-04-09 19:40 +0200 http://bitbucket.org/pypy/pypy/changeset/e5b4fa9311f9/ Log: dedent diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -555,7 +555,6 @@ for target in cell_token.target_tokens: if not target.virtual_state: continue - ok = False extra_guards = [] debugmsg = 'Did not match ' @@ -565,7 +564,6 @@ values, cpu) - ok = True 
extra_guards = state.extra_guards if extra_guards: debugmsg = 'Guarded to match ' @@ -579,41 +577,40 @@ target.virtual_state.debug_print(debugmsg, {}) - if ok: - debug_stop('jit-log-virtualstate') + debug_stop('jit-log-virtualstate') - args = target.virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - short_inputargs = target.short_preamble[0].getarglist() - inliner = Inliner(short_inputargs, args) + args = target.virtual_state.make_inputargs(values, self.optimizer, + keyboxes=True) + short_inputargs = target.short_preamble[0].getarglist() + inliner = Inliner(short_inputargs, args) - for guard in extra_guards: - if guard.is_guard(): + for guard in extra_guards: + if guard.is_guard(): + descr = patchguardop.getdescr().clone_if_mutable() + guard.setdescr(descr) + self.optimizer.send_extra_operation(guard) + + try: + # NB: the short_preamble ends with a jump + for shop in target.short_preamble[1:]: + newop = inliner.inline_op(shop) + if newop.is_guard(): descr = patchguardop.getdescr().clone_if_mutable() - guard.setdescr(descr) - self.optimizer.send_extra_operation(guard) - - try: - # NB: the short_preamble ends with a jump - for shop in target.short_preamble[1:]: - newop = inliner.inline_op(shop) - if newop.is_guard(): - descr = patchguardop.getdescr().clone_if_mutable() - newop.setdescr(descr) - self.optimizer.send_extra_operation(newop) - if shop.result in target.assumed_classes: - classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) - if not classbox or not classbox.same_constant(target.assumed_classes[shop.result]): - raise InvalidLoop('The class of an opaque pointer at the end ' + - 'of the bridge does not mach the class ' + - 'it has at the start of the target loop') - except InvalidLoop: - #debug_print("Inlining failed unexpectedly", - # "jumping to preamble instead") - assert cell_token.target_tokens[0].virtual_state is None - jumpop.setdescr(cell_token.target_tokens[0]) - self.optimizer.send_extra_operation(jumpop) 
- return True + newop.setdescr(descr) + self.optimizer.send_extra_operation(newop) + if shop.result in target.assumed_classes: + classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) + if not classbox or not classbox.same_constant(target.assumed_classes[shop.result]): + raise InvalidLoop('The class of an opaque pointer at the end ' + + 'of the bridge does not mach the class ' + + 'it has at the start of the target loop') + except InvalidLoop: + #debug_print("Inlining failed unexpectedly", + # "jumping to preamble instead") + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return True debug_stop('jit-log-virtualstate') return False From noreply at buildbot.pypy.org Wed Apr 9 22:15:17 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 22:15:17 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140409201517.2DBB81C147D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70519:7eafa2968379 Date: 2014-04-09 11:58 -0700 http://bitbucket.org/pypy/pypy/changeset/7eafa2968379/ Log: merge default diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -333,7 +333,6 @@ if sys.platform != "win32": assert not posix.access(pdir, posix.X_OK) - def test_times(self): """ posix.times() should return a five-tuple giving float-representations @@ -1099,8 +1098,8 @@ res = os.system(cmd) assert res == 0 + class AppTestPosixUnicode: - def setup_class(cls): cls.space = space cls.w_posix = space.appexec([], GET_POSIX) @@ -1141,6 +1140,7 @@ except OSError: pass + class AppTestUnicodeFilename: def setup_class(cls): ufilename = (unicode(udir.join('test_unicode_filename_')) + diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- 
a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -13,7 +13,9 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state -IS_WINDOWS = sys.platform == 'win32' +PLATFORM = sys.platform +_MACOSX = sys.platform == 'darwin' +_WIN32 = sys.platform == 'win32' def find_executable(executable): @@ -21,10 +23,10 @@ Return the absolute path of the executable, by looking into PATH and the current directory. If it cannot be found, return ''. """ - if (we_are_translated() and IS_WINDOWS and + if (we_are_translated() and _WIN32 and not executable.lower().endswith('.exe')): executable += '.exe' - if os.sep in executable or (IS_WINDOWS and ':' in executable): + if os.sep in executable or (_WIN32 and ':' in executable): # the path is already more than just an executable name pass else: @@ -43,7 +45,7 @@ def _readlink_maybe(filename): - if not IS_WINDOWS: + if not _WIN32: return os.readlink(filename) raise NotImplementedError @@ -115,9 +117,9 @@ importlist.append(lib_tk) # List here the extra platform-specific paths. 
- if not IS_WINDOWS: - importlist.append(os.path.join(python_std_lib, 'plat-' + sys.platform)) - if sys.platform == 'darwin': + if not _WIN32: + importlist.append(os.path.join(python_std_lib, 'plat-' + PLATFORM)) + if _MACOSX: platmac = os.path.join(python_std_lib, 'plat-mac') importlist.append(platmac) importlist.append(os.path.join(platmac, 'lib-scriptpackages')) @@ -150,7 +152,7 @@ path, prefix = find_stdlib(get_state(space), executable) if path is None: return space.w_None - space.setitem(space.sys.w_dict, space.wrap('prefix'), space.wrap(prefix)) - space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), - space.wrap(prefix)) + w_prefix = space.wrap(prefix) + space.setitem(space.sys.w_dict, space.wrap('prefix'), w_prefix) + space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), w_prefix) return space.newlist([space.wrap(p) for p in path]) diff --git a/pypy/module/sys/test/test_initpath.py b/pypy/module/sys/test/test_initpath.py --- a/pypy/module/sys/test/test_initpath.py +++ b/pypy/module/sys/test/test_initpath.py @@ -84,7 +84,7 @@ assert find_executable('pypy') == a.join('pypy') # monkeypatch.setattr(initpath, 'we_are_translated', lambda: True) - monkeypatch.setattr(initpath, 'IS_WINDOWS', True) + monkeypatch.setattr(initpath, '_WIN32', True) monkeypatch.setenv('PATH', str(a)) a.join('pypy.exe').ensure(file=True) assert find_executable('pypy') == a.join('pypy.exe') From noreply at buildbot.pypy.org Wed Apr 9 22:15:18 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 22:15:18 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: merge py3k Message-ID: <20140409201518.68ACE1C147D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70520:911844b1f9c2 Date: 2014-04-09 11:58 -0700 http://bitbucket.org/pypy/pypy/changeset/911844b1f9c2/ Log: merge py3k diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ 
b/pypy/module/posix/test/test_posix2.py @@ -333,7 +333,6 @@ if sys.platform != "win32": assert not posix.access(pdir, posix.X_OK) - def test_times(self): """ posix.times() should return a five-tuple giving float-representations @@ -1099,8 +1098,8 @@ res = os.system(cmd) assert res == 0 + class AppTestPosixUnicode: - def setup_class(cls): cls.space = space cls.w_posix = space.appexec([], GET_POSIX) @@ -1141,6 +1140,7 @@ except OSError: pass + class AppTestUnicodeFilename: def setup_class(cls): ufilename = (unicode(udir.join('test_unicode_filename_')) + diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -13,7 +13,9 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state -IS_WINDOWS = sys.platform == 'win32' +PLATFORM = sys.platform +_MACOSX = sys.platform == 'darwin' +_WIN32 = sys.platform == 'win32' def find_executable(executable): @@ -21,10 +23,10 @@ Return the absolute path of the executable, by looking into PATH and the current directory. If it cannot be found, return ''. """ - if (we_are_translated() and IS_WINDOWS and + if (we_are_translated() and _WIN32 and not executable.lower().endswith('.exe')): executable += '.exe' - if os.sep in executable or (IS_WINDOWS and ':' in executable): + if os.sep in executable or (_WIN32 and ':' in executable): # the path is already more than just an executable name pass else: @@ -43,7 +45,7 @@ def _readlink_maybe(filename): - if not IS_WINDOWS: + if not _WIN32: return os.readlink(filename) raise NotImplementedError @@ -115,9 +117,9 @@ importlist.append(lib_tk) # List here the extra platform-specific paths. 
- if not IS_WINDOWS: - importlist.append(os.path.join(python_std_lib, 'plat-' + sys.platform)) - if sys.platform == 'darwin': + if not _WIN32: + importlist.append(os.path.join(python_std_lib, 'plat-' + PLATFORM)) + if _MACOSX: platmac = os.path.join(python_std_lib, 'plat-mac') importlist.append(platmac) importlist.append(os.path.join(platmac, 'lib-scriptpackages')) @@ -150,7 +152,7 @@ path, prefix = find_stdlib(get_state(space), executable) if path is None: return space.w_None - space.setitem(space.sys.w_dict, space.wrap('prefix'), space.wrap(prefix)) - space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), - space.wrap(prefix)) + w_prefix = space.wrap(prefix) + space.setitem(space.sys.w_dict, space.wrap('prefix'), w_prefix) + space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), w_prefix) return space.newlist([space.wrap(p) for p in path]) diff --git a/pypy/module/sys/test/test_initpath.py b/pypy/module/sys/test/test_initpath.py --- a/pypy/module/sys/test/test_initpath.py +++ b/pypy/module/sys/test/test_initpath.py @@ -84,7 +84,7 @@ assert find_executable('pypy') == a.join('pypy') # monkeypatch.setattr(initpath, 'we_are_translated', lambda: True) - monkeypatch.setattr(initpath, 'IS_WINDOWS', True) + monkeypatch.setattr(initpath, '_WIN32', True) monkeypatch.setenv('PATH', str(a)) a.join('pypy.exe').ensure(file=True) assert find_executable('pypy') == a.join('pypy.exe') From noreply at buildbot.pypy.org Wed Apr 9 22:15:19 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Apr 2014 22:15:19 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: incoming paths are now unicode so their path manipulations should be too -- Message-ID: <20140409201519.8D32C1C147D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70521:8f1f8bf0c093 Date: 2014-04-09 12:23 -0700 http://bitbucket.org/pypy/pypy/changeset/8f1f8bf0c093/ Log: incoming paths are now unicode so their path manipulations should be too -- however rpath doesn't 
support unicode, so fsencode them for now diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -107,7 +107,7 @@ if state is not None: # 'None' for testing only lib_extensions = os.path.join(lib_pypy, '__extensions__') - state.w_lib_extensions = state.space.wrap(lib_extensions) + state.w_lib_extensions = _w_fsdecode(state.space, lib_extensions) importlist.append(lib_extensions) importlist.append(lib_pypy) @@ -137,22 +137,26 @@ return None - at unwrap_spec(executable='str0') + at unwrap_spec(executable='fsencode') def pypy_find_executable(space, executable): - return space.wrap(find_executable(executable)) + return _w_fsdecode(space, find_executable(executable)) - at unwrap_spec(filename='str0') + at unwrap_spec(filename='fsencode') def pypy_resolvedirof(space, filename): - return space.wrap(resolvedirof(filename)) + return _w_fsdecode(space, resolvedirof(filename)) - at unwrap_spec(executable='str0') + at unwrap_spec(executable='fsencode') def pypy_find_stdlib(space, executable): path, prefix = find_stdlib(get_state(space), executable) if path is None: return space.w_None - w_prefix = space.wrap(prefix) + w_prefix = _w_fsdecode(space, prefix) space.setitem(space.sys.w_dict, space.wrap('prefix'), w_prefix) space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), w_prefix) - return space.newlist([space.wrap(p) for p in path]) + return space.newlist([_w_fsdecode(space, p) for p in path]) + + +def _w_fsdecode(space, b): + return space.fsdecode(space.wrapbytes(b)) From noreply at buildbot.pypy.org Wed Apr 9 23:29:51 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 9 Apr 2014 23:29:51 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: share some copy-pasted code Message-ID: <20140409212951.B74571C147D@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70522:6b0dfd655d80 Date: 2014-04-09 23:29 +0200 
http://bitbucket.org/pypy/pypy/changeset/6b0dfd655d80/ Log: share some copy-pasted code discovered that the short preamble that is being inline can grow in the process (!) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -330,24 +330,8 @@ args[short_inputargs[i]] = jmp_to_short_args[i] self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - i = 1 - while i < len(self.short): - # Note that self.short might be extended during this loop - op = self.short[i] - newop = self.short_inliner.inline_op(op) - if newop.is_guard(): - if not patchguardop: - raise InvalidLoop("would like to have short preamble, but it has a guard and there's no guard_future_condition") - descr = patchguardop.getdescr().clone_if_mutable() - newop.setdescr(descr) - self.optimizer.send_extra_operation(newop) - if op.result in self.short_boxes.assumed_classes: - classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) - assumed_classbox = self.short_boxes.assumed_classes[op.result] - if not classbox or not classbox.same_constant(assumed_classbox): - raise InvalidLoop('Class of opaque pointer needed in short ' + - 'preamble unknown at end of loop') - i += 1 + self._inline_short_preamble(self.short, self.short_inliner, + patchguardop, self.short_boxes.assumed_classes) # Import boxes produced in the preamble but used in the loop newoperations = self.optimizer.get_newoperations() @@ -592,18 +576,7 @@ try: # NB: the short_preamble ends with a jump - for shop in target.short_preamble[1:]: - newop = inliner.inline_op(shop) - if newop.is_guard(): - descr = patchguardop.getdescr().clone_if_mutable() - newop.setdescr(descr) - self.optimizer.send_extra_operation(newop) - if shop.result in target.assumed_classes: - classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) - if not classbox or not 
classbox.same_constant(target.assumed_classes[shop.result]): - raise InvalidLoop('The class of an opaque pointer at the end ' + - 'of the bridge does not mach the class ' + - 'it has at the start of the target loop') + self._inline_short_preamble(target.short_preamble, inliner, patchguardop, target.assumed_classes) except InvalidLoop: #debug_print("Inlining failed unexpectedly", # "jumping to preamble instead") @@ -614,6 +587,27 @@ debug_stop('jit-log-virtualstate') return False + def _inline_short_preamble(self, short_preamble, inliner, patchguardop, assumed_classes): + i = 1 + # XXX this is intentiontal :-(. short_preamble can change during the + # loop in some cases + while i < len(short_preamble): + shop = short_preamble[i] + newop = inliner.inline_op(shop) + if newop.is_guard(): + if not patchguardop: + raise InvalidLoop("would like to have short preamble, but it has a guard and there's no guard_future_condition") + descr = patchguardop.getdescr().clone_if_mutable() + newop.setdescr(descr) + self.optimizer.send_extra_operation(newop) + if shop.result in assumed_classes: + classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) + if not classbox or not classbox.same_constant(assumed_classes[shop.result]): + raise InvalidLoop('The class of an opaque pointer before the jump ' + + 'does not mach the class ' + + 'it has at the start of the target loop') + i += 1 + class ValueImporter(object): def __init__(self, unroll, value, op): From noreply at buildbot.pypy.org Thu Apr 10 00:02:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 10 Apr 2014 00:02:34 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_zmath on win32 Message-ID: <20140409220234.563001C022D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70523:bf3731320ef2 Date: 2014-04-09 15:01 -0700 http://bitbucket.org/pypy/pypy/changeset/bf3731320ef2/ Log: fix test_zmath on win32 diff --git a/rpython/jit/backend/x86/support.py 
b/rpython/jit/backend/x86/support.py --- a/rpython/jit/backend/x86/support.py +++ b/rpython/jit/backend/x86/support.py @@ -7,11 +7,12 @@ extra = ['-DPYPY_X86_CHECK_SSE2'] if sys.platform != 'win32': extra += ['-msse2', '-mfpmath=sse'] + else: + extra += ['/arch:SSE2'] else: extra = [] # the -m options above are always on by default on x86-64 -if sys.platform != 'win32': - extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra +extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = extra, From noreply at buildbot.pypy.org Thu Apr 10 00:16:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 10 Apr 2014 00:16:56 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: another BufferError impl. detail Message-ID: <20140409221656.A8B2C1C12F3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70524:e7aa6d7e4488 Date: 2014-04-09 15:04 -0700 http://bitbucket.org/pypy/pypy/changeset/e7aa6d7e4488/ Log: another BufferError impl. detail diff --git a/lib-python/3/test/test_re.py b/lib-python/3/test/test_re.py --- a/lib-python/3/test/test_re.py +++ b/lib-python/3/test/test_re.py @@ -1,5 +1,5 @@ from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \ - cpython_only + cpython_only, check_impl_detail import io import re from re import Scanner @@ -25,7 +25,12 @@ # See bug 14212 b = bytearray(b'x') it = re.finditer(b'a', b) - with self.assertRaises(BufferError): + if check_impl_detail(pypy=False): + # PyPy export buffers differently, and allows reallocation + # of the underlying object. 
+ with self.assertRaises(BufferError): + b.extend(b'x'*400) + else: b.extend(b'x'*400) list(it) del it From noreply at buildbot.pypy.org Thu Apr 10 00:16:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 10 Apr 2014 00:16:57 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-3.2.5: close to be merged branch Message-ID: <20140409221657.E24AB1C12F3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70525:03f738c0210a Date: 2014-04-09 15:15 -0700 http://bitbucket.org/pypy/pypy/changeset/03f738c0210a/ Log: close to be merged branch From noreply at buildbot.pypy.org Thu Apr 10 00:17:02 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 10 Apr 2014 00:17:02 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge stdlib-3.2.5 Message-ID: <20140409221702.3CAD21C12F3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70526:652e7bcac2c3 Date: 2014-04-09 15:15 -0700 http://bitbucket.org/pypy/pypy/changeset/652e7bcac2c3/ Log: merge stdlib-3.2.5 diff too long, truncating to 2000 out of 27652 lines diff --git a/lib-python/3/__future__.py b/lib-python/3/__future__.py --- a/lib-python/3/__future__.py +++ b/lib-python/3/__future__.py @@ -114,7 +114,7 @@ CO_FUTURE_DIVISION) absolute_import = _Feature((2, 5, 0, "alpha", 1), - (2, 7, 0, "alpha", 0), + (3, 0, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT) with_statement = _Feature((2, 5, 0, "alpha", 1), diff --git a/lib-python/3/_abcoll.py b/lib-python/3/_abcoll.py --- a/lib-python/3/_abcoll.py +++ b/lib-python/3/_abcoll.py @@ -184,12 +184,12 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return other.__lt__(self) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + return other.__le__(self) def __eq__(self, other): if not isinstance(other, Set): diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py new file mode 100644 --- /dev/null +++ 
b/lib-python/3/_osx_support.py @@ -0,0 +1,488 @@ +"""Shared OS X support functions.""" + +import os +import re +import sys + +__all__ = [ + 'compiler_fixup', + 'customize_config_vars', + 'customize_compiler', + 'get_platform_osx', +] + +# configuration variables that may contain universal build flags, +# like "-arch" or "-isdkroot", that may need customization for +# the user environment +_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS', + 'BLDSHARED', 'LDSHARED', 'CC', 'CXX', + 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS', + 'PY_CORE_CFLAGS') + +# configuration variables that may contain compiler calls +_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX') + +# prefix added to original configuration variable names +_INITPRE = '_OSX_SUPPORT_INITIAL_' + + +def _find_executable(executable, path=None): + """Tries to find 'executable' in the directories listed in 'path'. + + A string listing directories separated by 'os.pathsep'; defaults to + os.environ['PATH']. Returns the complete filename or None if not found. + """ + if path is None: + path = os.environ['PATH'] + + paths = path.split(os.pathsep) + base, ext = os.path.splitext(executable) + + if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'): + executable = executable + '.exe' + + if not os.path.isfile(executable): + for p in paths: + f = os.path.join(p, executable) + if os.path.isfile(f): + # the file exists, we have a shot at spawn working + return f + return None + else: + return executable + + +def _read_output(commandstring): + """Output from succesful command execution or None""" + # Similar to os.popen(commandstring, "r").read(), + # but without actually using os.popen because that + # function is not usable during python bootstrap. + # tempfile is also not available then. 
+ import contextlib + try: + import tempfile + fp = tempfile.NamedTemporaryFile() + except ImportError: + fp = open("/tmp/_osx_support.%s"%( + os.getpid(),), "w+b") + + with contextlib.closing(fp) as fp: + cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + return fp.read().decode('utf-8').strip() if not os.system(cmd) else None + + +def _find_build_tool(toolname): + """Find a build tool on current path or using xcrun""" + return (_find_executable(toolname) + or _read_output("/usr/bin/xcrun -find %s" % (toolname,)) + or '' + ) + +_SYSTEM_VERSION = None + +def _get_system_version(): + """Return the OS X system version as a string""" + # Reading this plist is a documented way to get the system + # version (see the documentation for the Gestalt Manager) + # We avoid using platform.mac_ver to avoid possible bootstrap issues during + # the build of Python itself (distutils is used to build standard library + # extensions). + + global _SYSTEM_VERSION + + if _SYSTEM_VERSION is None: + _SYSTEM_VERSION = '' + try: + f = open('/System/Library/CoreServices/SystemVersion.plist') + except IOError: + # We're on a plain darwin box, fall back to the default + # behaviour. + pass + else: + try: + m = re.search(r'ProductUserVisibleVersion\s*' + r'(.*?)', f.read()) + finally: + f.close() + if m is not None: + _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + + return _SYSTEM_VERSION + +def _remove_original_values(_config_vars): + """Remove original unmodified values for testing""" + # This is needed for higher-level cross-platform tests of get_platform. 
+ for k in list(_config_vars): + if k.startswith(_INITPRE): + del _config_vars[k] + +def _save_modified_value(_config_vars, cv, newvalue): + """Save modified and original unmodified value of configuration var""" + + oldvalue = _config_vars.get(cv, '') + if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars): + _config_vars[_INITPRE + cv] = oldvalue + _config_vars[cv] = newvalue + +def _supports_universal_builds(): + """Returns True if universal builds are supported on this system""" + # As an approximation, we assume that if we are running on 10.4 or above, + # then we are running with an Xcode environment that supports universal + # builds, in particular -isysroot and -arch arguments to the compiler. This + # is in support of allowing 10.4 universal builds to run on 10.3.x systems. + + osx_version = _get_system_version() + if osx_version: + try: + osx_version = tuple(int(i) for i in osx_version.split('.')) + except ValueError: + osx_version = '' + return bool(osx_version >= (10, 4)) if osx_version else False + + +def _find_appropriate_compiler(_config_vars): + """Find appropriate C compiler for extension module builds""" + + # Issue #13590: + # The OSX location for the compiler varies between OSX + # (or rather Xcode) releases. With older releases (up-to 10.5) + # the compiler is in /usr/bin, with newer releases the compiler + # can only be found inside Xcode.app if the "Command Line Tools" + # are not installed. + # + # Futhermore, the compiler that can be used varies between + # Xcode releases. Upto Xcode 4 it was possible to use 'gcc-4.2' + # as the compiler, after that 'clang' should be used because + # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that + # miscompiles Python. + + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + # The CC config var might contain additional arguments. + # Ignore them while searching. 
+ cc = oldcc = _config_vars['CC'].split()[0] + if not _find_executable(cc): + # Compiler is not found on the shell search PATH. + # Now search for clang, first on PATH (if the Command LIne + # Tools have been installed in / or if the user has provided + # another location via CC). If not found, try using xcrun + # to find an uninstalled clang (within a selected Xcode). + + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself (and os.popen is + # implemented on top of subprocess and is therefore not + # usable as well) + + cc = _find_build_tool('clang') + + elif os.path.basename(cc).startswith('gcc'): + # Compiler is GCC, check if it is LLVM-GCC + data = _read_output("'%s' --version" + % (cc.replace("'", "'\"'\"'"),)) + if 'llvm-gcc' in data: + # Found LLVM-GCC, fall back to clang + cc = _find_build_tool('clang') + + if not cc: + raise SystemError( + "Cannot locate working compiler") + + if cc != oldcc: + # Found a replacement compiler. + # Modify config vars using new compiler, if not already explictly + # overriden by an env variable, preserving additional arguments. 
+ for cv in _COMPILER_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + cv_split = _config_vars[cv].split() + cv_split[0] = cc if cv != 'CXX' else cc + '++' + _save_modified_value(_config_vars, cv, ' '.join(cv_split)) + + return _config_vars + + +def _remove_universal_flags(_config_vars): + """Remove all universal build arguments from config vars""" + + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _remove_unsupported_archs(_config_vars): + """Remove any unsupported archs from config vars""" + # Different Xcode releases support different sets for '-arch' + # flags. In particular, Xcode 4.x no longer supports the + # PPC architectures. + # + # This code automatically removes '-arch ppc' and '-arch ppc64' + # when these are not supported. That makes it possible to + # build extensions on OSX 10.7 and later with the prebuilt + # 32-bit installer on the python.org website. 
+ + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself + status = os.system("'%s' -arch ppc -x c /dev/null 2>/dev/null"%( + _config_vars['CC'].replace("'", "'\"'\"'"),)) + # The Apple compiler drivers return status 255 if no PPC + if (status >> 8) == 255: + # Compiler doesn't support PPC, remove the related + # '-arch' flags if not explicitly overridden by an + # environment variable + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+ppc\w*\s', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _override_all_archs(_config_vars): + """Allow override of all archs with ARCHFLAGS env var""" + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and '-arch' in _config_vars[cv]: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _check_for_unavailable_sdk(_config_vars): + """Remove references to any SDKs not available""" + # If we're on OSX 10.5 or later and the user tries to + # compile an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. This is particularly important with + # the standalong Command Line Tools alternative to a + # full-blown Xcode install since the CLT packages do not + # provide SDKs. 
If the SDK is not present, it is assumed + # that the header files and dev libs have been installed + # to /usr and /System/Library by either a standalone CLT + # package or the CLT component within Xcode. + cflags = _config_vars.get('CFLAGS', '') + m = re.search(r'-isysroot\s+(\S+)', cflags) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def compiler_fixup(compiler_so, cc_args): + """ + This function will strip '-isysroot PATH' and '-arch ARCH' from the + compile flags if the user has specified one them in extra_compile_flags. + + This is needed because '-arch ARCH' adds another architecture to the + build, without a way to remove an architecture. Furthermore GCC will + barf if multiple '-isysroot' arguments are present. + """ + stripArch = stripSysroot = False + + compiler_so = list(compiler_so) + + if not _supports_universal_builds(): + # OSX before 10.4.0, these don't support -arch and -isysroot at + # all. 
+ stripArch = stripSysroot = True + else: + stripArch = '-arch' in cc_args + stripSysroot = '-isysroot' in cc_args + + if stripArch or 'ARCHFLAGS' in os.environ: + while True: + try: + index = compiler_so.index('-arch') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + if 'ARCHFLAGS' in os.environ and not stripArch: + # User specified different -arch flags in the environ, + # see also distutils.sysconfig + compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() + + if stripSysroot: + while True: + try: + index = compiler_so.index('-isysroot') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + # Check if the SDK that is used during compilation actually exists, + # the universal build requires the usage of a universal SDK and not all + # users have that installed by default. + sysroot = None + if '-isysroot' in cc_args: + idx = cc_args.index('-isysroot') + sysroot = cc_args[idx+1] + elif '-isysroot' in compiler_so: + idx = compiler_so.index('-isysroot') + sysroot = compiler_so[idx+1] + + if sysroot and not os.path.isdir(sysroot): + from distutils import log + log.warn("Compiling with an SDK that doesn't seem to exist: %s", + sysroot) + log.warn("Please check your Xcode installation") + + return compiler_so + + +def customize_config_vars(_config_vars): + """Customize Python build configuration variables. + + Called internally from sysconfig with a mutable mapping + containing name/value pairs parsed from the configured + makefile used to build this interpreter. Returns + the mapping updated as needed to reflect the environment + in which the interpreter is running; in the case of + a Python from a binary installer, the installed + environment may be very different from the build + environment, i.e. different OS levels, different + built tools, different available CPU architectures. 
+ + This customization is performed whenever + distutils.sysconfig.get_config_vars() is first + called. It may be used in environments where no + compilers are present, i.e. when installing pure + Python dists. Customization of compiler paths + and detection of unavailable archs is deferred + until the first extention module build is + requested (in distutils.sysconfig.customize_compiler). + + Currently called from distutils.sysconfig + """ + + if not _supports_universal_builds(): + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + _remove_universal_flags(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + # Remove references to sdks that are not found + _check_for_unavailable_sdk(_config_vars) + + return _config_vars + + +def customize_compiler(_config_vars): + """Customize compiler path and configuration variables. + + This customization is performed when the first + extension module build is requested + in distutils.sysconfig.customize_compiler). + """ + + # Find a compiler to use for extension module builds + _find_appropriate_compiler(_config_vars) + + # Remove ppc arch flags if not supported here + _remove_unsupported_archs(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + return _config_vars + + +def get_platform_osx(_config_vars, osname, release, machine): + """Filter values for get_platform()""" + # called from get_platform() in sysconfig and distutils.util + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. 
+ + macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '') + macrelease = _get_system_version() or macver + macver = macver or macrelease + + if macver: + release = macver + osname = "macosx" + + # Use the original CFLAGS value, if available, so that we + # return the same machine type for the platform string. + # Otherwise, distutils may consider this a cross-compiling + # case and disallow installs. + cflags = _config_vars.get(_INITPRE+'CFLAGS', + _config_vars.get('CFLAGS', '')) + if ((macrelease + '.') >= '10.4.' and + '-arch' in cflags.strip()): + # The universal build will build fat binaries, but not on + # systems before 10.4 + + machine = 'fat' + + archs = re.findall('-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxsize >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxsize >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return (osname, release, machine) diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -298,7 +298,7 @@ def seek(self, pos, whence=0): """Change stream position. - Change the stream position to byte offset offset. offset is + Change the stream position to byte offset pos. 
Argument pos is interpreted relative to the position indicated by whence. Values for whence are ints: @@ -889,12 +889,18 @@ return pos def readable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def writable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True @@ -1567,6 +1573,8 @@ return self._buffer def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return self._seekable def readable(self): diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -339,7 +339,7 @@ raise ValueError("unconverted data remains: %s" % data_string[found.end():]) - year = 1900 + year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 @@ -444,6 +444,12 @@ else: tz = value break + leap_year_fix = False + if year is None and month == 2 and day == 29: + year = 1904 # 1904 is first leap year of 20th century + leap_year_fix = True + elif year is None: + year = 1900 # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian == -1 and week_of_year != -1 and weekday != -1: @@ -472,6 +478,12 @@ else: gmtoff = None + if leap_year_fix: + # the caller didn't supply a year but asked for Feb 29th. We couldn't + # use the default of 1900 for computations. We set it back to ensure + # that February 29th is smaller than March 1st. 
+ year = 1900 + return (year, month, day, hour, minute, second, weekday, julian, tz, gmtoff, tzname), fraction diff --git a/lib-python/3/_weakrefset.py b/lib-python/3/_weakrefset.py --- a/lib-python/3/_weakrefset.py +++ b/lib-python/3/_weakrefset.py @@ -63,7 +63,7 @@ yield item def __len__(self): - return sum(x() is not None for x in self.data) + return len(self.data) - len(self._pending_removals) def __contains__(self, item): try: @@ -114,36 +114,21 @@ def update(self, other): if self._pending_removals: self._commit_removals() - if isinstance(other, self.__class__): - self.data.update(other.data) - else: - for element in other: - self.add(element) + for element in other: + self.add(element) def __ior__(self, other): self.update(other) return self - # Helper functions for simple delegating methods. - def _apply(self, other, method): - if not isinstance(other, self.__class__): - other = self.__class__(other) - newdata = method(other.data) - newset = self.__class__() - newset.data = newdata + def difference(self, other): + newset = self.copy() + newset.difference_update(other) return newset - - def difference(self, other): - return self._apply(other, self.data.difference) __sub__ = difference def difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.difference_update(ref(item) for item in other) + self.__isub__(other) def __isub__(self, other): if self._pending_removals: self._commit_removals() @@ -154,13 +139,11 @@ return self def intersection(self, other): - return self._apply(other, self.data.intersection) + return self.__class__(item for item in other if item in self) __and__ = intersection def intersection_update(self, other): - if self._pending_removals: - self._commit_removals() - self.data.intersection_update(ref(item) for item in other) + self.__iand__(other) def __iand__(self, other): if self._pending_removals: self._commit_removals() @@ -169,17 +152,17 @@ def 
issubset(self, other): return self.data.issubset(ref(item) for item in other) - __lt__ = issubset + __le__ = issubset - def __le__(self, other): - return self.data <= set(ref(item) for item in other) + def __lt__(self, other): + return self.data < set(ref(item) for item in other) def issuperset(self, other): return self.data.issuperset(ref(item) for item in other) - __gt__ = issuperset + __ge__ = issuperset - def __ge__(self, other): - return self.data >= set(ref(item) for item in other) + def __gt__(self, other): + return self.data > set(ref(item) for item in other) def __eq__(self, other): if not isinstance(other, self.__class__): @@ -187,27 +170,24 @@ return self.data == set(ref(item) for item in other) def symmetric_difference(self, other): - return self._apply(other, self.data.symmetric_difference) + newset = self.copy() + newset.symmetric_difference_update(other) + return newset __xor__ = symmetric_difference def symmetric_difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.__ixor__(other) def __ixor__(self, other): if self._pending_removals: self._commit_removals() if self is other: self.data.clear() else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) return self def union(self, other): - return self._apply(other, self.data.union) + return self.__class__(e for s in (self, other) for e in s) __or__ = union def isdisjoint(self, other): diff --git a/lib-python/3/aifc.py b/lib-python/3/aifc.py --- a/lib-python/3/aifc.py +++ b/lib-python/3/aifc.py @@ -692,7 +692,9 @@ self._patchheader() def close(self): - if self._file: + if self._file is None: + return + try: self._ensure_header_written(0) if self._datawritten & 1: # quick pad to even size @@ -703,10 +705,12 @@ self._datalength != self._datawritten or 
\ self._marklength: self._patchheader() + finally: # Prevent ref cycles self._convert = None - self._file.close() + f = self._file self._file = None + f.close() # # Internal methods. diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -736,10 +736,10 @@ - default -- The value to be produced if the option is not specified. - - type -- The type which the command-line arguments should be converted - to, should be one of 'string', 'int', 'float', 'complex' or a - callable object that accepts a single string argument. If None, - 'string' is assumed. + - type -- A callable that accepts a single string argument, and + returns the converted value. The standard Python types str, int, + float, and complex are useful examples of such callables. If None, + str is used. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate @@ -1701,9 +1701,12 @@ return args def parse_known_args(self, args=None, namespace=None): - # args default to the system args if args is None: + # args default to the system args args = _sys.argv[1:] + else: + # make sure that args are mutable + args = list(args) # default Namespace built from parser defaults if namespace is None: @@ -1714,10 +1717,7 @@ if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: - default = action.default - if isinstance(action.default, str): - default = self._get_value(action, default) - setattr(namespace, action.dest, default) + setattr(namespace, action.dest, action.default) # add any parser defaults that aren't present for dest in self._defaults: @@ -1957,12 +1957,23 @@ if positionals: self.error(_('too few arguments')) - # make sure all required actions were present + # make sure all required actions were present, and convert defaults. 
for action in self._actions: - if action.required: - if action not in seen_actions: + if action not in seen_actions: + if action.required: name = _get_action_name(action) self.error(_('argument %s is required') % name) + else: + # Convert action default now instead of doing it before + # parsing arguments to avoid calling convert functions + # twice (which may fail) if the argument was given, but + # only if it was defined already in the namespace + if (action.default is not None and + isinstance(action.default, str) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): + setattr(namespace, action.dest, + self._get_value(action, action.default)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: @@ -1988,7 +1999,7 @@ for arg_string in arg_strings: # for regular arguments, just add them back into the list - if arg_string[0] not in self.fromfile_prefix_chars: + if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content @@ -2198,9 +2209,12 @@ # Value conversion methods # ======================== def _get_values(self, action, arg_strings): - # for everything but PARSER args, strip out '--' + # for everything but PARSER, REMAINDER args, strip out first '--' if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] + try: + arg_strings.remove('--') + except ValueError: + pass # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: diff --git a/lib-python/3/asyncore.py b/lib-python/3/asyncore.py --- a/lib-python/3/asyncore.py +++ b/lib-python/3/asyncore.py @@ -225,6 +225,7 @@ debug = False connected = False accepting = False + connecting = False closing = False addr = None ignore_log_types = frozenset(['warning']) @@ -248,7 +249,7 @@ try: self.addr = sock.getpeername() except socket.error 
as err: - if err.args[0] == ENOTCONN: + if err.args[0] in (ENOTCONN, EINVAL): # To handle the case where we got an unconnected # socket. self.connected = False @@ -342,9 +343,11 @@ def connect(self, address): self.connected = False + self.connecting = True err = self.socket.connect_ex(address) if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ or err == EINVAL and os.name in ('nt', 'ce'): + self.addr = address return if err in (0, EISCONN): self.addr = address @@ -390,7 +393,7 @@ else: return data except socket.error as why: - # winsock sometimes throws ENOTCONN + # winsock sometimes raises ENOTCONN if why.args[0] in _DISCONNECTED: self.handle_close() return b'' @@ -400,6 +403,7 @@ def close(self): self.connected = False self.accepting = False + self.connecting = False self.del_channel() try: self.socket.close() @@ -438,7 +442,8 @@ # sockets that are connected self.handle_accept() elif not self.connected: - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_read() else: self.handle_read() @@ -449,6 +454,7 @@ raise socket.error(err, _strerror(err)) self.handle_connect() self.connected = True + self.connecting = False def handle_write_event(self): if self.accepting: @@ -457,12 +463,8 @@ return if not self.connected: - #check for errors - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - if err != 0: - raise socket.error(err, _strerror(err)) - - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_write() def handle_expt_event(self): diff --git a/lib-python/3/bdb.py b/lib-python/3/bdb.py --- a/lib-python/3/bdb.py +++ b/lib-python/3/bdb.py @@ -22,6 +22,7 @@ self.skip = set(skip) if skip else None self.breaks = {} self.fncache = {} + self.frame_returning = None def canonic(self, filename): if filename == "<" + filename[1:-1] + ">": @@ -80,7 +81,11 @@ def dispatch_return(self, frame, arg): if self.stop_here(frame) or frame == self.returnframe: - self.user_return(frame, arg) + 
try: + self.frame_returning = frame + self.user_return(frame, arg) + finally: + self.frame_returning = None if self.quitting: raise BdbQuit return self.trace_dispatch @@ -186,6 +191,14 @@ def set_step(self): """Stop after one line of code.""" + # Issue #13183: pdb skips frames after hitting a breakpoint and running + # step commands. + # Restore the trace function in the caller (that may not have been set + # for performance reasons) when returning from the current frame. + if self.frame_returning: + caller_frame = self.frame_returning.f_back + if caller_frame and not caller_frame.f_trace: + caller_frame.f_trace = self.trace_dispatch self._set_stopinfo(None, None) def set_next(self, frame): diff --git a/lib-python/3/calendar.py b/lib-python/3/calendar.py --- a/lib-python/3/calendar.py +++ b/lib-python/3/calendar.py @@ -161,7 +161,11 @@ oneday = datetime.timedelta(days=1) while True: yield date - date += oneday + try: + date += oneday + except OverflowError: + # Adding one day could fail after datetime.MAXYEAR + break if date.month != month and date.weekday() == self.firstweekday: break diff --git a/lib-python/3/cgi.py b/lib-python/3/cgi.py --- a/lib-python/3/cgi.py +++ b/lib-python/3/cgi.py @@ -214,17 +214,17 @@ """ import http.client - boundary = "" + boundary = b"" if 'boundary' in pdict: boundary = pdict['boundary'] if not valid_boundary(boundary): raise ValueError('Invalid boundary in multipart form: %r' % (boundary,)) - nextpart = "--" + boundary - lastpart = "--" + boundary + "--" + nextpart = b"--" + boundary + lastpart = b"--" + boundary + b"--" partdict = {} - terminator = "" + terminator = b"" while terminator != lastpart: bytes = -1 @@ -243,7 +243,7 @@ raise ValueError('Maximum content length exceeded') data = fp.read(bytes) else: - data = "" + data = b"" # Read lines until end of part. 
lines = [] while 1: @@ -251,7 +251,7 @@ if not line: terminator = lastpart # End outer loop break - if line.startswith("--"): + if line.startswith(b"--"): terminator = line.rstrip() if terminator in (nextpart, lastpart): break @@ -263,12 +263,12 @@ if lines: # Strip final line terminator line = lines[-1] - if line[-2:] == "\r\n": + if line[-2:] == b"\r\n": line = line[:-2] - elif line[-1:] == "\n": + elif line[-1:] == b"\n": line = line[:-1] lines[-1] = line - data = "".join(lines) + data = b"".join(lines) line = headers['content-disposition'] if not line: continue diff --git a/lib-python/3/cgitb.py b/lib-python/3/cgitb.py --- a/lib-python/3/cgitb.py +++ b/lib-python/3/cgitb.py @@ -293,14 +293,19 @@ if self.logdir is not None: suffix = ['.txt', '.html'][self.format=="html"] (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) + try: file = os.fdopen(fd, 'w') file.write(doc) file.close() - msg = '

    %s contains the description of this error.' % path + msg = '%s contains the description of this error.' % path except: - msg = '

    Tried to save traceback to %s, but failed.' % path - self.file.write(msg + '\n') + msg = 'Tried to save traceback to %s, but failed.' % path + + if self.format == 'html': + self.file.write('

    %s

    \n' % msg) + else: + self.file.write(msg + '\n') try: self.file.flush() except: pass diff --git a/lib-python/3/collections.py b/lib-python/3/collections.py --- a/lib-python/3/collections.py +++ b/lib-python/3/collections.py @@ -281,6 +281,10 @@ 'Return self as a plain tuple. Used by copy and pickle.' return tuple(self) + def __getstate__(self): + 'Exclude the OrderedDict from pickling' + return None + {field_defs} ''' diff --git a/lib-python/3/concurrent/futures/_base.py b/lib-python/3/concurrent/futures/_base.py --- a/lib-python/3/concurrent/futures/_base.py +++ b/lib-python/3/concurrent/futures/_base.py @@ -112,12 +112,14 @@ def __init__(self, num_pending_calls, stop_on_exception): self.num_pending_calls = num_pending_calls self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() super().__init__() def _decrement_pending_calls(self): - self.num_pending_calls -= 1 - if not self.num_pending_calls: - self.event.set() + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() def add_result(self, future): super().add_result(future) @@ -517,7 +519,7 @@ """Returns a iterator equivalent to map(fn, iter). Args: - fn: A callable that will take take as many arguments as there are + fn: A callable that will take as many arguments as there are passed iterables. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. diff --git a/lib-python/3/configparser.py b/lib-python/3/configparser.py --- a/lib-python/3/configparser.py +++ b/lib-python/3/configparser.py @@ -99,10 +99,9 @@ yes, on for True). Returns False or True. items(section=_UNSET, raw=False, vars=None) - If section is given, return a list of tuples with (section_name, - section_proxy) for each section, including DEFAULTSECT. Otherwise, - return a list of tuples with (name, value) for each option - in the section. + If section is given, return a list of tuples with (name, value) for + each option in the section. 
Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. remove_section(section) Remove the given file section and all its options. @@ -852,6 +851,19 @@ value_getter = lambda option: d[option] return [(option, value_getter(option)) for option in d.keys()] + def popitem(self): + """Remove a section from the parser and return it as + a (section_name, section_proxy) tuple. If no section is present, raise + KeyError. + + The section DEFAULT is never returned because it cannot be removed. + """ + for key in self.sections(): + value = self[key] + del self[key] + return key, value + raise KeyError + def optionxform(self, optionstr): return optionstr.lower() @@ -947,7 +959,8 @@ # XXX this is not atomic if read_dict fails at any point. Then again, # no update method in configparser is atomic in this implementation. - self.remove_section(key) + if key in self._sections: + self._sections[key].clear() self.read_dict({key: value}) def __delitem__(self, key): diff --git a/lib-python/3/ctypes/test/test_bitfields.py b/lib-python/3/ctypes/test/test_bitfields.py --- a/lib-python/3/ctypes/test/test_bitfields.py +++ b/lib-python/3/ctypes/test/test_bitfields.py @@ -246,5 +246,25 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] + @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + def test_uint32(self): + class X(Structure): + _fields_ = [("a", c_uint32, 32)] + x = X() + x.a = 10 + self.assertEqual(x.a, 10) + x.a = 0xFDCBA987 + self.assertEqual(x.a, 0xFDCBA987) + + @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + def test_uint64(self): + class X(Structure): + _fields_ = [("a", c_uint64, 64)] + x = X() + x.a = 10 + self.assertEqual(x.a, 10) + x.a = 0xFEDCBA9876543211 + self.assertEqual(x.a, 0xFEDCBA9876543211) + if __name__ == "__main__": unittest.main() diff --git a/lib-python/3/ctypes/test/test_numbers.py b/lib-python/3/ctypes/test/test_numbers.py --- 
a/lib-python/3/ctypes/test/test_numbers.py +++ b/lib-python/3/ctypes/test/test_numbers.py @@ -220,6 +220,16 @@ # probably be changed: self.assertRaises(TypeError, c_int, c_long(42)) + def test_float_overflow(self): + import sys + big_int = int(sys.float_info.max) * 2 + for t in float_types + [c_longdouble]: + self.assertRaises(OverflowError, t, big_int) + if (hasattr(t, "__ctype_be__")): + self.assertRaises(OverflowError, t.__ctype_be__, big_int) + if (hasattr(t, "__ctype_le__")): + self.assertRaises(OverflowError, t.__ctype_le__, big_int) + ## def test_perf(self): ## check_perf() diff --git a/lib-python/3/ctypes/test/test_returnfuncptrs.py b/lib-python/3/ctypes/test/test_returnfuncptrs.py --- a/lib-python/3/ctypes/test/test_returnfuncptrs.py +++ b/lib-python/3/ctypes/test/test_returnfuncptrs.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +import os import _ctypes_test @@ -33,5 +34,34 @@ self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) self.assertRaises(TypeError, strchr, b"abcdef") + def test_from_dll(self): + dll = CDLL(_ctypes_test.__file__) + # _CFuncPtr instances are now callable with a tuple argument + # which denotes a function name and a dll: + strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)(("my_strchr", dll)) + self.assertTrue(strchr(b"abcdef", b"b"), "bcdef") + self.assertEqual(strchr(b"abcdef", b"x"), None) + self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) + self.assertRaises(TypeError, strchr, b"abcdef") + + # Issue 6083: Reference counting bug + def test_from_dll_refcount(self): + class BadSequence(tuple): + def __getitem__(self, key): + if key == 0: + return "my_strchr" + if key == 1: + return CDLL(_ctypes_test.__file__) + raise IndexError + + # _CFuncPtr instances are now callable with a tuple argument + # which denotes a function name and a dll: + strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)( + BadSequence(("my_strchr", CDLL(_ctypes_test.__file__)))) + self.assertTrue(strchr(b"abcdef", b"b"), "bcdef") + 
self.assertEqual(strchr(b"abcdef", b"x"), None) + self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) + self.assertRaises(TypeError, strchr, b"abcdef") + if __name__ == "__main__": unittest.main() diff --git a/lib-python/3/ctypes/test/test_structures.py b/lib-python/3/ctypes/test/test_structures.py --- a/lib-python/3/ctypes/test/test_structures.py +++ b/lib-python/3/ctypes/test/test_structures.py @@ -1,6 +1,7 @@ import unittest from ctypes import * from struct import calcsize +import _testcapi class SubclassesTest(unittest.TestCase): def test_subclass(self): @@ -199,6 +200,14 @@ "_pack_": -1} self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + # Issue 15989 + d = {"_fields_": [("a", c_byte)], + "_pack_": _testcapi.INT_MAX + 1} + self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + d = {"_fields_": [("a", c_byte)], + "_pack_": _testcapi.UINT_MAX + 2} + self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + def test_initializers(self): class Person(Structure): _fields_ = [("name", c_char*6), diff --git a/lib-python/3/ctypes/test/test_win32.py b/lib-python/3/ctypes/test/test_win32.py --- a/lib-python/3/ctypes/test/test_win32.py +++ b/lib-python/3/ctypes/test/test_win32.py @@ -3,6 +3,7 @@ from ctypes import * from ctypes.test import is_resource_enabled import unittest, sys +from test import support import _ctypes_test @@ -60,7 +61,9 @@ def test_COMError(self): from _ctypes import COMError - self.assertEqual(COMError.__doc__, "Raised when a COM method call failed.") + if support.HAVE_DOCSTRINGS: + self.assertEqual(COMError.__doc__, + "Raised when a COM method call failed.") ex = COMError(-1, "text", ("details",)) self.assertEqual(ex.hresult, -1) diff --git a/lib-python/3/curses/__init__.py b/lib-python/3/curses/__init__.py --- a/lib-python/3/curses/__init__.py +++ b/lib-python/3/curses/__init__.py @@ -5,7 +5,7 @@ import curses from curses import textpad - curses.initwin() + curses.initscr() ... 
""" diff --git a/lib-python/3/decimal.py b/lib-python/3/decimal.py --- a/lib-python/3/decimal.py +++ b/lib-python/3/decimal.py @@ -1555,7 +1555,13 @@ def __float__(self): """Float representation.""" - return float(str(self)) + if self._isnan(): + if self.is_snan(): + raise ValueError("Cannot convert signaling NaN to float") + s = "-nan" if self._sign else "nan" + else: + s = str(self) + return float(s) def __int__(self): """Converts self to an int, truncating if necessary.""" diff --git a/lib-python/3/distutils/__init__.py b/lib-python/3/distutils/__init__.py --- a/lib-python/3/distutils/__init__.py +++ b/lib-python/3/distutils/__init__.py @@ -13,5 +13,5 @@ # Updated automatically by the Python release process. # #--start constants-- -__version__ = "3.2.3" +__version__ = "3.2.5" #--end constants-- diff --git a/lib-python/3/distutils/command/bdist_rpm.py b/lib-python/3/distutils/command/bdist_rpm.py --- a/lib-python/3/distutils/command/bdist_rpm.py +++ b/lib-python/3/distutils/command/bdist_rpm.py @@ -3,7 +3,7 @@ Implements the Distutils 'bdist_rpm' command (create RPM source and binary distributions).""" -import sys, os +import subprocess, sys, os from distutils.core import Command from distutils.debug import DEBUG from distutils.util import get_platform @@ -190,7 +190,7 @@ if self.fix_python: self.python = sys.executable else: - self.python = "python" + self.python = "python3" elif self.fix_python: raise DistutilsOptionError( "--python and --fix-python are mutually exclusive options") @@ -320,6 +320,7 @@ rpm_cmd.append('-bb') else: rpm_cmd.append('-ba') + rpm_cmd.extend(['--define', '__python %s' % self.python]) if self.rpm3_mode: rpm_cmd.extend(['--define', '_topdir %s' % os.path.abspath(self.rpm_base)]) @@ -405,6 +406,21 @@ 'Summary: ' + self.distribution.get_description(), ] + # Workaround for #14443 which affects some RPM based systems such as + # RHEL6 (and probably derivatives) + vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}') + # 
Generate a potential replacement value for __os_install_post (whilst + # normalizing the whitespace to simplify the test for whether the + # invocation of brp-python-bytecompile passes in __python): + vendor_hook = '\n'.join([' %s \\' % line.strip() + for line in vendor_hook.splitlines()]) + problem = "brp-python-bytecompile \\\n" + fixed = "brp-python-bytecompile %{__python} \\\n" + fixed_hook = vendor_hook.replace(problem, fixed) + if fixed_hook != vendor_hook: + spec_file.append('# Workaround for http://bugs.python.org/issue14443') + spec_file.append('%define __os_install_post ' + fixed_hook + '\n') + # put locale summaries into spec file # XXX not supported for now (hard to put a dictionary # in a config file -- arg!) diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -682,13 +682,13 @@ # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return # ".pypy-VERSION.so" instead. 
- so_ext = _get_c_extension_suffix() - if so_ext is None: - so_ext = get_config_var('SO') # fall-back + ext_suffix = _get_c_extension_suffix() + if ext_suffix is None: + ext_suffix = get_config_var('EXT_SUFFIX') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' - return os.path.join(*ext_path) + so_ext + ext_suffix = '_d.pyd' + return os.path.join(*ext_path) + ext_suffix def get_export_symbols(self, ext): """Return the list of symbols that a shared extension has to diff --git a/lib-python/3/distutils/command/check.py b/lib-python/3/distutils/command/check.py --- a/lib-python/3/distutils/command/check.py +++ b/lib-python/3/distutils/command/check.py @@ -23,6 +23,9 @@ def system_message(self, level, message, *children, **kwargs): self.messages.append((level, message, children, kwargs)) + return nodes.system_message(message, level=level, + type=self.levels[level], + *children, **kwargs) HAS_DOCUTILS = True except Exception: diff --git a/lib-python/3/distutils/command/install.py b/lib-python/3/distutils/command/install.py --- a/lib-python/3/distutils/command/install.py +++ b/lib-python/3/distutils/command/install.py @@ -285,8 +285,8 @@ if self.user and (self.prefix or self.exec_prefix or self.home or self.install_base or self.install_platbase): - raise DistutilsOptionError("can't combine user with with prefix/" - "exec_prefix/home or install_(plat)base") + raise DistutilsOptionError("can't combine user with prefix, " + "exec_prefix/home, or install_(plat)base") # Next, stuff that's wrong (or dubious) only on certain platforms. 
if os.name != "posix": diff --git a/lib-python/3/distutils/command/upload.py b/lib-python/3/distutils/command/upload.py --- a/lib-python/3/distutils/command/upload.py +++ b/lib-python/3/distutils/command/upload.py @@ -125,7 +125,7 @@ if self.sign: data['gpg_signature'] = (os.path.basename(filename) + ".asc", - open(filename+".asc").read()) + open(filename+".asc", "rb").read()) # set up the authentication user_pass = (self.username + ":" + self.password).encode('ascii') diff --git a/lib-python/3/distutils/config.py b/lib-python/3/distutils/config.py --- a/lib-python/3/distutils/config.py +++ b/lib-python/3/distutils/config.py @@ -4,7 +4,6 @@ that uses .pypirc in the distutils.command package. """ import os -import sys from configparser import ConfigParser from distutils.cmd import Command @@ -43,16 +42,8 @@ def _store_pypirc(self, username, password): """Creates a default .pypirc file.""" rc = self._get_rc_file() - f = open(rc, 'w') - try: + with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: f.write(DEFAULT_PYPIRC % (username, password)) - finally: - f.close() - try: - os.chmod(rc, 0o600) - except OSError: - # should do something better here - pass def _read_pypirc(self): """Reads the .pypirc file.""" diff --git a/lib-python/3/distutils/dir_util.py b/lib-python/3/distutils/dir_util.py --- a/lib-python/3/distutils/dir_util.py +++ b/lib-python/3/distutils/dir_util.py @@ -141,6 +141,10 @@ src_name = os.path.join(src, n) dst_name = os.path.join(dst, n) + if n.startswith('.nfs'): + # skip NFS rename files + continue + if preserve_symlinks and os.path.islink(src_name): link_dest = os.readlink(src_name) if verbose >= 1: diff --git a/lib-python/3/distutils/sysconfig.py b/lib-python/3/distutils/sysconfig.py --- a/lib-python/3/distutils/sysconfig.py +++ b/lib-python/3/distutils/sysconfig.py @@ -23,6 +23,3 @@ from distutils.sysconfig_cpython import * from distutils.sysconfig_cpython import _config_vars # needed by setuptools from 
distutils.sysconfig_cpython import _variable_rx # read_setup_file() - -_USE_CLANG = None - diff --git a/lib-python/3/distutils/sysconfig_cpython.py b/lib-python/3/distutils/sysconfig_cpython.py --- a/lib-python/3/distutils/sysconfig_cpython.py +++ b/lib-python/3/distutils/sysconfig_cpython.py @@ -146,7 +146,7 @@ "I don't know where Python installs its library " "on platform '%s'" % os.name) -_USE_CLANG = None + def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. @@ -155,42 +155,28 @@ varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": - (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ + if sys.platform == "darwin": + # Perform first-time customization of compiler-related + # config vars on OS X now that we know we need a compiler. + # This is primarily to support Pythons from binary + # installers. The kind and paths to build tools on + # the user system may vary significantly from the system + # that Python itself was built on. Also the user OS + # version and build tools may not support the same set + # of CPU architectures for universal builds. + global _config_vars + if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): + import _osx_support + _osx_support.customize_compiler(_config_vars) + _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' + + (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') + 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') newcc = None if 'CC' in os.environ: - newcc = os.environ['CC'] - elif sys.platform == 'darwin' and cc == 'gcc-4.2': - # Issue #13590: - # Since Apple removed gcc-4.2 in Xcode 4.2, we can no - # longer assume it is available for extension module builds. 
- # If Python was built with gcc-4.2, check first to see if - # it is available on this system; if not, try to use clang - # instead unless the caller explicitly set CC. - global _USE_CLANG - if _USE_CLANG is None: - from distutils import log - from subprocess import Popen, PIPE - p = Popen("! type gcc-4.2 && type clang && exit 2", - shell=True, stdout=PIPE, stderr=PIPE) - p.wait() - if p.returncode == 2: - _USE_CLANG = True - log.warn("gcc-4.2 not found, using clang instead") - else: - _USE_CLANG = False - if _USE_CLANG: - newcc = 'clang' - if newcc: - # On OS X, if CC is overridden, use that as the default - # command for LDSHARED as well - if (sys.platform == 'darwin' - and 'LDSHARED' not in os.environ - and ldshared.startswith(cc)): - ldshared = newcc + ldshared[len(cc):] - cc = newcc + cc = os.environ['CC'] if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: @@ -225,7 +211,7 @@ linker_exe=cc, archiver=archiver) - compiler.shared_lib_extension = so_ext + compiler.shared_lib_extension = shlib_suffix def get_config_h_filename(): @@ -480,6 +466,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) @@ -499,6 +486,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" global _config_vars @@ -543,43 +531,11 @@ srcdir = os.path.join(base, _config_vars['srcdir']) _config_vars['srcdir'] = os.path.normpath(srcdir) + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. 
- # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _config_vars[key] = flags - - else: - - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _config_vars[key] = flags + import _osx_support + _osx_support.customize_config_vars(_config_vars) if args: vals = [] diff --git a/lib-python/3/distutils/tests/test_bdist_dumb.py b/lib-python/3/distutils/tests/test_bdist_dumb.py --- a/lib-python/3/distutils/tests/test_bdist_dumb.py +++ b/lib-python/3/distutils/tests/test_bdist_dumb.py @@ -88,9 +88,9 @@ fp.close() contents = sorted(os.path.basename(fn) for fn in contents) - wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], - 'foo.%s.pyc' % imp.get_tag(), - 'foo.py'] + wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py'] + if not sys.dont_write_bytecode: + wanted.append('foo.%s.pyc' % imp.get_tag()) self.assertEqual(contents, sorted(wanted)) def test_suite(): diff --git a/lib-python/3/distutils/tests/test_bdist_msi.py b/lib-python/3/distutils/tests/test_bdist_msi.py --- a/lib-python/3/distutils/tests/test_bdist_msi.py +++ b/lib-python/3/distutils/tests/test_bdist_msi.py @@ -1,12 +1,11 @@ """Tests for 
distutils.command.bdist_msi.""" +import sys import unittest -import sys - from test.support import run_unittest - from distutils.tests import support - at unittest.skipUnless(sys.platform=="win32", "These tests are only for win32") + + at unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows') class BDistMSITestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase): @@ -14,10 +13,11 @@ def test_minimal(self): # minimal test XXX need more tests from distutils.command.bdist_msi import bdist_msi - pkg_pth, dist = self.create_dist() + project_dir, dist = self.create_dist() cmd = bdist_msi(dist) cmd.ensure_finalized() + def test_suite(): return unittest.makeSuite(BDistMSITestCase) diff --git a/lib-python/3/distutils/tests/test_build_ext.py b/lib-python/3/distutils/tests/test_build_ext.py --- a/lib-python/3/distutils/tests/test_build_ext.py +++ b/lib-python/3/distutils/tests/test_build_ext.py @@ -73,8 +73,9 @@ self.assertEqual(xx.foo(2, 5), 7) self.assertEqual(xx.foo(13,15), 28) self.assertEqual(xx.new().demo(), None) - doc = 'This is a template module just for instruction.' - self.assertEqual(xx.__doc__, doc) + if support.HAVE_DOCSTRINGS: + doc = 'This is a template module just for instruction.' 
+ self.assertEqual(xx.__doc__, doc) self.assertTrue(isinstance(xx.Null(), xx.Null)) self.assertTrue(isinstance(xx.Str(), xx.Str)) @@ -317,8 +318,8 @@ finally: os.chdir(old_wd) self.assertTrue(os.path.exists(so_file)) - so_ext = sysconfig.get_config_var('SO') - self.assertTrue(so_file.endswith(so_ext)) + ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') + self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, other_tmp_dir) @@ -327,7 +328,7 @@ cmd.run() so_file = cmd.get_outputs()[0] self.assertTrue(os.path.exists(so_file)) - self.assertTrue(so_file.endswith(so_ext)) + self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, cmd.build_lib) @@ -354,7 +355,7 @@ self.assertEqual(lastdir, 'bar') def test_ext_fullpath(self): - ext = sysconfig.get_config_vars()['SO'] + ext = sysconfig.get_config_var('EXT_SUFFIX') # building lxml.etree inplace #etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c') #etree_ext = Extension('lxml.etree', [etree_c]) diff --git a/lib-python/3/distutils/tests/test_dir_util.py b/lib-python/3/distutils/tests/test_dir_util.py --- a/lib-python/3/distutils/tests/test_dir_util.py +++ b/lib-python/3/distutils/tests/test_dir_util.py @@ -76,7 +76,6 @@ remove_tree(self.root_target, verbose=0) - def test_copy_tree_verbosity(self): mkpath(self.target, verbose=0) @@ -88,11 +87,8 @@ mkpath(self.target, verbose=0) a_file = os.path.join(self.target, 'ok.txt') - f = open(a_file, 'w') - try: + with open(a_file, 'w') as f: f.write('some content') - finally: - f.close() wanted = ['copying %s -> %s' % (a_file, self.target2)] copy_tree(self.target, self.target2, verbose=1) @@ -101,6 +97,21 @@ remove_tree(self.root_target, verbose=0) remove_tree(self.target2, verbose=0) + def test_copy_tree_skips_nfs_temp_files(self): + mkpath(self.target, verbose=0) + + a_file = os.path.join(self.target, 'ok.txt') + nfs_file = os.path.join(self.target, '.nfs123abc') + for f in a_file, 
nfs_file: + with open(f, 'w') as fh: + fh.write('some content') + + copy_tree(self.target, self.target2) + self.assertEqual(os.listdir(self.target2), ['ok.txt']) + + remove_tree(self.root_target, verbose=0) + remove_tree(self.target2, verbose=0) + def test_ensure_relative(self): if os.sep == '/': self.assertEqual(ensure_relative('/home/foo'), 'home/foo') diff --git a/lib-python/3/distutils/tests/test_install.py b/lib-python/3/distutils/tests/test_install.py --- a/lib-python/3/distutils/tests/test_install.py +++ b/lib-python/3/distutils/tests/test_install.py @@ -23,7 +23,7 @@ def _make_ext_name(modname): if os.name == 'nt' and sys.executable.endswith('_d.exe'): modname += '_d' - return modname + sysconfig.get_config_var('SO') + return modname + sysconfig.get_config_var('EXT_SUFFIX') class InstallTestCase(support.TempdirManager, @@ -165,7 +165,7 @@ cmd.home = 'home' self.assertRaises(DistutilsOptionError, cmd.finalize_options) - # can't combine user with with prefix/exec_prefix/home or + # can't combine user with prefix/exec_prefix/home or # install_(plat)base cmd.prefix = None cmd.user = 'user' diff --git a/lib-python/3/distutils/tests/test_msvc9compiler.py b/lib-python/3/distutils/tests/test_msvc9compiler.py --- a/lib-python/3/distutils/tests/test_msvc9compiler.py +++ b/lib-python/3/distutils/tests/test_msvc9compiler.py @@ -104,7 +104,7 @@ unittest.TestCase): def test_no_compiler(self): - # makes sure query_vcvarsall throws + # makes sure query_vcvarsall raises # a DistutilsPlatformError if the compiler # is not found from distutils.msvc9compiler import query_vcvarsall diff --git a/lib-python/3/distutils/tests/test_register.py b/lib-python/3/distutils/tests/test_register.py --- a/lib-python/3/distutils/tests/test_register.py +++ b/lib-python/3/distutils/tests/test_register.py @@ -1,5 +1,4 @@ """Tests for distutils.command.register.""" -import sys import os import unittest import getpass @@ -10,11 +9,14 @@ from distutils.command import register as register_module 
from distutils.command.register import register -from distutils.core import Distribution from distutils.errors import DistutilsSetupError -from distutils.tests import support -from distutils.tests.test_config import PYPIRC, PyPIRCCommandTestCase +from distutils.tests.test_config import PyPIRCCommandTestCase + +try: + import docutils +except ImportError: + docutils = None PYPIRC_NOPASSWORD = """\ [distutils] @@ -193,6 +195,7 @@ self.assertEqual(headers['Content-length'], '290') self.assertTrue((b'tarek') in req.data) + @unittest.skipUnless(docutils is not None, 'needs docutils') def test_strict(self): # testing the script option # when on, the register command stops if @@ -205,13 +208,6 @@ cmd.strict = 1 self.assertRaises(DistutilsSetupError, cmd.run) - # we don't test the reSt feature if docutils - # is not installed - try: - import docutils - except ImportError: - return - # metadata are OK but long_description is broken metadata = {'url': 'xxx', 'author': 'xxx', 'author_email': 'éxéxé', @@ -265,6 +261,22 @@ finally: del register_module.input + @unittest.skipUnless(docutils is not None, 'needs docutils') + def test_register_invalid_long_description(self): + description = ':funkie:`str`' # mimic Sphinx-specific markup + metadata = {'url': 'xxx', 'author': 'xxx', + 'author_email': 'xxx', + 'name': 'xxx', 'version': 'xxx', + 'long_description': description} + cmd = self._get_cmd(metadata) + cmd.ensure_finalized() + cmd.strict = True + inputs = Inputs('2', 'tarek', 'tarek at ziade.org') + register_module.input = inputs + self.addCleanup(delattr, register_module, 'input') + + self.assertRaises(DistutilsSetupError, cmd.run) + def test_check_metadata_deprecated(self): # makes sure make_metadata is deprecated cmd = self._get_cmd() diff --git a/lib-python/3/distutils/tests/test_sdist.py b/lib-python/3/distutils/tests/test_sdist.py --- a/lib-python/3/distutils/tests/test_sdist.py +++ b/lib-python/3/distutils/tests/test_sdist.py @@ -6,6 +6,7 @@ import zipfile from os.path 
import join from textwrap import dedent +from test.support import captured_stdout, check_warnings, run_unittest try: import zlib @@ -13,7 +14,6 @@ except ImportError: ZLIB_SUPPORT = False -from test.support import captured_stdout, check_warnings, run_unittest from distutils.command.sdist import sdist, show_formats from distutils.core import Distribution @@ -83,9 +83,8 @@ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') def test_prune_file_list(self): - # this test creates a package with some vcs dirs in it - # and launch sdist to make sure they get pruned - # on all systems + # this test creates a project with some VCS dirs and an NFS rename + # file, then launches sdist to check they get pruned on all systems # creating VCS directories with some files in them os.mkdir(join(self.tmp_dir, 'somecode', '.svn')) @@ -99,6 +98,8 @@ self.write_file((self.tmp_dir, 'somecode', '.git', 'ok'), 'xxx') + self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx') + # now building a sdist dist, cmd = self.get_cmd() @@ -326,6 +327,7 @@ # filling data_files by pointing files in package_data dist.package_data = {'somecode': ['*.txt']} self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#') + cmd.formats = ['gztar'] cmd.ensure_finalized() cmd.run() diff --git a/lib-python/3/distutils/tests/test_sysconfig.py b/lib-python/3/distutils/tests/test_sysconfig.py --- a/lib-python/3/distutils/tests/test_sysconfig.py +++ b/lib-python/3/distutils/tests/test_sysconfig.py @@ -102,7 +102,27 @@ import sysconfig as global_sysconfig self.assertEqual(global_sysconfig.get_config_var('CFLAGS'), sysconfig.get_config_var('CFLAGS')) self.assertEqual(global_sysconfig.get_config_var('LDFLAGS'), sysconfig.get_config_var('LDFLAGS')) - self.assertEqual(global_sysconfig.get_config_var('LDSHARED'),sysconfig.get_config_var('LDSHARED')) + + @unittest.skipIf(sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'),'compiler flags customized') + def test_sysconfig_compiler_vars(self): + # On OS X, 
binary installers support extension module building on + # various levels of the operating system with differing Xcode + # configurations. This requires customization of some of the + # compiler configuration directives to suit the environment on + # the installed machine. Some of these customizations may require + # running external programs and, so, are deferred until needed by + # the first extension module build. With Python 3.3, only + # the Distutils version of sysconfig is used for extension module + # builds, which happens earlier in the Distutils tests. This may + # cause the following tests to fail since no tests have caused + # the global version of sysconfig to call the customization yet. + # The solution for now is to simply skip this test in this case. + # The longer-term solution is to only have one version of sysconfig. + + import sysconfig as global_sysconfig + if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'): + return + self.assertEqual(global_sysconfig.get_config_var('LDSHARED'), sysconfig.get_config_var('LDSHARED')) self.assertEqual(global_sysconfig.get_config_var('CC'), sysconfig.get_config_var('CC')) diff --git a/lib-python/3/distutils/tests/test_util.py b/lib-python/3/distutils/tests/test_util.py --- a/lib-python/3/distutils/tests/test_util.py +++ b/lib-python/3/distutils/tests/test_util.py @@ -13,6 +13,7 @@ from distutils.sysconfig import get_config_vars from distutils import sysconfig from distutils.tests import support +import _osx_support class UtilTestCase(support.EnvironGuard, unittest.TestCase): @@ -92,6 +93,7 @@ ('Darwin Kernel Version 8.11.1: ' 'Wed Oct 10 18:23:28 PDT 2007; ' 'root:xnu-792.25.20~1/RELEASE_I386'), 'i386')) + _osx_support._remove_original_values(get_config_vars()) get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3' From noreply at buildbot.pypy.org Thu Apr 10 00:17:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 10 Apr 2014 00:17:07 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: 
merge py3k Message-ID: <20140409221707.074941C12F3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70527:df78e44258ec Date: 2014-04-09 15:15 -0700 http://bitbucket.org/pypy/pypy/changeset/df78e44258ec/ Log: merge py3k diff too long, truncating to 2000 out of 30044 lines diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -10,6 +10,7 @@ import tempfile import unittest import argparse +import gc from StringIO import StringIO @@ -47,7 +48,10 @@ def tearDown(self): os.chdir(self.old_dir) - shutil.rmtree(self.temp_dir, True) + for root, dirs, files in os.walk(self.temp_dir, topdown=False): + for name in files: + os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -162,6 +162,7 @@ # Remark: Do not perform more than one test per open file, # since that does NOT catch the readline error on Windows. 
data = 'xxx' + self.f.close() for mode in ['w', 'wb', 'a', 'ab']: for attr in ['read', 'readline', 'readlines']: self.f = open(TESTFN, mode) diff --git a/lib-python/3/__future__.py b/lib-python/3/__future__.py --- a/lib-python/3/__future__.py +++ b/lib-python/3/__future__.py @@ -114,7 +114,7 @@ CO_FUTURE_DIVISION) absolute_import = _Feature((2, 5, 0, "alpha", 1), - (2, 7, 0, "alpha", 0), + (3, 0, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT) with_statement = _Feature((2, 5, 0, "alpha", 1), diff --git a/lib-python/3/_abcoll.py b/lib-python/3/_abcoll.py --- a/lib-python/3/_abcoll.py +++ b/lib-python/3/_abcoll.py @@ -184,12 +184,12 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return other.__lt__(self) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + return other.__le__(self) def __eq__(self, other): if not isinstance(other, Set): diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py new file mode 100644 --- /dev/null +++ b/lib-python/3/_osx_support.py @@ -0,0 +1,488 @@ +"""Shared OS X support functions.""" + +import os +import re +import sys + +__all__ = [ + 'compiler_fixup', + 'customize_config_vars', + 'customize_compiler', + 'get_platform_osx', +] + +# configuration variables that may contain universal build flags, +# like "-arch" or "-isdkroot", that may need customization for +# the user environment +_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS', + 'BLDSHARED', 'LDSHARED', 'CC', 'CXX', + 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS', + 'PY_CORE_CFLAGS') + +# configuration variables that may contain compiler calls +_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX') + +# prefix added to original configuration variable names +_INITPRE = '_OSX_SUPPORT_INITIAL_' + + +def _find_executable(executable, path=None): + """Tries to find 'executable' in the directories listed in 'path'. 
+ + A string listing directories separated by 'os.pathsep'; defaults to + os.environ['PATH']. Returns the complete filename or None if not found. + """ + if path is None: + path = os.environ['PATH'] + + paths = path.split(os.pathsep) + base, ext = os.path.splitext(executable) + + if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'): + executable = executable + '.exe' + + if not os.path.isfile(executable): + for p in paths: + f = os.path.join(p, executable) + if os.path.isfile(f): + # the file exists, we have a shot at spawn working + return f + return None + else: + return executable + + +def _read_output(commandstring): + """Output from succesful command execution or None""" + # Similar to os.popen(commandstring, "r").read(), + # but without actually using os.popen because that + # function is not usable during python bootstrap. + # tempfile is also not available then. + import contextlib + try: + import tempfile + fp = tempfile.NamedTemporaryFile() + except ImportError: + fp = open("/tmp/_osx_support.%s"%( + os.getpid(),), "w+b") + + with contextlib.closing(fp) as fp: + cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + return fp.read().decode('utf-8').strip() if not os.system(cmd) else None + + +def _find_build_tool(toolname): + """Find a build tool on current path or using xcrun""" + return (_find_executable(toolname) + or _read_output("/usr/bin/xcrun -find %s" % (toolname,)) + or '' + ) + +_SYSTEM_VERSION = None + +def _get_system_version(): + """Return the OS X system version as a string""" + # Reading this plist is a documented way to get the system + # version (see the documentation for the Gestalt Manager) + # We avoid using platform.mac_ver to avoid possible bootstrap issues during + # the build of Python itself (distutils is used to build standard library + # extensions). 
+ + global _SYSTEM_VERSION + + if _SYSTEM_VERSION is None: + _SYSTEM_VERSION = '' + try: + f = open('/System/Library/CoreServices/SystemVersion.plist') + except IOError: + # We're on a plain darwin box, fall back to the default + # behaviour. + pass + else: + try: + m = re.search(r'ProductUserVisibleVersion\s*' + r'(.*?)', f.read()) + finally: + f.close() + if m is not None: + _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + + return _SYSTEM_VERSION + +def _remove_original_values(_config_vars): + """Remove original unmodified values for testing""" + # This is needed for higher-level cross-platform tests of get_platform. + for k in list(_config_vars): + if k.startswith(_INITPRE): + del _config_vars[k] + +def _save_modified_value(_config_vars, cv, newvalue): + """Save modified and original unmodified value of configuration var""" + + oldvalue = _config_vars.get(cv, '') + if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars): + _config_vars[_INITPRE + cv] = oldvalue + _config_vars[cv] = newvalue + +def _supports_universal_builds(): + """Returns True if universal builds are supported on this system""" + # As an approximation, we assume that if we are running on 10.4 or above, + # then we are running with an Xcode environment that supports universal + # builds, in particular -isysroot and -arch arguments to the compiler. This + # is in support of allowing 10.4 universal builds to run on 10.3.x systems. + + osx_version = _get_system_version() + if osx_version: + try: + osx_version = tuple(int(i) for i in osx_version.split('.')) + except ValueError: + osx_version = '' + return bool(osx_version >= (10, 4)) if osx_version else False + + +def _find_appropriate_compiler(_config_vars): + """Find appropriate C compiler for extension module builds""" + + # Issue #13590: + # The OSX location for the compiler varies between OSX + # (or rather Xcode) releases. 
With older releases (up-to 10.5) + # the compiler is in /usr/bin, with newer releases the compiler + # can only be found inside Xcode.app if the "Command Line Tools" + # are not installed. + # + # Futhermore, the compiler that can be used varies between + # Xcode releases. Upto Xcode 4 it was possible to use 'gcc-4.2' + # as the compiler, after that 'clang' should be used because + # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that + # miscompiles Python. + + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + # The CC config var might contain additional arguments. + # Ignore them while searching. + cc = oldcc = _config_vars['CC'].split()[0] + if not _find_executable(cc): + # Compiler is not found on the shell search PATH. + # Now search for clang, first on PATH (if the Command LIne + # Tools have been installed in / or if the user has provided + # another location via CC). If not found, try using xcrun + # to find an uninstalled clang (within a selected Xcode). + + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself (and os.popen is + # implemented on top of subprocess and is therefore not + # usable as well) + + cc = _find_build_tool('clang') + + elif os.path.basename(cc).startswith('gcc'): + # Compiler is GCC, check if it is LLVM-GCC + data = _read_output("'%s' --version" + % (cc.replace("'", "'\"'\"'"),)) + if 'llvm-gcc' in data: + # Found LLVM-GCC, fall back to clang + cc = _find_build_tool('clang') + + if not cc: + raise SystemError( + "Cannot locate working compiler") + + if cc != oldcc: + # Found a replacement compiler. + # Modify config vars using new compiler, if not already explictly + # overriden by an env variable, preserving additional arguments. 
+ for cv in _COMPILER_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + cv_split = _config_vars[cv].split() + cv_split[0] = cc if cv != 'CXX' else cc + '++' + _save_modified_value(_config_vars, cv, ' '.join(cv_split)) + + return _config_vars + + +def _remove_universal_flags(_config_vars): + """Remove all universal build arguments from config vars""" + + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _remove_unsupported_archs(_config_vars): + """Remove any unsupported archs from config vars""" + # Different Xcode releases support different sets for '-arch' + # flags. In particular, Xcode 4.x no longer supports the + # PPC architectures. + # + # This code automatically removes '-arch ppc' and '-arch ppc64' + # when these are not supported. That makes it possible to + # build extensions on OSX 10.7 and later with the prebuilt + # 32-bit installer on the python.org website. 
+ + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself + status = os.system("'%s' -arch ppc -x c /dev/null 2>/dev/null"%( + _config_vars['CC'].replace("'", "'\"'\"'"),)) + # The Apple compiler drivers return status 255 if no PPC + if (status >> 8) == 255: + # Compiler doesn't support PPC, remove the related + # '-arch' flags if not explicitly overridden by an + # environment variable + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+ppc\w*\s', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _override_all_archs(_config_vars): + """Allow override of all archs with ARCHFLAGS env var""" + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and '-arch' in _config_vars[cv]: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _check_for_unavailable_sdk(_config_vars): + """Remove references to any SDKs not available""" + # If we're on OSX 10.5 or later and the user tries to + # compile an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. This is particularly important with + # the standalong Command Line Tools alternative to a + # full-blown Xcode install since the CLT packages do not + # provide SDKs. 
If the SDK is not present, it is assumed + # that the header files and dev libs have been installed + # to /usr and /System/Library by either a standalone CLT + # package or the CLT component within Xcode. + cflags = _config_vars.get('CFLAGS', '') + m = re.search(r'-isysroot\s+(\S+)', cflags) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def compiler_fixup(compiler_so, cc_args): + """ + This function will strip '-isysroot PATH' and '-arch ARCH' from the + compile flags if the user has specified one them in extra_compile_flags. + + This is needed because '-arch ARCH' adds another architecture to the + build, without a way to remove an architecture. Furthermore GCC will + barf if multiple '-isysroot' arguments are present. + """ + stripArch = stripSysroot = False + + compiler_so = list(compiler_so) + + if not _supports_universal_builds(): + # OSX before 10.4.0, these don't support -arch and -isysroot at + # all. 
+ stripArch = stripSysroot = True + else: + stripArch = '-arch' in cc_args + stripSysroot = '-isysroot' in cc_args + + if stripArch or 'ARCHFLAGS' in os.environ: + while True: + try: + index = compiler_so.index('-arch') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + if 'ARCHFLAGS' in os.environ and not stripArch: + # User specified different -arch flags in the environ, + # see also distutils.sysconfig + compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() + + if stripSysroot: + while True: + try: + index = compiler_so.index('-isysroot') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + # Check if the SDK that is used during compilation actually exists, + # the universal build requires the usage of a universal SDK and not all + # users have that installed by default. + sysroot = None + if '-isysroot' in cc_args: + idx = cc_args.index('-isysroot') + sysroot = cc_args[idx+1] + elif '-isysroot' in compiler_so: + idx = compiler_so.index('-isysroot') + sysroot = compiler_so[idx+1] + + if sysroot and not os.path.isdir(sysroot): + from distutils import log + log.warn("Compiling with an SDK that doesn't seem to exist: %s", + sysroot) + log.warn("Please check your Xcode installation") + + return compiler_so + + +def customize_config_vars(_config_vars): + """Customize Python build configuration variables. + + Called internally from sysconfig with a mutable mapping + containing name/value pairs parsed from the configured + makefile used to build this interpreter. Returns + the mapping updated as needed to reflect the environment + in which the interpreter is running; in the case of + a Python from a binary installer, the installed + environment may be very different from the build + environment, i.e. different OS levels, different + built tools, different available CPU architectures. 
+ + This customization is performed whenever + distutils.sysconfig.get_config_vars() is first + called. It may be used in environments where no + compilers are present, i.e. when installing pure + Python dists. Customization of compiler paths + and detection of unavailable archs is deferred + until the first extention module build is + requested (in distutils.sysconfig.customize_compiler). + + Currently called from distutils.sysconfig + """ + + if not _supports_universal_builds(): + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + _remove_universal_flags(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + # Remove references to sdks that are not found + _check_for_unavailable_sdk(_config_vars) + + return _config_vars + + +def customize_compiler(_config_vars): + """Customize compiler path and configuration variables. + + This customization is performed when the first + extension module build is requested + in distutils.sysconfig.customize_compiler). + """ + + # Find a compiler to use for extension module builds + _find_appropriate_compiler(_config_vars) + + # Remove ppc arch flags if not supported here + _remove_unsupported_archs(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + return _config_vars + + +def get_platform_osx(_config_vars, osname, release, machine): + """Filter values for get_platform()""" + # called from get_platform() in sysconfig and distutils.util + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. 
+ + macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '') + macrelease = _get_system_version() or macver + macver = macver or macrelease + + if macver: + release = macver + osname = "macosx" + + # Use the original CFLAGS value, if available, so that we + # return the same machine type for the platform string. + # Otherwise, distutils may consider this a cross-compiling + # case and disallow installs. + cflags = _config_vars.get(_INITPRE+'CFLAGS', + _config_vars.get('CFLAGS', '')) + if ((macrelease + '.') >= '10.4.' and + '-arch' in cflags.strip()): + # The universal build will build fat binaries, but not on + # systems before 10.4 + + machine = 'fat' + + archs = re.findall('-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxsize >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxsize >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return (osname, release, machine) diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -298,7 +298,7 @@ def seek(self, pos, whence=0): """Change stream position. - Change the stream position to byte offset offset. offset is + Change the stream position to byte offset pos. 
Argument pos is interpreted relative to the position indicated by whence. Values for whence are ints: @@ -889,12 +889,18 @@ return pos def readable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def writable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True @@ -1567,6 +1573,8 @@ return self._buffer def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return self._seekable def readable(self): diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -339,7 +339,7 @@ raise ValueError("unconverted data remains: %s" % data_string[found.end():]) - year = 1900 + year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 @@ -444,6 +444,12 @@ else: tz = value break + leap_year_fix = False + if year is None and month == 2 and day == 29: + year = 1904 # 1904 is first leap year of 20th century + leap_year_fix = True + elif year is None: + year = 1900 # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian == -1 and week_of_year != -1 and weekday != -1: @@ -472,6 +478,12 @@ else: gmtoff = None + if leap_year_fix: + # the caller didn't supply a year but asked for Feb 29th. We couldn't + # use the default of 1900 for computations. We set it back to ensure + # that February 29th is smaller than March 1st. 
+ year = 1900 + return (year, month, day, hour, minute, second, weekday, julian, tz, gmtoff, tzname), fraction diff --git a/lib-python/3/_weakrefset.py b/lib-python/3/_weakrefset.py --- a/lib-python/3/_weakrefset.py +++ b/lib-python/3/_weakrefset.py @@ -63,7 +63,7 @@ yield item def __len__(self): - return sum(x() is not None for x in self.data) + return len(self.data) - len(self._pending_removals) def __contains__(self, item): try: @@ -114,36 +114,21 @@ def update(self, other): if self._pending_removals: self._commit_removals() - if isinstance(other, self.__class__): - self.data.update(other.data) - else: - for element in other: - self.add(element) + for element in other: + self.add(element) def __ior__(self, other): self.update(other) return self - # Helper functions for simple delegating methods. - def _apply(self, other, method): - if not isinstance(other, self.__class__): - other = self.__class__(other) - newdata = method(other.data) - newset = self.__class__() - newset.data = newdata + def difference(self, other): + newset = self.copy() + newset.difference_update(other) return newset - - def difference(self, other): - return self._apply(other, self.data.difference) __sub__ = difference def difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.difference_update(ref(item) for item in other) + self.__isub__(other) def __isub__(self, other): if self._pending_removals: self._commit_removals() @@ -154,13 +139,11 @@ return self def intersection(self, other): - return self._apply(other, self.data.intersection) + return self.__class__(item for item in other if item in self) __and__ = intersection def intersection_update(self, other): - if self._pending_removals: - self._commit_removals() - self.data.intersection_update(ref(item) for item in other) + self.__iand__(other) def __iand__(self, other): if self._pending_removals: self._commit_removals() @@ -169,17 +152,17 @@ def 
issubset(self, other): return self.data.issubset(ref(item) for item in other) - __lt__ = issubset + __le__ = issubset - def __le__(self, other): - return self.data <= set(ref(item) for item in other) + def __lt__(self, other): + return self.data < set(ref(item) for item in other) def issuperset(self, other): return self.data.issuperset(ref(item) for item in other) - __gt__ = issuperset + __ge__ = issuperset - def __ge__(self, other): - return self.data >= set(ref(item) for item in other) + def __gt__(self, other): + return self.data > set(ref(item) for item in other) def __eq__(self, other): if not isinstance(other, self.__class__): @@ -187,27 +170,24 @@ return self.data == set(ref(item) for item in other) def symmetric_difference(self, other): - return self._apply(other, self.data.symmetric_difference) + newset = self.copy() + newset.symmetric_difference_update(other) + return newset __xor__ = symmetric_difference def symmetric_difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.__ixor__(other) def __ixor__(self, other): if self._pending_removals: self._commit_removals() if self is other: self.data.clear() else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) return self def union(self, other): - return self._apply(other, self.data.union) + return self.__class__(e for s in (self, other) for e in s) __or__ = union def isdisjoint(self, other): diff --git a/lib-python/3/aifc.py b/lib-python/3/aifc.py --- a/lib-python/3/aifc.py +++ b/lib-python/3/aifc.py @@ -692,7 +692,9 @@ self._patchheader() def close(self): - if self._file: + if self._file is None: + return + try: self._ensure_header_written(0) if self._datawritten & 1: # quick pad to even size @@ -703,10 +705,12 @@ self._datalength != self._datawritten or 
\ self._marklength: self._patchheader() + finally: # Prevent ref cycles self._convert = None - self._file.close() + f = self._file self._file = None + f.close() # # Internal methods. diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -736,10 +736,10 @@ - default -- The value to be produced if the option is not specified. - - type -- The type which the command-line arguments should be converted - to, should be one of 'string', 'int', 'float', 'complex' or a - callable object that accepts a single string argument. If None, - 'string' is assumed. + - type -- A callable that accepts a single string argument, and + returns the converted value. The standard Python types str, int, + float, and complex are useful examples of such callables. If None, + str is used. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate @@ -1701,9 +1701,12 @@ return args def parse_known_args(self, args=None, namespace=None): - # args default to the system args if args is None: + # args default to the system args args = _sys.argv[1:] + else: + # make sure that args are mutable + args = list(args) # default Namespace built from parser defaults if namespace is None: @@ -1714,10 +1717,7 @@ if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: - default = action.default - if isinstance(action.default, str): - default = self._get_value(action, default) - setattr(namespace, action.dest, default) + setattr(namespace, action.dest, action.default) # add any parser defaults that aren't present for dest in self._defaults: @@ -1957,12 +1957,23 @@ if positionals: self.error(_('too few arguments')) - # make sure all required actions were present + # make sure all required actions were present, and convert defaults. 
for action in self._actions: - if action.required: - if action not in seen_actions: + if action not in seen_actions: + if action.required: name = _get_action_name(action) self.error(_('argument %s is required') % name) + else: + # Convert action default now instead of doing it before + # parsing arguments to avoid calling convert functions + # twice (which may fail) if the argument was given, but + # only if it was defined already in the namespace + if (action.default is not None and + isinstance(action.default, str) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): + setattr(namespace, action.dest, + self._get_value(action, action.default)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: @@ -1988,7 +1999,7 @@ for arg_string in arg_strings: # for regular arguments, just add them back into the list - if arg_string[0] not in self.fromfile_prefix_chars: + if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content @@ -2198,9 +2209,12 @@ # Value conversion methods # ======================== def _get_values(self, action, arg_strings): - # for everything but PARSER args, strip out '--' + # for everything but PARSER, REMAINDER args, strip out first '--' if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] + try: + arg_strings.remove('--') + except ValueError: + pass # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: diff --git a/lib-python/3/asyncore.py b/lib-python/3/asyncore.py --- a/lib-python/3/asyncore.py +++ b/lib-python/3/asyncore.py @@ -225,6 +225,7 @@ debug = False connected = False accepting = False + connecting = False closing = False addr = None ignore_log_types = frozenset(['warning']) @@ -248,7 +249,7 @@ try: self.addr = sock.getpeername() except socket.error 
as err: - if err.args[0] == ENOTCONN: + if err.args[0] in (ENOTCONN, EINVAL): # To handle the case where we got an unconnected # socket. self.connected = False @@ -342,9 +343,11 @@ def connect(self, address): self.connected = False + self.connecting = True err = self.socket.connect_ex(address) if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ or err == EINVAL and os.name in ('nt', 'ce'): + self.addr = address return if err in (0, EISCONN): self.addr = address @@ -390,7 +393,7 @@ else: return data except socket.error as why: - # winsock sometimes throws ENOTCONN + # winsock sometimes raises ENOTCONN if why.args[0] in _DISCONNECTED: self.handle_close() return b'' @@ -400,6 +403,7 @@ def close(self): self.connected = False self.accepting = False + self.connecting = False self.del_channel() try: self.socket.close() @@ -438,7 +442,8 @@ # sockets that are connected self.handle_accept() elif not self.connected: - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_read() else: self.handle_read() @@ -449,6 +454,7 @@ raise socket.error(err, _strerror(err)) self.handle_connect() self.connected = True + self.connecting = False def handle_write_event(self): if self.accepting: @@ -457,12 +463,8 @@ return if not self.connected: - #check for errors - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - if err != 0: - raise socket.error(err, _strerror(err)) - - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_write() def handle_expt_event(self): diff --git a/lib-python/3/bdb.py b/lib-python/3/bdb.py --- a/lib-python/3/bdb.py +++ b/lib-python/3/bdb.py @@ -22,6 +22,7 @@ self.skip = set(skip) if skip else None self.breaks = {} self.fncache = {} + self.frame_returning = None def canonic(self, filename): if filename == "<" + filename[1:-1] + ">": @@ -80,7 +81,11 @@ def dispatch_return(self, frame, arg): if self.stop_here(frame) or frame == self.returnframe: - self.user_return(frame, arg) + 
try: + self.frame_returning = frame + self.user_return(frame, arg) + finally: + self.frame_returning = None if self.quitting: raise BdbQuit return self.trace_dispatch @@ -186,6 +191,14 @@ def set_step(self): """Stop after one line of code.""" + # Issue #13183: pdb skips frames after hitting a breakpoint and running + # step commands. + # Restore the trace function in the caller (that may not have been set + # for performance reasons) when returning from the current frame. + if self.frame_returning: + caller_frame = self.frame_returning.f_back + if caller_frame and not caller_frame.f_trace: + caller_frame.f_trace = self.trace_dispatch self._set_stopinfo(None, None) def set_next(self, frame): diff --git a/lib-python/3/calendar.py b/lib-python/3/calendar.py --- a/lib-python/3/calendar.py +++ b/lib-python/3/calendar.py @@ -161,7 +161,11 @@ oneday = datetime.timedelta(days=1) while True: yield date - date += oneday + try: + date += oneday + except OverflowError: + # Adding one day could fail after datetime.MAXYEAR + break if date.month != month and date.weekday() == self.firstweekday: break diff --git a/lib-python/3/cgi.py b/lib-python/3/cgi.py --- a/lib-python/3/cgi.py +++ b/lib-python/3/cgi.py @@ -214,17 +214,17 @@ """ import http.client - boundary = "" + boundary = b"" if 'boundary' in pdict: boundary = pdict['boundary'] if not valid_boundary(boundary): raise ValueError('Invalid boundary in multipart form: %r' % (boundary,)) - nextpart = "--" + boundary - lastpart = "--" + boundary + "--" + nextpart = b"--" + boundary + lastpart = b"--" + boundary + b"--" partdict = {} - terminator = "" + terminator = b"" while terminator != lastpart: bytes = -1 @@ -243,7 +243,7 @@ raise ValueError('Maximum content length exceeded') data = fp.read(bytes) else: - data = "" + data = b"" # Read lines until end of part. 
lines = [] while 1: @@ -251,7 +251,7 @@ if not line: terminator = lastpart # End outer loop break - if line.startswith("--"): + if line.startswith(b"--"): terminator = line.rstrip() if terminator in (nextpart, lastpart): break @@ -263,12 +263,12 @@ if lines: # Strip final line terminator line = lines[-1] - if line[-2:] == "\r\n": + if line[-2:] == b"\r\n": line = line[:-2] - elif line[-1:] == "\n": + elif line[-1:] == b"\n": line = line[:-1] lines[-1] = line - data = "".join(lines) + data = b"".join(lines) line = headers['content-disposition'] if not line: continue diff --git a/lib-python/3/cgitb.py b/lib-python/3/cgitb.py --- a/lib-python/3/cgitb.py +++ b/lib-python/3/cgitb.py @@ -293,14 +293,19 @@ if self.logdir is not None: suffix = ['.txt', '.html'][self.format=="html"] (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) + try: file = os.fdopen(fd, 'w') file.write(doc) file.close() - msg = '

    %s contains the description of this error.' % path + msg = '%s contains the description of this error.' % path except: - msg = '

    Tried to save traceback to %s, but failed.' % path - self.file.write(msg + '\n') + msg = 'Tried to save traceback to %s, but failed.' % path + + if self.format == 'html': + self.file.write('

    %s

    \n' % msg) + else: + self.file.write(msg + '\n') try: self.file.flush() except: pass diff --git a/lib-python/3/collections.py b/lib-python/3/collections.py --- a/lib-python/3/collections.py +++ b/lib-python/3/collections.py @@ -281,6 +281,10 @@ 'Return self as a plain tuple. Used by copy and pickle.' return tuple(self) + def __getstate__(self): + 'Exclude the OrderedDict from pickling' + return None + {field_defs} ''' diff --git a/lib-python/3/concurrent/futures/_base.py b/lib-python/3/concurrent/futures/_base.py --- a/lib-python/3/concurrent/futures/_base.py +++ b/lib-python/3/concurrent/futures/_base.py @@ -112,12 +112,14 @@ def __init__(self, num_pending_calls, stop_on_exception): self.num_pending_calls = num_pending_calls self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() super().__init__() def _decrement_pending_calls(self): - self.num_pending_calls -= 1 - if not self.num_pending_calls: - self.event.set() + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() def add_result(self, future): super().add_result(future) @@ -517,7 +519,7 @@ """Returns a iterator equivalent to map(fn, iter). Args: - fn: A callable that will take take as many arguments as there are + fn: A callable that will take as many arguments as there are passed iterables. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. diff --git a/lib-python/3/configparser.py b/lib-python/3/configparser.py --- a/lib-python/3/configparser.py +++ b/lib-python/3/configparser.py @@ -99,10 +99,9 @@ yes, on for True). Returns False or True. items(section=_UNSET, raw=False, vars=None) - If section is given, return a list of tuples with (section_name, - section_proxy) for each section, including DEFAULTSECT. Otherwise, - return a list of tuples with (name, value) for each option - in the section. + If section is given, return a list of tuples with (name, value) for + each option in the section. 
Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. remove_section(section) Remove the given file section and all its options. @@ -852,6 +851,19 @@ value_getter = lambda option: d[option] return [(option, value_getter(option)) for option in d.keys()] + def popitem(self): + """Remove a section from the parser and return it as + a (section_name, section_proxy) tuple. If no section is present, raise + KeyError. + + The section DEFAULT is never returned because it cannot be removed. + """ + for key in self.sections(): + value = self[key] + del self[key] + return key, value + raise KeyError + def optionxform(self, optionstr): return optionstr.lower() @@ -947,7 +959,8 @@ # XXX this is not atomic if read_dict fails at any point. Then again, # no update method in configparser is atomic in this implementation. - self.remove_section(key) + if key in self._sections: + self._sections[key].clear() self.read_dict({key: value}) def __delitem__(self, key): diff --git a/lib-python/3/ctypes/test/test_bitfields.py b/lib-python/3/ctypes/test/test_bitfields.py --- a/lib-python/3/ctypes/test/test_bitfields.py +++ b/lib-python/3/ctypes/test/test_bitfields.py @@ -246,5 +246,25 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] + @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + def test_uint32(self): + class X(Structure): + _fields_ = [("a", c_uint32, 32)] + x = X() + x.a = 10 + self.assertEqual(x.a, 10) + x.a = 0xFDCBA987 + self.assertEqual(x.a, 0xFDCBA987) + + @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + def test_uint64(self): + class X(Structure): + _fields_ = [("a", c_uint64, 64)] + x = X() + x.a = 10 + self.assertEqual(x.a, 10) + x.a = 0xFEDCBA9876543211 + self.assertEqual(x.a, 0xFEDCBA9876543211) + if __name__ == "__main__": unittest.main() diff --git a/lib-python/3/ctypes/test/test_numbers.py b/lib-python/3/ctypes/test/test_numbers.py --- 
a/lib-python/3/ctypes/test/test_numbers.py +++ b/lib-python/3/ctypes/test/test_numbers.py @@ -220,6 +220,16 @@ # probably be changed: self.assertRaises(TypeError, c_int, c_long(42)) + def test_float_overflow(self): + import sys + big_int = int(sys.float_info.max) * 2 + for t in float_types + [c_longdouble]: + self.assertRaises(OverflowError, t, big_int) + if (hasattr(t, "__ctype_be__")): + self.assertRaises(OverflowError, t.__ctype_be__, big_int) + if (hasattr(t, "__ctype_le__")): + self.assertRaises(OverflowError, t.__ctype_le__, big_int) + ## def test_perf(self): ## check_perf() diff --git a/lib-python/3/ctypes/test/test_returnfuncptrs.py b/lib-python/3/ctypes/test/test_returnfuncptrs.py --- a/lib-python/3/ctypes/test/test_returnfuncptrs.py +++ b/lib-python/3/ctypes/test/test_returnfuncptrs.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +import os import _ctypes_test @@ -33,5 +34,34 @@ self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) self.assertRaises(TypeError, strchr, b"abcdef") + def test_from_dll(self): + dll = CDLL(_ctypes_test.__file__) + # _CFuncPtr instances are now callable with a tuple argument + # which denotes a function name and a dll: + strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)(("my_strchr", dll)) + self.assertTrue(strchr(b"abcdef", b"b"), "bcdef") + self.assertEqual(strchr(b"abcdef", b"x"), None) + self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) + self.assertRaises(TypeError, strchr, b"abcdef") + + # Issue 6083: Reference counting bug + def test_from_dll_refcount(self): + class BadSequence(tuple): + def __getitem__(self, key): + if key == 0: + return "my_strchr" + if key == 1: + return CDLL(_ctypes_test.__file__) + raise IndexError + + # _CFuncPtr instances are now callable with a tuple argument + # which denotes a function name and a dll: + strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)( + BadSequence(("my_strchr", CDLL(_ctypes_test.__file__)))) + self.assertTrue(strchr(b"abcdef", b"b"), "bcdef") + 
self.assertEqual(strchr(b"abcdef", b"x"), None) + self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) + self.assertRaises(TypeError, strchr, b"abcdef") + if __name__ == "__main__": unittest.main() diff --git a/lib-python/3/ctypes/test/test_structures.py b/lib-python/3/ctypes/test/test_structures.py --- a/lib-python/3/ctypes/test/test_structures.py +++ b/lib-python/3/ctypes/test/test_structures.py @@ -1,6 +1,7 @@ import unittest from ctypes import * from struct import calcsize +import _testcapi class SubclassesTest(unittest.TestCase): def test_subclass(self): @@ -199,6 +200,14 @@ "_pack_": -1} self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + # Issue 15989 + d = {"_fields_": [("a", c_byte)], + "_pack_": _testcapi.INT_MAX + 1} + self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + d = {"_fields_": [("a", c_byte)], + "_pack_": _testcapi.UINT_MAX + 2} + self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + def test_initializers(self): class Person(Structure): _fields_ = [("name", c_char*6), diff --git a/lib-python/3/ctypes/test/test_win32.py b/lib-python/3/ctypes/test/test_win32.py --- a/lib-python/3/ctypes/test/test_win32.py +++ b/lib-python/3/ctypes/test/test_win32.py @@ -3,6 +3,7 @@ from ctypes import * from ctypes.test import is_resource_enabled import unittest, sys +from test import support import _ctypes_test @@ -60,7 +61,9 @@ def test_COMError(self): from _ctypes import COMError - self.assertEqual(COMError.__doc__, "Raised when a COM method call failed.") + if support.HAVE_DOCSTRINGS: + self.assertEqual(COMError.__doc__, + "Raised when a COM method call failed.") ex = COMError(-1, "text", ("details",)) self.assertEqual(ex.hresult, -1) diff --git a/lib-python/3/curses/__init__.py b/lib-python/3/curses/__init__.py --- a/lib-python/3/curses/__init__.py +++ b/lib-python/3/curses/__init__.py @@ -5,7 +5,7 @@ import curses from curses import textpad - curses.initwin() + curses.initscr() ... 
""" diff --git a/lib-python/3/decimal.py b/lib-python/3/decimal.py --- a/lib-python/3/decimal.py +++ b/lib-python/3/decimal.py @@ -1555,7 +1555,13 @@ def __float__(self): """Float representation.""" - return float(str(self)) + if self._isnan(): + if self.is_snan(): + raise ValueError("Cannot convert signaling NaN to float") + s = "-nan" if self._sign else "nan" + else: + s = str(self) + return float(s) def __int__(self): """Converts self to an int, truncating if necessary.""" diff --git a/lib-python/3/distutils/__init__.py b/lib-python/3/distutils/__init__.py --- a/lib-python/3/distutils/__init__.py +++ b/lib-python/3/distutils/__init__.py @@ -13,5 +13,5 @@ # Updated automatically by the Python release process. # #--start constants-- -__version__ = "3.2.3" +__version__ = "3.2.5" #--end constants-- diff --git a/lib-python/3/distutils/command/bdist_rpm.py b/lib-python/3/distutils/command/bdist_rpm.py --- a/lib-python/3/distutils/command/bdist_rpm.py +++ b/lib-python/3/distutils/command/bdist_rpm.py @@ -3,7 +3,7 @@ Implements the Distutils 'bdist_rpm' command (create RPM source and binary distributions).""" -import sys, os +import subprocess, sys, os from distutils.core import Command from distutils.debug import DEBUG from distutils.util import get_platform @@ -190,7 +190,7 @@ if self.fix_python: self.python = sys.executable else: - self.python = "python" + self.python = "python3" elif self.fix_python: raise DistutilsOptionError( "--python and --fix-python are mutually exclusive options") @@ -320,6 +320,7 @@ rpm_cmd.append('-bb') else: rpm_cmd.append('-ba') + rpm_cmd.extend(['--define', '__python %s' % self.python]) if self.rpm3_mode: rpm_cmd.extend(['--define', '_topdir %s' % os.path.abspath(self.rpm_base)]) @@ -405,6 +406,21 @@ 'Summary: ' + self.distribution.get_description(), ] + # Workaround for #14443 which affects some RPM based systems such as + # RHEL6 (and probably derivatives) + vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}') + # 
Generate a potential replacement value for __os_install_post (whilst + # normalizing the whitespace to simplify the test for whether the + # invocation of brp-python-bytecompile passes in __python): + vendor_hook = '\n'.join([' %s \\' % line.strip() + for line in vendor_hook.splitlines()]) + problem = "brp-python-bytecompile \\\n" + fixed = "brp-python-bytecompile %{__python} \\\n" + fixed_hook = vendor_hook.replace(problem, fixed) + if fixed_hook != vendor_hook: + spec_file.append('# Workaround for http://bugs.python.org/issue14443') + spec_file.append('%define __os_install_post ' + fixed_hook + '\n') + # put locale summaries into spec file # XXX not supported for now (hard to put a dictionary # in a config file -- arg!) diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -682,13 +682,13 @@ # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return # ".pypy-VERSION.so" instead. 
- so_ext = _get_c_extension_suffix() - if so_ext is None: - so_ext = get_config_var('SO') # fall-back + ext_suffix = _get_c_extension_suffix() + if ext_suffix is None: + ext_suffix = get_config_var('EXT_SUFFIX') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' - return os.path.join(*ext_path) + so_ext + ext_suffix = '_d.pyd' + return os.path.join(*ext_path) + ext_suffix def get_export_symbols(self, ext): """Return the list of symbols that a shared extension has to diff --git a/lib-python/3/distutils/command/check.py b/lib-python/3/distutils/command/check.py --- a/lib-python/3/distutils/command/check.py +++ b/lib-python/3/distutils/command/check.py @@ -23,6 +23,9 @@ def system_message(self, level, message, *children, **kwargs): self.messages.append((level, message, children, kwargs)) + return nodes.system_message(message, level=level, + type=self.levels[level], + *children, **kwargs) HAS_DOCUTILS = True except Exception: diff --git a/lib-python/3/distutils/command/install.py b/lib-python/3/distutils/command/install.py --- a/lib-python/3/distutils/command/install.py +++ b/lib-python/3/distutils/command/install.py @@ -285,8 +285,8 @@ if self.user and (self.prefix or self.exec_prefix or self.home or self.install_base or self.install_platbase): - raise DistutilsOptionError("can't combine user with with prefix/" - "exec_prefix/home or install_(plat)base") + raise DistutilsOptionError("can't combine user with prefix, " + "exec_prefix/home, or install_(plat)base") # Next, stuff that's wrong (or dubious) only on certain platforms. 
if os.name != "posix": diff --git a/lib-python/3/distutils/command/upload.py b/lib-python/3/distutils/command/upload.py --- a/lib-python/3/distutils/command/upload.py +++ b/lib-python/3/distutils/command/upload.py @@ -125,7 +125,7 @@ if self.sign: data['gpg_signature'] = (os.path.basename(filename) + ".asc", - open(filename+".asc").read()) + open(filename+".asc", "rb").read()) # set up the authentication user_pass = (self.username + ":" + self.password).encode('ascii') diff --git a/lib-python/3/distutils/config.py b/lib-python/3/distutils/config.py --- a/lib-python/3/distutils/config.py +++ b/lib-python/3/distutils/config.py @@ -4,7 +4,6 @@ that uses .pypirc in the distutils.command package. """ import os -import sys from configparser import ConfigParser from distutils.cmd import Command @@ -43,16 +42,8 @@ def _store_pypirc(self, username, password): """Creates a default .pypirc file.""" rc = self._get_rc_file() - f = open(rc, 'w') - try: + with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: f.write(DEFAULT_PYPIRC % (username, password)) - finally: - f.close() - try: - os.chmod(rc, 0o600) - except OSError: - # should do something better here - pass def _read_pypirc(self): """Reads the .pypirc file.""" diff --git a/lib-python/3/distutils/dir_util.py b/lib-python/3/distutils/dir_util.py --- a/lib-python/3/distutils/dir_util.py +++ b/lib-python/3/distutils/dir_util.py @@ -141,6 +141,10 @@ src_name = os.path.join(src, n) dst_name = os.path.join(dst, n) + if n.startswith('.nfs'): + # skip NFS rename files + continue + if preserve_symlinks and os.path.islink(src_name): link_dest = os.readlink(src_name) if verbose >= 1: diff --git a/lib-python/3/distutils/sysconfig.py b/lib-python/3/distutils/sysconfig.py --- a/lib-python/3/distutils/sysconfig.py +++ b/lib-python/3/distutils/sysconfig.py @@ -23,6 +23,3 @@ from distutils.sysconfig_cpython import * from distutils.sysconfig_cpython import _config_vars # needed by setuptools from 
distutils.sysconfig_cpython import _variable_rx # read_setup_file() - -_USE_CLANG = None - diff --git a/lib-python/3/distutils/sysconfig_cpython.py b/lib-python/3/distutils/sysconfig_cpython.py --- a/lib-python/3/distutils/sysconfig_cpython.py +++ b/lib-python/3/distutils/sysconfig_cpython.py @@ -146,7 +146,7 @@ "I don't know where Python installs its library " "on platform '%s'" % os.name) -_USE_CLANG = None + def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. @@ -155,42 +155,28 @@ varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": - (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ + if sys.platform == "darwin": + # Perform first-time customization of compiler-related + # config vars on OS X now that we know we need a compiler. + # This is primarily to support Pythons from binary + # installers. The kind and paths to build tools on + # the user system may vary significantly from the system + # that Python itself was built on. Also the user OS + # version and build tools may not support the same set + # of CPU architectures for universal builds. + global _config_vars + if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): + import _osx_support + _osx_support.customize_compiler(_config_vars) + _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' + + (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') + 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') newcc = None if 'CC' in os.environ: - newcc = os.environ['CC'] - elif sys.platform == 'darwin' and cc == 'gcc-4.2': - # Issue #13590: - # Since Apple removed gcc-4.2 in Xcode 4.2, we can no - # longer assume it is available for extension module builds. 
- # If Python was built with gcc-4.2, check first to see if - # it is available on this system; if not, try to use clang - # instead unless the caller explicitly set CC. - global _USE_CLANG - if _USE_CLANG is None: - from distutils import log - from subprocess import Popen, PIPE - p = Popen("! type gcc-4.2 && type clang && exit 2", - shell=True, stdout=PIPE, stderr=PIPE) - p.wait() - if p.returncode == 2: - _USE_CLANG = True - log.warn("gcc-4.2 not found, using clang instead") - else: - _USE_CLANG = False - if _USE_CLANG: - newcc = 'clang' - if newcc: - # On OS X, if CC is overridden, use that as the default - # command for LDSHARED as well - if (sys.platform == 'darwin' - and 'LDSHARED' not in os.environ - and ldshared.startswith(cc)): - ldshared = newcc + ldshared[len(cc):] - cc = newcc + cc = os.environ['CC'] if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: @@ -225,7 +211,7 @@ linker_exe=cc, archiver=archiver) - compiler.shared_lib_extension = so_ext + compiler.shared_lib_extension = shlib_suffix def get_config_h_filename(): @@ -480,6 +466,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) @@ -499,6 +486,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" global _config_vars @@ -543,43 +531,11 @@ srcdir = os.path.join(base, _config_vars['srcdir']) _config_vars['srcdir'] = os.path.normpath(srcdir) + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. 
- # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _config_vars[key] = flags - - else: - - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _config_vars[key] = flags + import _osx_support + _osx_support.customize_config_vars(_config_vars) if args: vals = [] diff --git a/lib-python/3/distutils/tests/test_bdist_dumb.py b/lib-python/3/distutils/tests/test_bdist_dumb.py --- a/lib-python/3/distutils/tests/test_bdist_dumb.py +++ b/lib-python/3/distutils/tests/test_bdist_dumb.py @@ -88,9 +88,9 @@ fp.close() contents = sorted(os.path.basename(fn) for fn in contents) - wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], - 'foo.%s.pyc' % imp.get_tag(), - 'foo.py'] + wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py'] + if not sys.dont_write_bytecode: + wanted.append('foo.%s.pyc' % imp.get_tag()) self.assertEqual(contents, sorted(wanted)) def test_suite(): diff --git a/lib-python/3/distutils/tests/test_bdist_msi.py b/lib-python/3/distutils/tests/test_bdist_msi.py --- a/lib-python/3/distutils/tests/test_bdist_msi.py +++ b/lib-python/3/distutils/tests/test_bdist_msi.py @@ -1,12 +1,11 @@ """Tests for 
distutils.command.bdist_msi.""" +import sys import unittest -import sys - from test.support import run_unittest - from distutils.tests import support - at unittest.skipUnless(sys.platform=="win32", "These tests are only for win32") + + at unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows') class BDistMSITestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase): @@ -14,10 +13,11 @@ def test_minimal(self): # minimal test XXX need more tests from distutils.command.bdist_msi import bdist_msi - pkg_pth, dist = self.create_dist() + project_dir, dist = self.create_dist() cmd = bdist_msi(dist) cmd.ensure_finalized() + def test_suite(): return unittest.makeSuite(BDistMSITestCase) diff --git a/lib-python/3/distutils/tests/test_build_ext.py b/lib-python/3/distutils/tests/test_build_ext.py --- a/lib-python/3/distutils/tests/test_build_ext.py +++ b/lib-python/3/distutils/tests/test_build_ext.py @@ -73,8 +73,9 @@ self.assertEqual(xx.foo(2, 5), 7) self.assertEqual(xx.foo(13,15), 28) self.assertEqual(xx.new().demo(), None) - doc = 'This is a template module just for instruction.' - self.assertEqual(xx.__doc__, doc) + if support.HAVE_DOCSTRINGS: + doc = 'This is a template module just for instruction.' 
+ self.assertEqual(xx.__doc__, doc) self.assertTrue(isinstance(xx.Null(), xx.Null)) self.assertTrue(isinstance(xx.Str(), xx.Str)) @@ -317,8 +318,8 @@ finally: os.chdir(old_wd) self.assertTrue(os.path.exists(so_file)) - so_ext = sysconfig.get_config_var('SO') - self.assertTrue(so_file.endswith(so_ext)) + ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') + self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, other_tmp_dir) @@ -327,7 +328,7 @@ cmd.run() so_file = cmd.get_outputs()[0] self.assertTrue(os.path.exists(so_file)) - self.assertTrue(so_file.endswith(so_ext)) + self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, cmd.build_lib) @@ -354,7 +355,7 @@ self.assertEqual(lastdir, 'bar') def test_ext_fullpath(self): - ext = sysconfig.get_config_vars()['SO'] + ext = sysconfig.get_config_var('EXT_SUFFIX') # building lxml.etree inplace #etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c') #etree_ext = Extension('lxml.etree', [etree_c]) diff --git a/lib-python/3/distutils/tests/test_dir_util.py b/lib-python/3/distutils/tests/test_dir_util.py --- a/lib-python/3/distutils/tests/test_dir_util.py +++ b/lib-python/3/distutils/tests/test_dir_util.py @@ -76,7 +76,6 @@ remove_tree(self.root_target, verbose=0) - def test_copy_tree_verbosity(self): mkpath(self.target, verbose=0) @@ -88,11 +87,8 @@ mkpath(self.target, verbose=0) a_file = os.path.join(self.target, 'ok.txt') - f = open(a_file, 'w') - try: + with open(a_file, 'w') as f: f.write('some content') - finally: - f.close() wanted = ['copying %s -> %s' % (a_file, self.target2)] copy_tree(self.target, self.target2, verbose=1) @@ -101,6 +97,21 @@ remove_tree(self.root_target, verbose=0) remove_tree(self.target2, verbose=0) + def test_copy_tree_skips_nfs_temp_files(self): + mkpath(self.target, verbose=0) + + a_file = os.path.join(self.target, 'ok.txt') + nfs_file = os.path.join(self.target, '.nfs123abc') + for f in a_file, 
nfs_file: + with open(f, 'w') as fh: + fh.write('some content') + + copy_tree(self.target, self.target2) + self.assertEqual(os.listdir(self.target2), ['ok.txt']) + + remove_tree(self.root_target, verbose=0) + remove_tree(self.target2, verbose=0) + def test_ensure_relative(self): if os.sep == '/': self.assertEqual(ensure_relative('/home/foo'), 'home/foo') diff --git a/lib-python/3/distutils/tests/test_install.py b/lib-python/3/distutils/tests/test_install.py --- a/lib-python/3/distutils/tests/test_install.py +++ b/lib-python/3/distutils/tests/test_install.py @@ -23,7 +23,7 @@ def _make_ext_name(modname): if os.name == 'nt' and sys.executable.endswith('_d.exe'): modname += '_d' - return modname + sysconfig.get_config_var('SO') + return modname + sysconfig.get_config_var('EXT_SUFFIX') class InstallTestCase(support.TempdirManager, @@ -165,7 +165,7 @@ cmd.home = 'home' self.assertRaises(DistutilsOptionError, cmd.finalize_options) - # can't combine user with with prefix/exec_prefix/home or + # can't combine user with prefix/exec_prefix/home or # install_(plat)base cmd.prefix = None cmd.user = 'user' diff --git a/lib-python/3/distutils/tests/test_msvc9compiler.py b/lib-python/3/distutils/tests/test_msvc9compiler.py --- a/lib-python/3/distutils/tests/test_msvc9compiler.py +++ b/lib-python/3/distutils/tests/test_msvc9compiler.py @@ -104,7 +104,7 @@ unittest.TestCase): def test_no_compiler(self): - # makes sure query_vcvarsall throws + # makes sure query_vcvarsall raises # a DistutilsPlatformError if the compiler # is not found from distutils.msvc9compiler import query_vcvarsall diff --git a/lib-python/3/distutils/tests/test_register.py b/lib-python/3/distutils/tests/test_register.py --- a/lib-python/3/distutils/tests/test_register.py +++ b/lib-python/3/distutils/tests/test_register.py @@ -1,5 +1,4 @@ """Tests for distutils.command.register.""" -import sys import os import unittest import getpass @@ -10,11 +9,14 @@ from distutils.command import register as register_module 
from distutils.command.register import register -from distutils.core import Distribution from distutils.errors import DistutilsSetupError -from distutils.tests import support -from distutils.tests.test_config import PYPIRC, PyPIRCCommandTestCase +from distutils.tests.test_config import PyPIRCCommandTestCase + +try: + import docutils +except ImportError: + docutils = None PYPIRC_NOPASSWORD = """\ [distutils] @@ -193,6 +195,7 @@ self.assertEqual(headers['Content-length'], '290') self.assertTrue((b'tarek') in req.data) + @unittest.skipUnless(docutils is not None, 'needs docutils') def test_strict(self): # testing the script option # when on, the register command stops if @@ -205,13 +208,6 @@ cmd.strict = 1 self.assertRaises(DistutilsSetupError, cmd.run) - # we don't test the reSt feature if docutils - # is not installed - try: - import docutils - except ImportError: - return - # metadata are OK but long_description is broken metadata = {'url': 'xxx', 'author': 'xxx', 'author_email': 'éxéxé', @@ -265,6 +261,22 @@ finally: del register_module.input + @unittest.skipUnless(docutils is not None, 'needs docutils') + def test_register_invalid_long_description(self): + description = ':funkie:`str`' # mimic Sphinx-specific markup + metadata = {'url': 'xxx', 'author': 'xxx', + 'author_email': 'xxx', + 'name': 'xxx', 'version': 'xxx', + 'long_description': description} + cmd = self._get_cmd(metadata) + cmd.ensure_finalized() + cmd.strict = True + inputs = Inputs('2', 'tarek', 'tarek at ziade.org') + register_module.input = inputs + self.addCleanup(delattr, register_module, 'input') + + self.assertRaises(DistutilsSetupError, cmd.run) + def test_check_metadata_deprecated(self): # makes sure make_metadata is deprecated cmd = self._get_cmd() diff --git a/lib-python/3/distutils/tests/test_sdist.py b/lib-python/3/distutils/tests/test_sdist.py --- a/lib-python/3/distutils/tests/test_sdist.py +++ b/lib-python/3/distutils/tests/test_sdist.py @@ -6,6 +6,7 @@ import zipfile from os.path 
import join from textwrap import dedent +from test.support import captured_stdout, check_warnings, run_unittest try: import zlib @@ -13,7 +14,6 @@ except ImportError: ZLIB_SUPPORT = False -from test.support import captured_stdout, check_warnings, run_unittest from distutils.command.sdist import sdist, show_formats from distutils.core import Distribution @@ -83,9 +83,8 @@ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') def test_prune_file_list(self): - # this test creates a package with some vcs dirs in it - # and launch sdist to make sure they get pruned - # on all systems + # this test creates a project with some VCS dirs and an NFS rename + # file, then launches sdist to check they get pruned on all systems # creating VCS directories with some files in them os.mkdir(join(self.tmp_dir, 'somecode', '.svn')) @@ -99,6 +98,8 @@ self.write_file((self.tmp_dir, 'somecode', '.git', 'ok'), 'xxx') + self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx') + # now building a sdist dist, cmd = self.get_cmd() @@ -326,6 +327,7 @@ # filling data_files by pointing files in package_data dist.package_data = {'somecode': ['*.txt']} self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#') + cmd.formats = ['gztar'] cmd.ensure_finalized() cmd.run() diff --git a/lib-python/3/distutils/tests/test_sysconfig.py b/lib-python/3/distutils/tests/test_sysconfig.py --- a/lib-python/3/distutils/tests/test_sysconfig.py +++ b/lib-python/3/distutils/tests/test_sysconfig.py @@ -102,7 +102,27 @@ import sysconfig as global_sysconfig self.assertEqual(global_sysconfig.get_config_var('CFLAGS'), sysconfig.get_config_var('CFLAGS')) self.assertEqual(global_sysconfig.get_config_var('LDFLAGS'), sysconfig.get_config_var('LDFLAGS')) - self.assertEqual(global_sysconfig.get_config_var('LDSHARED'),sysconfig.get_config_var('LDSHARED')) + + @unittest.skipIf(sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'),'compiler flags customized') + def test_sysconfig_compiler_vars(self): + # On OS X, 
binary installers support extension module building on + # various levels of the operating system with differing Xcode + # configurations. This requires customization of some of the + # compiler configuration directives to suit the environment on + # the installed machine. Some of these customizations may require From noreply at buildbot.pypy.org Thu Apr 10 03:31:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 10 Apr 2014 03:31:27 +0200 (CEST) Subject: [pypy-commit] pypy default: win32 also needs signal Message-ID: <20140410013127.132951C12F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70529:17a17f188b4a Date: 2014-04-09 18:30 -0700 http://bitbucket.org/pypy/pypy/changeset/17a17f188b4a/ Log: win32 also needs signal diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -19,7 +19,7 @@ usemodules += ['fcntl'] else: # On windows, os.popen uses the subprocess module - usemodules += ['_rawffi', 'thread'] + usemodules += ['_rawffi', 'thread', 'signal'] mod.space = gettestobjspace(usemodules=usemodules) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") From noreply at buildbot.pypy.org Thu Apr 10 03:20:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 10 Apr 2014 03:20:56 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test on win32 Message-ID: <20140410012056.624991C12F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70528:69db55ba6eaf Date: 2014-04-09 18:20 -0700 http://bitbucket.org/pypy/pypy/changeset/69db55ba6eaf/ Log: fix test on win32 diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -308,7 +308,11 @@ def test_fdopen_directory(self): import errno os = self.posix - fd = os.open('/', os.O_RDONLY) + try: 
+ fd = os.open('/', os.O_RDONLY) + except OSError as e: + assert e.errno == errno.EACCES + skip("system cannot open directories") exc = raises(IOError, os.fdopen, fd, 'r') assert exc.value.errno == errno.EISDIR From noreply at buildbot.pypy.org Thu Apr 10 03:47:57 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 10 Apr 2014 03:47:57 +0200 (CEST) Subject: [pypy-commit] pypy default: another fix to make win32 happy Message-ID: <20140410014757.EF1AD1C022D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70530:9b2dff9b875f Date: 2014-04-09 18:47 -0700 http://bitbucket.org/pypy/pypy/changeset/9b2dff9b875f/ Log: another fix to make win32 happy diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -309,7 +309,7 @@ import errno os = self.posix try: - fd = os.open('/', os.O_RDONLY) + fd = os.open('.', os.O_RDONLY) except OSError as e: assert e.errno == errno.EACCES skip("system cannot open directories") From noreply at buildbot.pypy.org Thu Apr 10 10:45:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 10:45:30 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Tweaks to the preamble, answering more directly the question Message-ID: <20140410084530.499D81C10C6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r484:1bb1800c3a16 Date: 2014-04-10 10:45 +0200 http://bitbucket.org/pypy/pypy.org/changeset/1bb1800c3a16/ Log: Tweaks to the preamble, answering more directly the question "yes, it's a PyPy without the GIL". diff --git a/source/tmdonate2.txt b/source/tmdonate2.txt --- a/source/tmdonate2.txt +++ b/source/tmdonate2.txt @@ -49,15 +49,19 @@ We achieved – or overachieved – most goals laid out in the first call by a large margin, while at the same time raising only about half the money. The result of this first step is `described in the documentation -of PyPy`__. 
+of PyPy`__. It is a PyPy without the GIL. In the best (artificial) +examples, it runs only 30% slower than a regular PyPy with the JIT. -The present proposal is about development of the second half: starting -from the various missing low-level optimizations, it will most +The present proposal is about development of the second half: first, +fixing the various missing low-level optimizations (aiming for this +25%-30% figure, but for most cases rather than only special examples). +Then it will most importantly focus on developing the Python-facing interface. This includes both internal things (e.g. do dictionaries need to be more TM-friendly in general?) as well as directly visible things (e.g. some -profiler-like interface to explore common conflicts in a program). It -also includes exploring and tweaking some existing libraries to improve +profiler-like interface to explore common conflicts in a program). +Finally, the third part is +exploring and tweaking some existing libraries to improve their TM-friendliness (e.g. Twisted and Stackless). See also the `update on HTM`_ below. diff --git a/tmdonate2.html b/tmdonate2.html --- a/tmdonate2.html +++ b/tmdonate2.html @@ -80,14 +80,18 @@

    We achieved – or overachieved – most goals laid out in the first call by a large margin, while at the same time raising only about half the money. The result of this first step is described in the documentation -of PyPy.

    -

    The present proposal is about development of the second half: starting -from the various missing low-level optimizations, it will most +of PyPy. It is a PyPy without the GIL. In the best (artificial) +examples, it runs only 30% slower than a regular PyPy with the JIT.

    +

    The present proposal is about development of the second half: first, +fixing the various missing low-level optimizations (aiming for this +25%-30% figure, but for most cases rather than only special examples). +Then it will most importantly focus on developing the Python-facing interface. This includes both internal things (e.g. do dictionaries need to be more TM-friendly in general?) as well as directly visible things (e.g. some -profiler-like interface to explore common conflicts in a program). It -also includes exploring and tweaking some existing libraries to improve +profiler-like interface to explore common conflicts in a program). +Finally, the third part is +exploring and tweaking some existing libraries to improve their TM-friendliness (e.g. Twisted and Stackless).

    See also the update on HTM below.

    From noreply at buildbot.pypy.org Thu Apr 10 11:02:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 11:02:00 +0200 (CEST) Subject: [pypy-commit] pypy default: Document this difference Message-ID: <20140410090200.F11AC1D24CA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70531:5a5b5c41db43 Date: 2014-04-10 11:01 +0200 http://bitbucket.org/pypy/pypy/changeset/5a5b5c41db43/ Log: Document this difference diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -315,6 +315,15 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* on CPython, ``[].__add__`` is a ``method-wrapper``, and + ``list.__add__`` is a ``slot wrapper``. On PyPy these are normal + bound or unbound method objects. This can occasionally confuse some + tools that inspect built-in types. For example, the standard + library ``inspect`` module has a function ``ismethod()`` that returns + True on unbound method objects but False on slot wrappers. On PyPy we + can't tell the difference, and so ``ismethod(list.__add__)`` has to + return True. + * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. Mutating the dict will change the type and vice versa. 
For builtin types, a dictionary will be returned that From noreply at buildbot.pypy.org Thu Apr 10 11:05:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 11:05:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Tweak: ismethod(method-wrapper) also returns False on CPython Message-ID: <20140410090546.454D51D24CA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70532:aa2817a1d5db Date: 2014-04-10 11:05 +0200 http://bitbucket.org/pypy/pypy/changeset/aa2817a1d5db/ Log: Tweak: ismethod(method-wrapper) also returns False on CPython diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -320,9 +320,9 @@ bound or unbound method objects. This can occasionally confuse some tools that inspect built-in types. For example, the standard library ``inspect`` module has a function ``ismethod()`` that returns - True on unbound method objects but False on slot wrappers. On PyPy we - can't tell the difference, and so ``ismethod(list.__add__)`` has to - return True. + True on unbound method objects but False on method-wrappers or slot + wrappers. On PyPy we can't tell the difference, and so + ``ismethod([].__add__)`` has to return True. * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. 
Mutating the dict will change the From noreply at buildbot.pypy.org Thu Apr 10 11:28:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 11:28:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Expand the example Message-ID: <20140410092802.CA14E1D2951@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70533:7ddd3c163570 Date: 2014-04-10 11:27 +0200 http://bitbucket.org/pypy/pypy/changeset/7ddd3c163570/ Log: Expand the example diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -321,8 +321,8 @@ tools that inspect built-in types. For example, the standard library ``inspect`` module has a function ``ismethod()`` that returns True on unbound method objects but False on method-wrappers or slot - wrappers. On PyPy we can't tell the difference, and so - ``ismethod([].__add__)`` has to return True. + wrappers. On PyPy we can't tell the difference, so + ``ismethod([].__add__) == ismethod(list.__add__) == True``. * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. Mutating the dict will change the From noreply at buildbot.pypy.org Thu Apr 10 12:35:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 12:35:45 +0200 (CEST) Subject: [pypy-commit] pypy default: Prebuilt RPython 'iteritems' objects are not supported. In case we see any Message-ID: <20140410103545.04AC71C147D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70534:ea5fb9bdcfe1 Date: 2014-04-10 12:34 +0200 http://bitbucket.org/pypy/pypy/changeset/ea5fb9bdcfe1/ Log: Prebuilt RPython 'iteritems' objects are not supported. In case we see any W_BaseDictMultiIterObject instance, crash early (makes it easier to know where it is). 
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -656,6 +656,10 @@ return self.len - self.pos return 0 + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class BaseKeyIterator(BaseIteratorImplementation): next_key = _new_next('key') @@ -1191,6 +1195,10 @@ w_ret = space.newtuple([new_inst, space.newtuple([w_res])]) return w_ret + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class W_DictMultiIterKeysObject(W_BaseDictMultiIterObject): def descr_next(self, space): From noreply at buildbot.pypy.org Thu Apr 10 12:36:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 12:36:58 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/0492e398156b Message-ID: <20140410103658.9B8501C147D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70535:3b8d9d990e35 Date: 2014-04-09 13:06 +0200 http://bitbucket.org/pypy/pypy/changeset/3b8d9d990e35/ Log: import stmgc/0492e398156b diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -859b241ec058 +0492e398156b diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -10,7 +10,7 @@ PROT_READ | PROT_WRITE, MAP_PAGES_FLAGS, -1, 0); if (result == MAP_FAILED) - stm_fatalerror("%s failed: %m\n", reason); + stm_fatalerror("%s failed: %m", reason); return result; } @@ -132,17 +132,37 @@ teardown_pages(); } +static void _shadowstack_trap_page(char *start, int prot) +{ + size_t bsize = STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s); + char *end = start + bsize + 4095; + 
end -= (((uintptr_t)end) & 4095); + mprotect(end, 4096, prot); +} + static void _init_shadow_stack(stm_thread_local_t *tl) { - struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) - malloc(STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s)); - assert(s); + size_t bsize = STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s); + char *start = malloc(bsize + 8192); /* for the trap page, plus rounding */ + if (!start) + stm_fatalerror("can't allocate shadow stack"); + + /* set up a trap page: if the shadowstack overflows, it will + crash in a clean segfault */ + _shadowstack_trap_page(start, PROT_NONE); + + struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start; tl->shadowstack = s; tl->shadowstack_base = s; } static void _done_shadow_stack(stm_thread_local_t *tl) { + assert(tl->shadowstack >= tl->shadowstack_base); + + char *start = (char *)tl->shadowstack_base; + _shadowstack_trap_page(start, PROT_READ | PROT_WRITE); + free(tl->shadowstack_base); tl->shadowstack = NULL; tl->shadowstack_base = NULL; diff --git a/rpython/translator/stm/src_stm/stm/weakref.c b/rpython/translator/stm/src_stm/stm/weakref.c --- a/rpython/translator/stm/src_stm/stm/weakref.c +++ b/rpython/translator/stm/src_stm/stm/weakref.c @@ -35,7 +35,7 @@ stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); long i; - for (i = 1; i <= NB_SEGMENTS; i++) { + for (i = 0; i <= NB_SEGMENTS; i++) { char *base = get_segment_base(i); object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); *ref_loc = value; @@ -58,11 +58,14 @@ a young outside nursery object. 
*/ assert(_is_in_nursery(item)); object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; + ssize_t size = 16; - /* the following checks are done like in nursery.c: */ - if (!(item->stm_flags & GCFLAG_HAS_SHADOW) - || (pforwarded_array[0] != GCWORD_MOVED)) { - /* weakref dies */ + /* check if the weakref object was moved out of the nursery */ + if (pforwarded_array[0] != GCWORD_MOVED) { + /* no: weakref dies */ +#ifndef NDEBUG + *WEAKREF_PTR(item, size) = (object_t *)-99; +#endif continue; } @@ -70,15 +73,13 @@ assert(!_is_young(item)); - ssize_t size = 16; object_t *pointing_to = *WEAKREF_PTR(item, size); assert(pointing_to != NULL); if (_is_in_nursery(pointing_to)) { object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)pointing_to; - /* the following checks are done like in nursery.c: */ - if (!(pointing_to->stm_flags & GCFLAG_HAS_SHADOW) - || (pforwarded_array[0] != GCWORD_MOVED)) { + /* check if the target was moved out of the nursery */ + if (pforwarded_array[0] != GCWORD_MOVED) { /* pointing_to dies */ _set_weakref_in_all_segments(item, NULL); continue; /* no need to remember in old_weakrefs */ @@ -97,7 +98,9 @@ _set_weakref_in_all_segments(item, NULL); continue; /* no need to remember in old_weakrefs */ } - /* pointing_to was already old */ + /* pointing_to is either a surviving young object outside + the nursery, or it was already old; in both cases keeping + the currently stored pointer is what we need */ } LIST_APPEND(STM_PSEGMENT->old_weakrefs, item); })); @@ -129,7 +132,7 @@ stm_char *wr = (stm_char *)WEAKREF_PTR(weakref, size); char *real_wr = REAL_ADDRESS(pseg->pub.segment_base, wr); object_t *pointing_to = *(object_t **)real_wr; - assert(pointing_to != NULL); + assert((uintptr_t)pointing_to >= NURSERY_END); if (!mark_visited_test(pointing_to)) { //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); _set_weakref_in_all_segments(weakref, NULL); From noreply at buildbot.pypy.org Thu Apr 10 12:36:59 2014 From: 
noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 12:36:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: merge heads Message-ID: <20140410103659.F00111C147D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70536:576da3e3bdab Date: 2014-04-10 12:36 +0200 http://bitbucket.org/pypy/pypy/changeset/576da3e3bdab/ Log: merge heads diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -24,7 +24,7 @@ self.submodules_w = [] def install(self): - """NOT_RPYTHON: install this module, and it's submodules into + """NOT_RPYTHON: install this module, and its submodules into space.builtin_modules""" Module.install(self) if hasattr(self, "submodules"): @@ -33,6 +33,8 @@ for sub_name, module_cls in self.submodules.iteritems(): module_name = space.wrap("%s.%s" % (name, sub_name)) m = module_cls(space, module_name) + if hasattr(m, 'activate') and not m.activate(space): + continue m.install() self.submodules_w.append(m) diff --git a/pypy/interpreter/test/test_mixedmodule.py b/pypy/interpreter/test/test_mixedmodule.py --- a/pypy/interpreter/test/test_mixedmodule.py +++ b/pypy/interpreter/test/test_mixedmodule.py @@ -18,11 +18,25 @@ interpleveldefs = {} appleveldefs = {} + class SubModule1(MixedModule): + interpleveldefs = {} + appleveldefs = {} + def activate(self, space): + return True + + class SubModule2(MixedModule): + interpleveldefs = {} + appleveldefs = {} + def activate(self, space): + return False + class Module(MixedModule): interpleveldefs = {} appleveldefs = {} submodules = { - "sub": SubModule + "sub": SubModule, + "sub1": SubModule1, + "sub2": SubModule2, } m = Module(self.space, self.space.wrap("test_module")) @@ -30,6 +44,8 @@ assert self.space.builtin_modules["test_module"] is m assert isinstance(self.space.builtin_modules["test_module.sub"], SubModule) + assert "test_module.sub1" in self.space.builtin_modules + assert 
"test_module.sub2" not in self.space.builtin_modules class AppTestMixedModule(object): pytestmark = py.test.mark.skipif("config.option.runappdirect") diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -43,6 +43,8 @@ 'discard_last_abort_info': 'interp_atomic.discard_last_abort_info', 'getsegmentlimit': 'interp_atomic.getsegmentlimit', } + def activate(self, space): + return self.space.config.objspace.usemodules.thread class IntOpModule(MixedModule): From noreply at buildbot.pypy.org Thu Apr 10 14:43:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 14:43:16 +0200 (CEST) Subject: [pypy-commit] pypy default: In 59519f8875b6 I made 'withmethodcache' mandatory to have Message-ID: <20140410124316.DAB401C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70537:82e80a7333b2 Date: 2014-04-10 14:42 +0200 http://bitbucket.org/pypy/pypy/changeset/82e80a7333b2/ Log: In 59519f8875b6 I made 'withmethodcache' mandatory to have 'withmapdict'. This reverts this dependency, with Yet Another Lookup Method on W_TypeObjects, which can be called with or without having the method cache. 
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -217,7 +217,7 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withmethodcache", True), + ("objspace.std.withtypeversion", True), ]), BoolOption("withrangelist", diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -873,8 +873,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a TypeCell, which may change without changing the version_tag - assert space.config.objspace.std.withmethodcache - _, w_descr = w_type._pure_lookup_where_with_method_cache( + _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # selector = ("", INVALID) @@ -932,9 +931,8 @@ # in the class, this time taking care of the result: it can be either a # quasi-constant class attribute, or actually a TypeCell --- which we # must not cache. (It should not be None here, but you never know...) 
- assert space.config.objspace.std.withmethodcache - _, w_method = w_type._pure_lookup_where_with_method_cache(name, - version_tag) + _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( + name, version_tag) if w_method is None or isinstance(w_method, TypeCell): return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -371,6 +371,12 @@ w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag) return w_class, unwrap_cell(space, w_value) + def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): + if w_self.space.config.objspace.std.withmethodcache: + return w_self._pure_lookup_where_with_method_cache(name, version_tag) + else: + return w_self._lookup_where_all_typeobjects(name) + @elidable def _pure_lookup_where_with_method_cache(w_self, name, version_tag): space = w_self.space From noreply at buildbot.pypy.org Thu Apr 10 15:24:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 15:24:37 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: In 59519f8875b6 I made 'withmethodcache' mandatory to have Message-ID: <20140410132437.9F0501C147D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70538:c51e48d0ae86 Date: 2014-04-10 14:42 +0200 http://bitbucket.org/pypy/pypy/changeset/c51e48d0ae86/ Log: In 59519f8875b6 I made 'withmethodcache' mandatory to have 'withmapdict'. This reverts this dependency, with Yet Another Lookup Method on W_TypeObjects, which can be called with or without having the method cache. 
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -217,7 +217,7 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withmethodcache", True), + ("objspace.std.withtypeversion", True), ]), BoolOption("withrangelist", diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -873,8 +873,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a TypeCell, which may change without changing the version_tag - assert space.config.objspace.std.withmethodcache - _, w_descr = w_type._pure_lookup_where_with_method_cache( + _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # selector = ("", INVALID) @@ -932,9 +931,8 @@ # in the class, this time taking care of the result: it can be either a # quasi-constant class attribute, or actually a TypeCell --- which we # must not cache. (It should not be None here, but you never know...) 
- assert space.config.objspace.std.withmethodcache - _, w_method = w_type._pure_lookup_where_with_method_cache(name, - version_tag) + _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( + name, version_tag) if w_method is None or isinstance(w_method, TypeCell): return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -371,6 +371,12 @@ w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag) return w_class, unwrap_cell(space, w_value) + def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): + if w_self.space.config.objspace.std.withmethodcache: + return w_self._pure_lookup_where_with_method_cache(name, version_tag) + else: + return w_self._lookup_where_all_typeobjects(name) + @elidable def _pure_lookup_where_with_method_cache(w_self, name, version_tag): space = w_self.space From noreply at buildbot.pypy.org Thu Apr 10 15:24:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 15:24:38 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Trying to fix the parts of mapdict.py that are enabled and Message-ID: <20140410132438.D21321C147D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70539:49bd6de52d50 Date: 2014-04-10 15:23 +0200 http://bitbucket.org/pypy/pypy/changeset/49bd6de52d50/ Log: Trying to fix the parts of mapdict.py that are enabled and the parts that are disabled, in a -stm-jit translation diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -85,6 +85,12 @@ ------------------------------------------------------------ +Re-add the disabled optimization (only outside the jit): +(1) withmethodcache +(2) LOAD_ATTR_caching, LOOKUP_METHOD_mapdict + +------------------------------------------------------------ + diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- 
a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -324,13 +324,15 @@ # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) - #if not config.translation.stm: config.objspace.std.suggest(withmapdict=True) # tweaks some parameters with STM if config.translation.stm: config.objspace.std.suggest(methodcachesizeexp=9) - # XXX try at some point to see if withmapdict=True would make sense + # having both mapdict and methodcache together is a bad idea: + # it creates many conflicts + if config.objspace.std.withmapdict: + config.objspace.std.withmethodcache = False def enable_allworkingmodules(config): diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -123,7 +123,8 @@ self._compute_flatcall() - if self.space.config.objspace.std.withmapdict: + if (self.space.config.objspace.std.withmapdict and + not self.space.config.translation.stm): from pypy.objspace.std.mapdict import init_mapdict_cache init_mapdict_cache(self) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -968,8 +968,9 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() - if (self.space.config.objspace.std.withmapdict - and not jit.we_are_jitted()): + if (self.space.config.objspace.std.withmapdict and + not self.space.config.translation.stm and + not jit.we_are_jitted()): from pypy.objspace.std.mapdict import LOAD_ATTR_caching w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) else: diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -33,7 +33,9 @@ space = f.space w_obj = f.popvalue() - if space.config.objspace.std.withmapdict and not jit.we_are_jitted(): + if (space.config.objspace.std.withmapdict and + not 
space.config.translation.stm and + not jit.we_are_jitted()): # mapdict has an extra-fast version of this function if LOOKUP_METHOD_mapdict(f, nameindex, w_obj): return @@ -60,6 +62,7 @@ f.pushvalue(w_descr) f.pushvalue(w_obj) if (space.config.objspace.std.withmapdict and + not space.config.translation.stm and not jit.we_are_jitted()): # let mapdict cache stuff LOOKUP_METHOD_mapdict_fill_cache_method( From noreply at buildbot.pypy.org Thu Apr 10 15:42:23 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 10 Apr 2014 15:42:23 +0200 (CEST) Subject: [pypy-commit] benchmarks default: some fixes Message-ID: <20140410134223.2F0311C10C6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r249:034f0a5a1032 Date: 2014-04-10 15:43 +0200 http://bitbucket.org/pypy/benchmarks/changeset/034f0a5a1032/ Log: some fixes diff --git a/multithread/btree/btree.py b/multithread/btree/btree.py --- a/multithread/btree/btree.py +++ b/multithread/btree/btree.py @@ -1,6 +1,6 @@ # https://github.com/MartinThoma/algorithms/tree/master/datastructures -from common.abstract_threading import atomic, Future +from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool import time, threading import random @@ -330,6 +330,7 @@ threads = int(threads) operations = int(operations) + set_thread_pool(ThreadPool(threads)) thread_local.rnd = random tree = BTree(20) diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -28,9 +28,10 @@ class ThreadPool(object): - def __init__(self): + def __init__(self, n_workers=None): self.input_queue = Queue() - n_workers = getsegmentlimit() + if n_workers is None: + n_workers = getsegmentlimit() self.workers = [Worker(self.input_queue) for i in range(n_workers)] def add_task(self, func, *args, **kwds): @@ -46,6 +47,9 @@ _thread_pool = ThreadPool() atexit.register(_thread_pool.shutdown) +def 
set_thread_pool(th): + global _thread_pool + _thread_pool = th class Future(object): diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ b/multithread/raytrace/raytrace.py @@ -2,7 +2,7 @@ # Date: 14.03.2013 from math import sqrt, pow, pi -from common.abstract_threading import atomic, Future +from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool import time AMBIENT = 0.1 @@ -133,6 +133,8 @@ (Vector(x/50.0-5,y/50.0-5,0)-cameraPos).normal()) trace(ray, objs, lightSource, 10) time.sleep(0) # XXX + return x + futures = [] def future_dispatcher(ths, *args): @@ -146,6 +148,7 @@ w = int(w) h = int(h) + set_thread_pool(ThreadPool(ths)) objs = [] objs.append(Sphere( Vector(-2,0,-10), 2, Vector(0,255,0))) objs.append(Sphere( Vector(2,0,-10), 3.5, Vector(255,0,0))) @@ -154,12 +157,12 @@ lightSource = Vector(0,10,0) cameraPos = Vector(0,0,20) - + for x in range(w): future_dispatcher(ths, x, h, cameraPos, objs, lightSource) for f in futures: - f() + print f() del futures[:] diff --git a/multithread/skiplist/skiplist.py b/multithread/skiplist/skiplist.py --- a/multithread/skiplist/skiplist.py +++ b/multithread/skiplist/skiplist.py @@ -1,6 +1,6 @@ # https://github.com/kunigami/blog-examples/tree/master/2012-09-23-skip-list -from common.abstract_threading import atomic, Future +from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool import time, threading import random @@ -87,7 +87,6 @@ OPS = [SkipList.find] * 98 + [SkipList.insert, SkipList.remove] - def task(id, slist, ops): print "start task with %s ops" % ops r = random.Random() @@ -114,6 +113,7 @@ threads = int(threads) operations = int(operations) + set_thread_pool(ThreadPool(threads)) thread_local.rnd = random slist = SkipList() From noreply at buildbot.pypy.org Thu Apr 10 16:11:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 16:11:07 +0200 (CEST) Subject: [pypy-commit] pypy 
stmgc-c7: The value 100 doesn't have a special meaning here, it's just a float Message-ID: <20140410141107.A1F801C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70540:6359d9323db1 Date: 2014-04-10 16:10 +0200 http://bitbucket.org/pypy/pypy/changeset/6359d9323db1/ Log: The value 100 doesn't have a special meaning here, it's just a float diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -38,7 +38,7 @@ void pypy_stm_set_transaction_length(double fraction) { - /* the value '100' means 'use the default'. Other values are + /* the value '1.0' means 'use the default'. Other values are interpreted proportionally, up to some maximum. */ long low_fill_mark = (long)(LOW_FILL_MARK * fraction); if (low_fill_mark > NURSERY_SIZE * 3 / 4) From noreply at buildbot.pypy.org Thu Apr 10 18:03:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 18:03:14 +0200 (CEST) Subject: [pypy-commit] stmgc default: Get rid of the mutex_pages_lock. Use more careful lock-free algorithms. Message-ID: <20140410160314.65AB91C10C6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1151:2987d44d8714 Date: 2014-04-10 17:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/2987d44d8714/ Log: Get rid of the mutex_pages_lock. Use more careful lock-free algorithms. 
diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -8,6 +8,22 @@ memset(write_locks, 0, sizeof(write_locks)); } +#ifdef NDEBUG +#define EVENTUALLY(condition) /* nothing */ +#else +#define EVENTUALLY(condition) \ + { \ + if (!(condition)) { \ + while (!__sync_bool_compare_and_swap( \ + &pages_privatizing.by_segment, 0, -1)) \ + spin_loop(); \ + if (!(condition)) \ + stm_fatalerror("fails: " #condition); \ + __sync_lock_release(&pages_privatizing.by_segment); \ + } \ + } +#endif + static void check_flag_write_barrier(object_t *obj) { /* check that all copies of the object, apart from mine, have the @@ -21,12 +37,7 @@ if (i == STM_SEGMENT->segment_num) continue; o1 = (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); - if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) { - mutex_pages_lock(); /* try again... */ - if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) - stm_fatalerror("missing GCFLAG_WRITE_BARRIER"); - mutex_pages_unlock(); - } + EVENTUALLY(o1->stm_flags & GCFLAG_WRITE_BARRIER); } #endif } @@ -271,7 +282,6 @@ with synchronize_object_now() but I don't completely see how to improve... */ - assert(_has_mutex_pages()); assert(!_is_young(obj)); char *segment_base = get_segment_base(source_segment_num); @@ -326,10 +336,7 @@ /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other segments' own private pages. - - This must be called with the mutex_pages_lock! 
*/ - assert(_has_mutex_pages()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); @@ -373,7 +380,7 @@ memcpy(dst, src, copy_size); } else { - assert(memcmp(dst, src, copy_size) == 0); /* same page */ + EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ } for (i = 1; i <= NB_SEGMENTS; i++) { @@ -392,7 +399,7 @@ memcpy(dst, src, copy_size); } else { - assert(memcmp(dst, src, copy_size) == 0); /* same page */ + EVENTUALLY(!memcmp(dst, src, copy_size)); /* same page */ } } @@ -485,12 +492,10 @@ major_collection_now_at_safe_point(); /* synchronize overflow objects living in privatized pages */ - mutex_pages_lock(); push_overflow_objects_from_privatized_pages(); /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); - mutex_pages_unlock(); /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -70,7 +70,6 @@ s_mutex_lock(); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); - mutex_pages_lock(); /* Make a new mmap at some other address, but of the same size as the standard mmap at stm_object_pages @@ -166,7 +165,6 @@ fork_big_copy = NULL; bool was_in_transaction = fork_was_in_transaction; - mutex_pages_unlock(); s_mutex_unlock(); if (!was_in_transaction) { @@ -203,7 +201,6 @@ /* this new process contains no other thread, so we can just release these locks early */ - mutex_pages_unlock(); s_mutex_unlock(); /* Move the copy of the mmap over the old one, overwriting it diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -38,6 +38,7 @@ static void grab_more_free_pages_for_small_allocations(void) { + abort();//XXX /* grab N (= GCPAGE_NUM_PAGES) pages out of the top addresses */ uintptr_t decrease_by = GCPAGE_NUM_PAGES * 4096; if (uninitialized_page_stop - uninitialized_page_start <= decrease_by) @@ -75,17 +76,23 
@@ } +static int lock_growth_large = 0; + static char *allocate_outside_nursery_large(uint64_t size) { - /* thread-safe: use the lock of pages.c to prevent any remapping - from occurring under our feet */ - mutex_pages_lock(); - /* Allocate the object with largemalloc.c from the lower addresses. */ char *addr = _stm_large_malloc(size); if (addr == NULL) stm_fatalerror("not enough memory!"); + if (LIKELY(addr + size <= uninitialized_page_start)) { + return addr; + } + + /* uncommon case: need to initialize some more pages */ + while (__sync_lock_test_and_set(&lock_growth_large, 1) != 0) + spin_loop(); + if (addr + size > uninitialized_page_start) { uintptr_t npages; npages = (addr + size - uninitialized_page_start) / 4096UL; @@ -95,11 +102,10 @@ stm_fatalerror("out of memory!"); /* XXX */ } setup_N_pages(uninitialized_page_start, npages); + __sync_synchronize(); uninitialized_page_start += npages * 4096UL; } - - mutex_pages_unlock(); - + __sync_lock_release(&lock_growth_large); return addr; } @@ -255,7 +261,6 @@ total_allocated by 4096. 
*/ long i; - mutex_pages_lock(); for (i = 1; i <= NB_SEGMENTS; i++) { /* The 'modified_old_objects' list gives the list of objects @@ -305,7 +310,6 @@ for (i = 1; i <= NB_SEGMENTS; i++) { major_restore_private_bits_for_modified_objects(i); } - mutex_pages_unlock(); } @@ -464,9 +468,7 @@ static void sweep_large_objects(void) { - mutex_pages_lock(); _stm_largemalloc_sweep(); - mutex_pages_unlock(); } static void clean_write_locks(void) diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -106,20 +106,37 @@ */ -static dlist_t largebins[N_BINS]; -static mchunk_t *first_chunk, *last_chunk; + +static struct { + int lock; + mchunk_t *first_chunk, *last_chunk; + dlist_t largebins[N_BINS]; +} lm __attribute__((aligned(64))); + + +static void lm_lock(void) +{ + while (UNLIKELY(__sync_lock_test_and_set(&lm.lock, 1) != 0)) + spin_loop(); +} + +static void lm_unlock(void) +{ + assert(lm.lock == 1); + __sync_lock_release(&lm.lock); +} static void insert_unsorted(mchunk_t *new) { size_t index = LAST_BIN_INDEX(new->size) ? 
N_BINS - 1 : largebin_index(new->size); - new->d.next = &largebins[index]; - new->d.prev = largebins[index].prev; + new->d.next = &lm.largebins[index]; + new->d.prev = lm.largebins[index].prev; new->d.prev->next = &new->d; new->u.up = UU_UNSORTED; new->u.down = NULL; - largebins[index].prev = &new->d; + lm.largebins[index].prev = &new->d; } static int compare_chunks(const void *vchunk1, const void *vchunk2) @@ -139,8 +156,8 @@ static void really_sort_bin(size_t index) { - dlist_t *unsorted = largebins[index].prev; - dlist_t *end = &largebins[index]; + dlist_t *unsorted = lm.largebins[index].prev; + dlist_t *end = &lm.largebins[index]; dlist_t *scan = unsorted->prev; size_t count = 1; while (scan != end && data2chunk(scan)->u.up == UU_UNSORTED) { @@ -176,7 +193,7 @@ chunk1 = chunks[--count]; } size_t search_size = chunk1->size; - dlist_t *head = largebins[index].next; + dlist_t *head = lm.largebins[index].next; while (1) { if (head == end || data2chunk(head)->size < search_size) { @@ -218,8 +235,8 @@ static void sort_bin(size_t index) { - dlist_t *last = largebins[index].prev; - if (last != &largebins[index] && data2chunk(last)->u.up == UU_UNSORTED) + dlist_t *last = lm.largebins[index].prev; + if (last != &lm.largebins[index] && data2chunk(last)->u.up == UU_UNSORTED) really_sort_bin(index); } @@ -262,13 +279,15 @@ if (request_size < MIN_ALLOC_SIZE) request_size = MIN_ALLOC_SIZE; + lm_lock(); + size_t index = largebin_index(request_size); sort_bin(index); /* scan through the chunks of current bin in reverse order to find the smallest that fits. */ - dlist_t *scan = largebins[index].prev; - dlist_t *end = &largebins[index]; + dlist_t *scan = lm.largebins[index].prev; + dlist_t *end = &lm.largebins[index]; mchunk_t *mscan; while (scan != end) { mscan = data2chunk(scan); @@ -286,16 +305,17 @@ smallest item of the first non-empty bin, as it will be large enough. 
*/ while (++index < N_BINS) { - if (largebins[index].prev != &largebins[index]) { + if (lm.largebins[index].prev != &lm.largebins[index]) { /* non-empty bin. */ sort_bin(index); - scan = largebins[index].prev; + scan = lm.largebins[index].prev; mscan = data2chunk(scan); goto found; } } /* not enough memory. */ + lm_unlock(); return NULL; found: @@ -336,12 +356,13 @@ mscan->prev_size = BOTH_CHUNKS_USED; increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); + lm_unlock(); + return (char *)&mscan->d; } -void _stm_large_free(char *data) +static void _large_free(mchunk_t *chunk) { - mchunk_t *chunk = data2chunk(data); assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); @@ -349,9 +370,12 @@ increment_total_allocated(-(chunk->size + LARGE_MALLOC_OVERHEAD)); #ifndef NDEBUG - assert(chunk->size >= sizeof(dlist_t)); - assert(chunk->size <= (((char *)last_chunk) - (char *)data)); - memset(data, 0xDE, chunk->size); + { + char *data = (char *)&chunk->d; + assert(chunk->size >= sizeof(dlist_t)); + assert(chunk->size <= (((char *)lm.last_chunk) - data)); + memset(data, 0xDE, chunk->size); + } #endif /* try to merge with the following chunk in memory */ @@ -408,10 +432,18 @@ insert_unsorted(chunk); } +void _stm_large_free(char *data) +{ + lm_lock(); + _large_free(data2chunk(data)); + lm_unlock(); +} + void _stm_large_dump(void) { - char *data = ((char *)first_chunk) + 16; + lm_lock(); + char *data = ((char *)lm.first_chunk) + 16; size_t prev_size_if_free = 0; fprintf(stderr, "\n"); while (1) { @@ -446,12 +478,13 @@ data += 16; } fprintf(stderr, "\n %p: end. 
]\n\n", data - 8); - assert(data - 16 == (char *)last_chunk); + assert(data - 16 == (char *)lm.last_chunk); + lm_unlock(); } char *_stm_largemalloc_data_start(void) { - return (char *)first_chunk; + return (char *)lm.first_chunk; } #ifdef STM_LARGEMALLOC_TEST @@ -462,21 +495,23 @@ { int i; for (i = 0; i < N_BINS; i++) { - largebins[i].prev = &largebins[i]; - largebins[i].next = &largebins[i]; + lm.largebins[i].prev = &lm.largebins[i]; + lm.largebins[i].next = &lm.largebins[i]; } assert(data_size >= 2 * sizeof(struct malloc_chunk)); assert((data_size & 31) == 0); - first_chunk = (mchunk_t *)data_start; - first_chunk->prev_size = THIS_CHUNK_FREE; - first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE; - last_chunk = chunk_at_offset(first_chunk, data_size - CHUNK_HEADER_SIZE); - last_chunk->prev_size = first_chunk->size; - last_chunk->size = END_MARKER; - assert(last_chunk == next_chunk(first_chunk)); + lm.first_chunk = (mchunk_t *)data_start; + lm.first_chunk->prev_size = THIS_CHUNK_FREE; + lm.first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE; + lm.last_chunk = chunk_at_offset(lm.first_chunk, + data_size - CHUNK_HEADER_SIZE); + lm.last_chunk->prev_size = lm.first_chunk->size; + lm.last_chunk->size = END_MARKER; + assert(lm.last_chunk == next_chunk(lm.first_chunk)); + lm.lock = 0; - insert_unsorted(first_chunk); + insert_unsorted(lm.first_chunk); #ifdef STM_LARGEMALLOC_TEST _stm_largemalloc_keep = NULL; @@ -485,57 +520,64 @@ int _stm_largemalloc_resize_arena(size_t new_size) { + int result = 0; + lm_lock(); + if (new_size < 2 * sizeof(struct malloc_chunk)) - return 0; + goto fail; OPT_ASSERT((new_size & 31) == 0); new_size -= CHUNK_HEADER_SIZE; - mchunk_t *new_last_chunk = chunk_at_offset(first_chunk, new_size); - mchunk_t *old_last_chunk = last_chunk; - size_t old_size = ((char *)old_last_chunk) - (char *)first_chunk; + mchunk_t *new_last_chunk = chunk_at_offset(lm.first_chunk, new_size); + mchunk_t *old_last_chunk = lm.last_chunk; + size_t old_size = ((char 
*)old_last_chunk) - (char *)lm.first_chunk; if (new_size < old_size) { /* check if there is enough free space at the end to allow such a reduction */ - size_t lsize = last_chunk->prev_size; + size_t lsize = lm.last_chunk->prev_size; assert(lsize != THIS_CHUNK_FREE); if (lsize == BOTH_CHUNKS_USED) - return 0; + goto fail; lsize += CHUNK_HEADER_SIZE; - mchunk_t *prev_chunk = chunk_at_offset(last_chunk, -lsize); + mchunk_t *prev_chunk = chunk_at_offset(lm.last_chunk, -lsize); if (((char *)new_last_chunk) < ((char *)prev_chunk) + sizeof(struct malloc_chunk)) - return 0; + goto fail; /* unlink the prev_chunk from the doubly-linked list */ unlink_chunk(prev_chunk); /* reduce the prev_chunk */ - assert(prev_chunk->size == last_chunk->prev_size); + assert(prev_chunk->size == lm.last_chunk->prev_size); prev_chunk->size = ((char*)new_last_chunk) - (char *)prev_chunk - CHUNK_HEADER_SIZE; /* make a fresh-new last chunk */ new_last_chunk->prev_size = prev_chunk->size; new_last_chunk->size = END_MARKER; - last_chunk = new_last_chunk; - assert(last_chunk == next_chunk(prev_chunk)); + lm.last_chunk = new_last_chunk; + assert(lm.last_chunk == next_chunk(prev_chunk)); insert_unsorted(prev_chunk); } else if (new_size > old_size) { /* make the new last chunk first, with only the extra size */ - mchunk_t *old_last_chunk = last_chunk; + mchunk_t *old_last_chunk = lm.last_chunk; old_last_chunk->size = (new_size - old_size) - CHUNK_HEADER_SIZE; new_last_chunk->prev_size = BOTH_CHUNKS_USED; new_last_chunk->size = END_MARKER; - last_chunk = new_last_chunk; - assert(last_chunk == next_chunk(old_last_chunk)); + lm.last_chunk = new_last_chunk; + assert(lm.last_chunk == next_chunk(old_last_chunk)); /* then free the last_chunk (turn it from "used" to "free) */ - _stm_large_free((char *)&old_last_chunk->d); + _large_free(old_last_chunk); } - return 1; + + result = 1; + fail: + lm_unlock(); + return result; } @@ -550,15 +592,17 @@ void _stm_largemalloc_sweep(void) { - /* This may be slightly 
optimized by inlining _stm_large_free() and + lm_lock(); + + /* This may be slightly optimized by inlining _large_free() and making cases, e.g. we might know already if the previous block was free or not. It's probably not really worth it. */ - mchunk_t *mnext, *chunk = first_chunk; + mchunk_t *mnext, *chunk = lm.first_chunk; if (chunk->prev_size == THIS_CHUNK_FREE) chunk = next_chunk(chunk); /* go to the first non-free chunk */ - while (chunk != last_chunk) { + while (chunk != lm.last_chunk) { /* here, the chunk we're pointing to is not free */ assert(chunk->prev_size != THIS_CHUNK_FREE); @@ -570,8 +614,10 @@ /* use the callback to know if 'chunk' contains an object that survives or dies */ if (!_largemalloc_sweep_keep(chunk)) { - _stm_large_free((char *)&chunk->d); /* dies */ + _large_free(chunk); /* dies */ } chunk = mnext; } + + lm_unlock(); } diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -75,21 +75,6 @@ uint64_t _stm_total_allocated(void) { - mutex_pages_lock(); - uint64_t result = increment_total_allocated(0); - mutex_pages_unlock(); - return result; + return increment_total_allocated(0); } #endif - -#ifdef STM_LARGEMALLOC_TEST -void _stm_mutex_pages_lock(void) -{ - mutex_pages_lock(); -} - -void _stm_mutex_pages_unlock(void) -{ - mutex_pages_unlock(); -} -#endif diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -195,9 +195,7 @@ content); or add the object to 'large_overflow_objects'. 
*/ if (STM_PSEGMENT->minor_collect_will_commit_now) { - mutex_pages_lock(); synchronize_object_now(obj); - mutex_pages_unlock(); } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); @@ -233,20 +231,13 @@ /* free any object left from 'young_outside_nursery' */ if (!tree_is_cleared(pseg->young_outside_nursery)) { - bool locked = false; wlog_t *item; + TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { assert(!_is_in_nursery((object_t *)item->addr)); - if (!locked) { - mutex_pages_lock(); - locked = true; - } _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; - if (locked) - mutex_pages_unlock(); - tree_clear(pseg->young_outside_nursery); } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -5,16 +5,12 @@ /************************************************************/ -static union { - struct { - uint8_t mutex_pages; - volatile bool major_collection_requested; - uint64_t total_allocated; /* keep track of how much memory we're - using, ignoring nurseries */ - uint64_t total_allocated_bound; - }; - char reserved[64]; -} pages_ctl __attribute__((aligned(64))); +struct { + volatile bool major_collection_requested; + uint64_t total_allocated; /* keep track of how much memory we're + using, ignoring nurseries */ + uint64_t total_allocated_bound; +} pages_ctl; static void setup_pages(void) @@ -28,37 +24,15 @@ memset(pages_privatized, 0, sizeof(pages_privatized)); } -static void mutex_pages_lock(void) -{ - if (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) == 0) - return; - - int previous = change_timing_state(STM_TIME_SPIN_LOOP); - while (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) != 0) { - spin_loop(); - } - change_timing_state(previous); -} - -static void mutex_pages_unlock(void) -{ - __sync_lock_release(&pages_ctl.mutex_pages); -} - -static bool _has_mutex_pages(void) -{ - return pages_ctl.mutex_pages != 0; -} - static uint64_t increment_total_allocated(ssize_t add_or_remove) { - 
assert(_has_mutex_pages()); - pages_ctl.total_allocated += add_or_remove; + uint64_t ta = __sync_add_and_fetch(&pages_ctl.total_allocated, + add_or_remove); - if (pages_ctl.total_allocated >= pages_ctl.total_allocated_bound) + if (ta >= pages_ctl.total_allocated_bound) pages_ctl.major_collection_requested = true; - return pages_ctl.total_allocated; + return ta; } static bool is_major_collection_requested(void) @@ -117,10 +91,12 @@ /* call remap_file_pages() to make all pages in the range(pagenum, pagenum+count) refer to the same physical range of pages from segment 0. */ - uintptr_t i; - assert(_has_mutex_pages()); + dprintf(("pages_initialize_shared: 0x%ld - 0x%ld\n", pagenum, + pagenum + count)); + assert(pagenum < NB_PAGES); if (count == 0) return; + uintptr_t i; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, @@ -130,14 +106,23 @@ static void page_privatize(uintptr_t pagenum) { - if (is_private_page(STM_SEGMENT->segment_num, pagenum)) { - /* the page is already privatized */ + /* check this thread's 'pages_privatized' bit */ + uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); + struct page_shared_s *ps = &pages_privatized[pagenum - PAGE_FLAG_START]; + if (ps->by_segment & bitmask) { + /* the page is already privatized; nothing to do */ return; } - /* lock, to prevent concurrent threads from looking up this thread's - 'pages_privatized' bits in parallel */ - mutex_pages_lock(); +#ifndef NDEBUG + while (__sync_fetch_and_or(&pages_privatizing.by_segment, bitmask) + & bitmask) { + spin_loop(); + } +#endif + + /* add this thread's 'pages_privatized' bit */ + __sync_fetch_and_add(&ps->by_segment, bitmask); /* "unmaps" the page to make the address space location correspond again to its underlying file offset (XXX later we should again @@ -151,11 +136,9 @@ /* copy the content from the shared (segment 0) source */ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); - /* add 
this thread's 'pages_privatized' bit */ - uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); - pages_privatized[pagenum - PAGE_FLAG_START].by_segment |= bitmask; - - mutex_pages_unlock(); +#ifndef NDEBUG + __sync_fetch_and_sub(&pages_privatizing.by_segment, bitmask); +#endif } static void _page_do_reshare(long segnum, uintptr_t pagenum) diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -34,6 +34,20 @@ }; static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; +/* Rules for concurrent access to this array, possibly with is_private_page(): + + - we clear bits only during major collection, when all threads are + synchronized anyway + + - we set only the bit corresponding to our segment number, using + an atomic addition; and we do it _before_ we actually make the + page private. + + - concurrently, other threads checking the bits might (rarely) + get the answer 'true' to is_private_page() even though it is not + actually private yet. This inconsistency is in the direction + that we want for synchronize_object_now(). 
+*/ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); @@ -41,10 +55,6 @@ static void _page_do_reshare(long segnum, uintptr_t pagenum); static void pages_setup_readmarkers_for_nursery(void); -/* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ -static void mutex_pages_lock(void); -static void mutex_pages_unlock(void); -static bool _has_mutex_pages(void) __attribute__((unused)); static uint64_t increment_total_allocated(ssize_t add_or_remove); static bool is_major_collection_requested(void); static void force_major_collection_request(void); @@ -63,4 +73,6 @@ page_reshare(pagenum); } -void _stm_mutex_pages_lock(void); +#ifndef NDEBUG +static struct page_shared_s pages_privatizing = { 0 }; +#endif diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -55,7 +55,6 @@ "minor gc", "major gc", "sync pause", - "spin loop", }; void stm_flush_timing(stm_thread_local_t *tl, int verbose) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -70,7 +70,6 @@ STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, STM_TIME_SYNC_PAUSE, - STM_TIME_SPIN_LOOP, _STM_TIME_N }; @@ -133,8 +132,6 @@ object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); uint64_t _stm_total_allocated(void); -void _stm_mutex_pages_lock(void); -void _stm_mutex_pages_unlock(void); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -91,8 +91,6 @@ void stm_collect(long level); uint64_t _stm_total_allocated(void); -void _stm_mutex_pages_lock(void); -void _stm_mutex_pages_unlock(void); long stm_identityhash(object_t *obj); long stm_id(object_t *obj); diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -14,7 +14,6 @@ 
lib.memset(self.rawmem, 0xcd, self.size) lib._stm_largemalloc_init_arena(self.rawmem, self.size) - lib._stm_mutex_pages_lock() # for this file def test_simple(self): # From noreply at buildbot.pypy.org Thu Apr 10 19:55:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 19:55:18 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Add "(does not update automatically)" below the number of $ Message-ID: <20140410175518.E9E141D2AE9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r485:26461ce67aff Date: 2014-04-10 19:55 +0200 http://bitbucket.org/pypy/pypy.org/changeset/26461ce67aff/ Log: Add "(does not update automatically)" below the number of $ diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,6 +17,7 @@ 2nd call: $0 of $80000 (0.0%) +
    (does not update automatically)
    From noreply at buildbot.pypy.org Thu Apr 10 19:56:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Apr 2014 19:56:06 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Remove the "draft version" banner Message-ID: <20140410175606.1D49E1D2AE9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r486:9e772296792c Date: 2014-04-10 19:56 +0200 http://bitbucket.org/pypy/pypy.org/changeset/9e772296792c/ Log: Remove the "draft version" banner diff --git a/source/tmdonate2.txt b/source/tmdonate2.txt --- a/source/tmdonate2.txt +++ b/source/tmdonate2.txt @@ -9,8 +9,6 @@ .. class:: download_menu - **--- DRAFT VERSION ---** - * `Preamble`_ * `Introduction`_ diff --git a/tmdonate2.html b/tmdonate2.html --- a/tmdonate2.html +++ b/tmdonate2.html @@ -45,7 +45,6 @@

    2nd Call for donations - Transactional Memory in PyPy

    -

    – DRAFT VERSION –

    • Preamble
    • Introduction
    • From noreply at buildbot.pypy.org Thu Apr 10 20:36:21 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 10 Apr 2014 20:36:21 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140410183621.D08131D2951@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70541:ec295dbad48a Date: 2014-04-10 11:35 -0700 http://bitbucket.org/pypy/pypy/changeset/ec295dbad48a/ Log: merge default diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -215,7 +215,7 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withmethodcache", True), + ("objspace.std.withtypeversion", True), ]), BoolOption("withliststrategies", diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -315,6 +315,15 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* on CPython, ``[].__add__`` is a ``method-wrapper``, and + ``list.__add__`` is a ``slot wrapper``. On PyPy these are normal + bound or unbound method objects. This can occasionally confuse some + tools that inspect built-in types. For example, the standard + library ``inspect`` module has a function ``ismethod()`` that returns + True on unbound method objects but False on method-wrappers or slot + wrappers. On PyPy we can't tell the difference, so + ``ismethod([].__add__) == ismethod(list.__add__) == True``. + * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. Mutating the dict will change the type and vice versa. 
For builtin types, a dictionary will be returned that diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -586,6 +586,10 @@ return self.len - self.pos return 0 + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class BaseKeyIterator(BaseIteratorImplementation): next_key = _new_next('key') @@ -1099,6 +1103,10 @@ w_ret = space.newtuple([new_inst, space.newtuple([w_res])]) return w_ret + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class W_DictMultiIterKeysObject(W_BaseDictMultiIterObject): def descr_next(self, space): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -871,8 +871,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a TypeCell, which may change without changing the version_tag - assert space.config.objspace.std.withmethodcache - _, w_descr = w_type._pure_lookup_where_with_method_cache( + _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # selector = ("", INVALID) @@ -930,9 +929,8 @@ # in the class, this time taking care of the result: it can be either a # quasi-constant class attribute, or actually a TypeCell --- which we # must not cache. (It should not be None here, but you never know...) 
- assert space.config.objspace.std.withmethodcache - _, w_method = w_type._pure_lookup_where_with_method_cache(name, - version_tag) + _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( + name, version_tag) if w_method is None or isinstance(w_method, TypeCell): return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -369,6 +369,12 @@ w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag) return w_class, unwrap_cell(space, w_value) + def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): + if w_self.space.config.objspace.std.withmethodcache: + return w_self._pure_lookup_where_with_method_cache(name, version_tag) + else: + return w_self._lookup_where_all_typeobjects(name) + @elidable def _pure_lookup_where_with_method_cache(w_self, name, version_tag): space = w_self.space diff --git a/rpython/jit/backend/x86/support.py b/rpython/jit/backend/x86/support.py --- a/rpython/jit/backend/x86/support.py +++ b/rpython/jit/backend/x86/support.py @@ -7,11 +7,12 @@ extra = ['-DPYPY_X86_CHECK_SSE2'] if sys.platform != 'win32': extra += ['-msse2', '-mfpmath=sse'] + else: + extra += ['/arch:SSE2'] else: extra = [] # the -m options above are always on by default on x86-64 -if sys.platform != 'win32': - extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra +extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = extra, From noreply at buildbot.pypy.org Fri Apr 11 00:13:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 00:13:38 +0200 (CEST) Subject: [pypy-commit] jitviewer default: Found after much digging a minimal change that *at least* display all information. Badly. 
Message-ID: <20140410221338.8B26A1C12F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r258:62ad3e746dac Date: 2014-04-11 00:13 +0200 http://bitbucket.org/pypy/jitviewer/changeset/62ad3e746dac/ Log: Found after much digging a minimal change that *at least* display all information. Badly. diff --git a/_jitviewer/templates/loop.html b/_jitviewer/templates/loop.html --- a/_jitviewer/templates/loop.html +++ b/_jitviewer/templates/loop.html @@ -3,7 +3,7 @@ {% endif %}
      {{ source.inputargs|safe }} {% for sourceline in source.lines %} - {% if sourceline.in_loop %} + {% if sourceline.in_loop or sourceline.chunks %}
      {{sourceline.line}}
      {% if sourceline.chunks %}
      From noreply at buildbot.pypy.org Fri Apr 11 01:27:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 11 Apr 2014 01:27:03 +0200 (CEST) Subject: [pypy-commit] pypy default: py3k compat Message-ID: <20140410232703.757161C0606@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70542:5915a918a053 Date: 2014-04-10 16:26 -0700 http://bitbucket.org/pypy/pypy/changeset/5915a918a053/ Log: py3k compat diff --git a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -48,11 +48,9 @@ raises(OverflowError, _cffi_backend.newp, SHORT_ARRAY, [-40000]) def test_fast_init_longlong_from_list(self): - if type(2 ** 50) is long: - large_int = 2 ** 30 - else: - large_int = 2 ** 50 import _cffi_backend + import sys + large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) LONGLONG = _cffi_backend.new_primitive_type('long long') P_LONGLONG = _cffi_backend.new_pointer_type(LONGLONG) LONGLONG_ARRAY = _cffi_backend.new_array_type(P_LONGLONG, None) From noreply at buildbot.pypy.org Fri Apr 11 10:31:28 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 11 Apr 2014 10:31:28 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: another copy-pasted bit of code Message-ID: <20140411083128.EE3A91C3334@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70543:2412947d5c0f Date: 2014-04-11 00:15 +0200 http://bitbucket.org/pypy/pypy/changeset/2412947d5c0f/ Log: another copy-pasted bit of code diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -286,13 +286,7 @@ self.boxes_created_this_iteration = {} i = 0 while i < len(newoperations): - op = newoperations[i] - 
self.boxes_created_this_iteration[op.result] = None - args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() - for a in args: - self.import_box(a, inputargs, short_jumpargs, []) + self._import_op(newoperations[i], inputargs, short_jumpargs, []) i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) @@ -346,19 +340,7 @@ self.import_box(a, inputargs, short_jumpargs, jumpargs) j += 1 else: - op = newoperations[i] - - self.boxes_created_this_iteration[op.result] = None - args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() - - #if self.optimizer.loop.logops: - # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) - for a in args: - #if self.optimizer.loop.logops: - # debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short_jumpargs, jumpargs) + self._import_op(newoperations[i], inputargs, short_jumpargs, jumpargs) i += 1 newoperations = self.optimizer.get_newoperations() @@ -514,6 +496,16 @@ box = self.optimizer.values[box].force_box(self.optimizer) jumpargs.append(box) + + def _import_op(self, op, inputargs, short_jumpargs, jumpargs): + self.boxes_created_this_iteration[op.result] = None + args = op.getarglist() + if op.is_guard(): + args = args + op.getfailargs() + + for a in args: + self.import_box(a, inputargs, short_jumpargs, jumpargs) + def jump_to_already_compiled_trace(self, jumpop, patchguardop): assert jumpop.getopnum() == rop.JUMP cell_token = jumpop.getdescr() From noreply at buildbot.pypy.org Fri Apr 11 11:11:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 11:11:09 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Add a "see also" link to the doc Message-ID: <20140411091109.108A61C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r487:8c3c298c6cd2 Date: 2014-04-11 11:10 +0200 
http://bitbucket.org/pypy/pypy.org/changeset/8c3c298c6cd2/ Log: Add a "see also" link to the doc diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -88,6 +88,7 @@
      f = open("filename", "w")
      f.write("stuff")
      f.close()

      or using the with keyword

      with open("filename", "w") as f:
      f.write("stuff")
      +

      See more details here.

    • For the same reason, some functions and attributes of the gc module behave in a slightly different way: for example, gc.enable and diff --git a/source/compat.txt b/source/compat.txt --- a/source/compat.txt +++ b/source/compat.txt @@ -74,6 +74,8 @@ with open("filename", "w") as f: f.write("stuff") + See `more details here`_. + * For the same reason, some functions and attributes of the ``gc`` module behave in a slightly different way: for example, ``gc.enable`` and ``gc.disable`` are supported, but instead of enabling and disabling the GC, @@ -98,5 +100,6 @@ .. _`CPython C API`: http://docs.python.org/c-api/ .. _`standard library modules`: http://docs.python.org/library/ .. _`our dev site`: http://pypy.readthedocs.org/en/latest/cpython_differences.html +.. _`more details here`: http://pypy.readthedocs.org/en/latest/cpython_differences.html#differences-related-to-garbage-collection-strategies .. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home From noreply at buildbot.pypy.org Fri Apr 11 11:11:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 11:11:42 +0200 (CEST) Subject: [pypy-commit] pypy default: Document in even more details the issue of delayed __del__, Message-ID: <20140411091142.14F731C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70544:25176f5d15bf Date: 2014-04-11 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/25176f5d15bf/ Log: Document in even more details the issue of delayed __del__, prompted by issue 878. 
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -106,23 +106,43 @@ Differences related to garbage collection strategies ---------------------------------------------------- -Most of the garbage collectors used or implemented by PyPy are not based on +The garbage collectors used or implemented by PyPy are not based on reference counting, so the objects are not freed instantly when they are no longer reachable. The most obvious effect of this is that files are not promptly closed when they go out of scope. For files that are opened for writing, data can be left sitting in their output buffers for a while, making -the on-disk file appear empty or truncated. +the on-disk file appear empty or truncated. Moreover, you might reach your +OS's limit on the number of concurrently opened files. -Fixing this is essentially not possible without forcing a +Fixing this is essentially impossible without forcing a reference-counting approach to garbage collection. The effect that you get in CPython has clearly been described as a side-effect of the implementation and not a language design decision: programs relying on this are basically bogus. It would anyway be insane to try to enforce CPython's behavior in a language spec, given that it has no chance to be adopted by Jython or IronPython (or any other port of Python to Java or -.NET, like PyPy itself). +.NET). -This affects the precise time at which ``__del__`` methods are called, which +Even the naive idea of forcing a full GC when we're getting dangerously +close to the OS's limit can be very bad in some cases. If your program +leaks open files heavily, then it would work, but force a complete GC +cycle every n'th leaked file. The value of n is a constant, but the +program can take an arbitrary amount of memory, which makes a complete +GC cycle arbitrarily long. 
The end result is that PyPy would spend an +arbitrarily large fraction of its run time in the GC --- slowing down +the actual execution, not by 10% nor 100% nor 1000% but by essentially +any factor. + +To the best of our knowledge this problem has no better solution than +fixing the programs. If it occurs in 3rd-party code, this means going +to the authors and explaining the problem to them: they need to close +their open files in order to run on any non-CPython-based implementation +of Python. + +--------------------------------- + +Here are some more technical details. This issue affects the precise +time at which ``__del__`` methods are called, which is not reliable in PyPy (nor Jython nor IronPython). It also means that weak references may stay alive for a bit longer than expected. This makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less From noreply at buildbot.pypy.org Fri Apr 11 11:15:20 2014 From: noreply at buildbot.pypy.org (techtonik) Date: Fri, 11 Apr 2014 11:15:20 +0200 (CEST) Subject: [pypy-commit] pypy default: makeref: Write file in binary mode for compatibility with Windows Message-ID: <20140411091520.78F5B1C02AF@cobra.cs.uni-duesseldorf.de> Author: anatoly techtonik Branch: Changeset: r70545:98d1457a6766 Date: 2014-04-10 13:52 +0300 http://bitbucket.org/pypy/pypy/changeset/98d1457a6766/ Log: makeref: Write file in binary mode for compatibility with Windows diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -52,7 +52,7 @@ lines.append(".. 
_`%s`: %s" %(linknamelist[-1], linktarget)) lines.append('') - reffile.write("\n".join(lines)) + reffile.write("\n".join(lines), mode="wb") print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x From noreply at buildbot.pypy.org Fri Apr 11 11:15:21 2014 From: noreply at buildbot.pypy.org (techtonik) Date: Fri, 11 Apr 2014 11:15:21 +0200 (CEST) Subject: [pypy-commit] pypy default: makeref: Execute from any directory Message-ID: <20140411091521.B3D2D1C02AF@cobra.cs.uni-duesseldorf.de> Author: anatoly techtonik Branch: Changeset: r70546:9f856c1374ce Date: 2014-04-10 13:57 +0300 http://bitbucket.org/pypy/pypy/changeset/9f856c1374ce/ Log: makeref: Execute from any directory diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy From noreply at buildbot.pypy.org Fri Apr 11 11:15:22 2014 From: noreply at buildbot.pypy.org (techtonik) Date: Fri, 11 Apr 2014 11:15:22 +0200 (CEST) Subject: [pypy-commit] pypy default: makeref: Record how _ref.txt is made and when it is regenerated Message-ID: <20140411091522.E76E31C02AF@cobra.cs.uni-duesseldorf.de> Author: anatoly techtonik Branch: Changeset: r70547:b7703990f632 Date: 2014-04-10 14:28 +0300 http://bitbucket.org/pypy/pypy/changeset/b7703990f632/ Log: makeref: Record how _ref.txt is made and when it is regenerated diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,3 +1,6 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. 
_`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + + diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -60,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines), mode="wb") + content = ".. This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x From noreply at buildbot.pypy.org Fri Apr 11 11:15:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 11:15:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in techtonik/pypy (pull request #225) Message-ID: <20140411091524.1443D1C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70548:e5faefcb6e67 Date: 2014-04-11 11:14 +0200 http://bitbucket.org/pypy/pypy/changeset/e5faefcb6e67/ Log: Merged in techtonik/pypy (pull request #225) doc/tool/makeref.py: Windows compatibility and origin info in _ref.txt diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,3 +1,6 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. 
_`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + + diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy @@ -51,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines)) + content = ".. 
This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x From noreply at buildbot.pypy.org Fri Apr 11 12:48:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 12:48:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Update to cffi/237031079adc Message-ID: <20140411104821.2EB621C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70549:9d1e757b1e78 Date: 2014-04-11 12:47 +0200 http://bitbucket.org/pypy/pypy/changeset/9d1e757b1e78/ Log: Update to cffi/237031079adc diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1091,7 +1091,7 @@ def test_read_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1101,7 +1101,7 @@ def test_read_variable_as_unknown_length_array(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1113,7 +1113,7 @@ def test_write_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## 
https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') From noreply at buildbot.pypy.org Fri Apr 11 12:52:19 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 11 Apr 2014 12:52:19 +0200 (CEST) Subject: [pypy-commit] pypy py3k: copy import library pragma from cpython Message-ID: <20140411105219.3122A1C022D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: py3k Changeset: r70550:4384d75f8e85 Date: 2014-04-11 10:34 +0300 http://bitbucket.org/pypy/pypy/changeset/4384d75f8e85/ Log: copy import library pragma from cpython diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -38,9 +38,9 @@ * taken care of by distutils.) */ # ifdef _DEBUG # error("debug first with cpython") -# pragma comment(lib,"python27.lib") +# pragma comment(lib,"python32.lib") # else -# pragma comment(lib,"python27.lib") +# pragma comment(lib,"python32.lib") # endif /* _DEBUG */ # endif #endif /* _MSC_VER */ From noreply at buildbot.pypy.org Fri Apr 11 12:52:20 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 11 Apr 2014 12:52:20 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup after 8d015b1d68df Message-ID: <20140411105220.64D521C022D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70551:428d22a9c50f Date: 2014-04-11 09:33 +0300 http://bitbucket.org/pypy/pypy/changeset/428d22a9c50f/ Log: cleanup after 8d015b1d68df diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -14,7 +14,6 @@ from rpython.rlib.longlong2float import float2longlong, longlong2float from 
rpython.rlib.rarithmetic import ovfcheck, is_valid_int from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.tool.cbuild import ExternalCompilationInfo class BasicTests: @@ -3229,12 +3228,9 @@ self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): - eci = ExternalCompilationInfo() - if sys.platform == "win32": - eci = ExternalCompilationInfo(libraries=["msvcrt"]) T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True, compilation_info=eci) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) # Not a real lock, has all the same properties with respect to GIL # release though, so good for this test. class Lock(object): @@ -3922,13 +3918,10 @@ self.interp_operations(f, []) def test_external_call(self): - eci = ExternalCompilationInfo() - if sys.platform == "win32": - eci = ExternalCompilationInfo(libraries=["msvcrt"]) from rpython.rlib.objectmodel import invoke_around_extcall T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, compilation_info=eci) + external = rffi.llexternal("time", [T], rffi.TIME_T) class Oups(Exception): pass From noreply at buildbot.pypy.org Fri Apr 11 12:52:21 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 11 Apr 2014 12:52:21 +0200 (CEST) Subject: [pypy-commit] pypy default: time_t is longlong on some platforms Message-ID: <20140411105221.8A5E81C022D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70552:ad0756a3d160 Date: 2014-04-11 09:34 +0300 http://bitbucket.org/pypy/pypy/changeset/ad0756a3d160/ Log: time_t is longlong on some platforms diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3945,9 +3945,9 @@ external(lltype.nullptr(T.TO)) return len(state.l) - res = self.interp_operations(f, []) + res = 
self.interp_operations(f, [], supports_longlong=True) assert res == 2 - res = self.interp_operations(f, []) + res = self.interp_operations(f, [], supports_longlong=True) assert res == 2 self.check_operations_history(call_release_gil=1, call_may_force=0) From noreply at buildbot.pypy.org Fri Apr 11 16:35:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 16:35:17 +0200 (CEST) Subject: [pypy-commit] pypy default: A failing test for isvirtual() after the start of a bridge Message-ID: <20140411143517.D21731C333C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70553:43da018e7015 Date: 2014-04-11 16:04 +0200 http://bitbucket.org/pypy/pypy/changeset/43da018e7015/ Log: A failing test for isvirtual() after the start of a bridge diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3579,6 +3579,24 @@ 'guard_true': 2, 'int_sub': 2, 'jump': 1, 'guard_false': 1}) + def test_virtual_after_bridge(self): + myjitdriver = JitDriver(greens = [], reds = ["n"]) + @look_inside_iff(lambda x: isvirtual(x)) + def g(x): + return x[0] + def f(n): + while n > 0: + myjitdriver.jit_merge_point(n=n) + x = [1] + if n & 1: # bridge + n -= g(x) + else: + n -= g(x) + return n + res = self.meta_interp(f, [10]) + assert res == 0 + self.check_resops(call=0, call_may_force=0, new_array=0) + def test_convert_from_SmallFunctionSetPBCRepr_to_FunctionsPBCRepr(self): f1 = lambda n: n+1 From noreply at buildbot.pypy.org Fri Apr 11 16:35:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 16:35:19 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the test in 43da018e7015: consistently call new methods on the metainterp Message-ID: <20140411143519.1CB4D1C333C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70554:0d99c4a82b27 Date: 2014-04-11 16:34 +0200 
http://bitbucket.org/pypy/pypy/changeset/0d99c4a82b27/ Log: Fix the test in 43da018e7015: consistently call new methods on the metainterp from both the opimpl_xxx and from resume.py diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -387,24 +387,17 @@ @arguments("descr") def opimpl_new(self, sizedescr): - resbox = self.execute_with_descr(rop.NEW, sizedescr) - self.metainterp.heapcache.new(resbox) - return resbox + return self.metainterp.execute_new(sizedescr) @arguments("descr") def opimpl_new_with_vtable(self, sizedescr): cpu = self.metainterp.cpu cls = heaptracker.descr2vtable(cpu, sizedescr) - resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls)) - self.metainterp.heapcache.new(resbox) - self.metainterp.heapcache.class_now_known(resbox) - return resbox + return self.metainterp.execute_new_with_vtable(ConstInt(cls)) @arguments("box", "descr") def opimpl_new_array(self, lengthbox, itemsizedescr): - resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, lengthbox) - self.metainterp.heapcache.new_array(resbox, lengthbox) - return resbox + return self.metainterp.execute_new_array(itemsizedescr, lengthbox) @specialize.arg(1) def _do_getarrayitem_gc_any(self, op, arraybox, indexbox, arraydescr): @@ -467,10 +460,8 @@ @arguments("box", "box", "box", "descr") def _opimpl_setarrayitem_gc_any(self, arraybox, indexbox, itembox, arraydescr): - self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox, - indexbox, itembox) - self.metainterp.heapcache.setarrayitem( - arraybox, indexbox, itembox, arraydescr) + self.metainterp.execute_setarrayitem_gc(arraydescr, arraybox, + indexbox, itembox) opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any @@ -623,21 +614,22 @@ tobox = self.metainterp.heapcache.getfield(box, fielddescr) if tobox is valuebox: return - # The following test is disabled 
because buggy. It is supposed + self.metainterp.execute_setfield_gc(fielddescr, box, valuebox) + # The following logic is disabled because buggy. It is supposed # to be: not(we're writing null into a freshly allocated object) # but the bug is that is_unescaped() can be True even after the # field cache is cleared --- see test_ajit:test_unescaped_write_zero - if 1: # tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): - self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) - self.metainterp.heapcache.setfield(box, valuebox, fielddescr) + # + # if tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): + # self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) + # self.metainterp.heapcache.setfield(box, valuebox, fielddescr) opimpl_setfield_gc_i = _opimpl_setfield_gc_any opimpl_setfield_gc_r = _opimpl_setfield_gc_any opimpl_setfield_gc_f = _opimpl_setfield_gc_any @arguments("box", "box", "box", "descr") def _opimpl_setinteriorfield_gc_any(self, array, index, value, descr): - self.execute_with_descr(rop.SETINTERIORFIELD_GC, descr, - array, index, value) + self.metainterp.execute_setinteriorfield_gc(descr, array, index, value) opimpl_setinteriorfield_gc_i = _opimpl_setinteriorfield_gc_any opimpl_setinteriorfield_gc_f = _opimpl_setinteriorfield_gc_any opimpl_setinteriorfield_gc_r = _opimpl_setinteriorfield_gc_any @@ -664,8 +656,8 @@ @arguments("box", "box", "box", "descr") def _opimpl_raw_store(self, addrbox, offsetbox, valuebox, arraydescr): - self.execute_with_descr(rop.RAW_STORE, arraydescr, - addrbox, offsetbox, valuebox) + self.metainterp.execute_raw_store(arraydescr, + addrbox, offsetbox, valuebox) opimpl_raw_store_i = _opimpl_raw_store opimpl_raw_store_f = _opimpl_raw_store @@ -1891,6 +1883,41 @@ self.attach_debug_info(op) return resbox + def execute_new_with_vtable(self, known_class): + 
resbox = self.execute_and_record(rop.NEW_WITH_VTABLE, None, + known_class) + self.heapcache.new(resbox) + self.heapcache.class_now_known(resbox) + return resbox + + def execute_new(self, typedescr): + resbox = self.execute_and_record(rop.NEW, typedescr) + self.heapcache.new(resbox) + return resbox + + def execute_new_array(self, itemsizedescr, lengthbox): + resbox = self.execute_and_record(rop.NEW_ARRAY, itemsizedescr, + lengthbox) + self.heapcache.new_array(resbox, lengthbox) + return resbox + + def execute_setfield_gc(self, fielddescr, box, valuebox): + self.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox) + self.heapcache.setfield(box, valuebox, fielddescr) + + def execute_setarrayitem_gc(self, arraydescr, arraybox, indexbox, itembox): + self.execute_and_record(rop.SETARRAYITEM_GC, arraydescr, + arraybox, indexbox, itembox) + self.heapcache.setarrayitem(arraybox, indexbox, itembox, arraydescr) + + def execute_setinteriorfield_gc(self, descr, array, index, value): + self.execute_and_record(rop.SETINTERIORFIELD_GC, descr, + array, index, value) + + def execute_raw_store(self, arraydescr, addrbox, offsetbox, valuebox): + self.execute_and_record(rop.RAW_STORE, arraydescr, + addrbox, offsetbox, valuebox) + def attach_debug_info(self, op): if (not we_are_translated() and op is not None diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -954,15 +954,14 @@ return virtualizable_boxes, virtualref_boxes def allocate_with_vtable(self, known_class): - return self.metainterp.execute_and_record(rop.NEW_WITH_VTABLE, - None, known_class) + return self.metainterp.execute_new_with_vtable(known_class) def allocate_struct(self, typedescr): - return self.metainterp.execute_and_record(rop.NEW, typedescr) + return self.metainterp.execute_new(typedescr) def allocate_array(self, length, arraydescr): - return self.metainterp.execute_and_record(rop.NEW_ARRAY, - 
arraydescr, ConstInt(length)) + lengthbox = ConstInt(length) + return self.metainterp.execute_new_array(arraydescr, lengthbox) def allocate_raw_buffer(self, size): cic = self.metainterp.staticdata.callinfocollection @@ -1034,8 +1033,7 @@ else: kind = INT fieldbox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETFIELD_GC, descr, - structbox, fieldbox) + self.metainterp.execute_setfield_gc(descr, structbox, fieldbox) def setinteriorfield(self, index, array, fieldnum, descr): if descr.is_pointer_field(): @@ -1045,8 +1043,8 @@ else: kind = INT fieldbox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETINTERIORFIELD_GC, descr, - array, ConstInt(index), fieldbox) + self.metainterp.execute_setinteriorfield_gc(descr, array, + ConstInt(index), fieldbox) def setarrayitem_int(self, arraybox, index, fieldnum, arraydescr): self._setarrayitem(arraybox, index, fieldnum, arraydescr, INT) @@ -1059,9 +1057,8 @@ def _setarrayitem(self, arraybox, index, fieldnum, arraydescr, kind): itembox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETARRAYITEM_GC, - arraydescr, arraybox, - ConstInt(index), itembox) + self.metainterp.execute_setarrayitem_gc(arraydescr, arraybox, + ConstInt(index), itembox) def setrawbuffer_item(self, bufferbox, fieldnum, offset, arraydescr): if arraydescr.is_array_of_pointers(): @@ -1071,8 +1068,8 @@ else: kind = INT itembox = self.decode_box(fieldnum, kind) - return self.metainterp.execute_and_record(rop.RAW_STORE, arraydescr, bufferbox, - ConstInt(offset), itembox) + self.metainterp.execute_raw_store(arraydescr, bufferbox, + ConstInt(offset), itembox) def decode_int(self, tagged): return self.decode_box(tagged, INT) diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -93,6 +93,32 @@ self.resboxes.append(resbox) return resbox + def 
execute_new_with_vtable(self, known_class): + return self.execute_and_record(rop.NEW_WITH_VTABLE, None, + known_class) + + def execute_new(self, typedescr): + return self.execute_and_record(rop.NEW, typedescr) + + def execute_new_array(self, itemsizedescr, lengthbox): + return self.execute_and_record(rop.NEW_ARRAY, itemsizedescr, + lengthbox) + + def execute_setfield_gc(self, fielddescr, box, valuebox): + self.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox) + + def execute_setarrayitem_gc(self, arraydescr, arraybox, indexbox, itembox): + self.execute_and_record(rop.SETARRAYITEM_GC, arraydescr, + arraybox, indexbox, itembox) + + def execute_setinteriorfield_gc(self, descr, array, index, value): + self.execute_and_record(rop.SETINTERIORFIELD_GC, descr, + array, index, value) + + def execute_raw_store(self, arraydescr, addrbox, offsetbox, valuebox): + self.execute_and_record(rop.RAW_STORE, arraydescr, + addrbox, offsetbox, valuebox) + S = lltype.GcStruct('S') gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)) gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)) From noreply at buildbot.pypy.org Fri Apr 11 16:58:21 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 11 Apr 2014 16:58:21 +0200 (CEST) Subject: [pypy-commit] pypy default: small changes Message-ID: <20140411145821.DB9691C12F3@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70555:8ddedf73edcd Date: 2014-04-03 19:33 +0200 http://bitbucket.org/pypy/pypy/changeset/8ddedf73edcd/ Log: small changes diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -26,8 +26,8 @@ ============ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% of the speed of a -regular PyPy, comparing the JITting version in both cases. 
It is called +listed below, it should be in theory within 25%-50% slower than a +regular PyPy, comparing the JIT version in both cases. It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). @@ -55,9 +55,9 @@ interested in trying it out, you can download a Ubuntu 12.04 binary here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, but not stripped of debug symbols). The current version supports four -"segments", which means that it will run up to four threads in parallel -(in other words, you get a GIL effect again, but only if trying to -execute more than 4 threads). +"segments", which means that it will run up to four threads in parallel, +in other words it is running a thread pool up to 4 threads emulating normal +threads. To build a version from sources, you first need to compile a custom version of clang; we recommend downloading `llvm and clang like From noreply at buildbot.pypy.org Fri Apr 11 16:58:23 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 11 Apr 2014 16:58:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Some basic customization for rlib parsing (geez, RPython does not compose Message-ID: <20140411145823.103741C12F3@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70556:2cd438d1ecea Date: 2014-04-11 16:56 +0200 http://bitbucket.org/pypy/pypy/changeset/2cd438d1ecea/ Log: Some basic customization for rlib parsing (geez, RPython does not compose very well) diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -8,7 +8,7 @@ self.source_pos = source_pos def copy(self): - return Token(self.name, self.source, self.source_pos) + return self.__class__(self.name, self.source, self.source_pos) def __eq__(self, other): # for testing only @@ -57,9 +57,9 @@ self.ignore = dict.fromkeys(ignore) self.matcher = 
self.automaton.make_lexing_code() - def get_runner(self, text, eof=False): + def get_runner(self, text, eof=False, token_class=None): return LexingDFARunner(self.matcher, self.automaton, text, - self.ignore, eof) + self.ignore, eof, token_class=token_class) def tokenize(self, text, eof=False): """Return a list of Token's from text.""" @@ -184,7 +184,12 @@ return self class LexingDFARunner(AbstractLexingDFARunner): - def __init__(self, matcher, automaton, text, ignore, eof=False): + def __init__(self, matcher, automaton, text, ignore, eof=False, + token_class=None): + if token_class is None: + self.token_class = Token + else: + self.token_class = token_class AbstractLexingDFARunner.__init__(self, matcher, automaton, text, eof) self.ignore = ignore @@ -195,6 +200,6 @@ assert (eof and state == -1) or 0 <= state < len(self.automaton.names) source_pos = SourcePos(index, self.lineno, self.columnno) if eof: - return Token("EOF", "EOF", source_pos) - return Token(self.automaton.names[self.last_matched_state], - text, source_pos) + return self.token_class("EOF", "EOF", source_pos) + return self.token_class(self.automaton.names[self.last_matched_state], + text, source_pos) From noreply at buildbot.pypy.org Fri Apr 11 16:58:25 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 11 Apr 2014 16:58:25 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20140411145825.BF8EE1C12F3@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70557:09d491e8c01d Date: 2014-04-11 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/09d491e8c01d/ Log: merge diff too long, truncating to 2000 out of 4582 lines diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,7 +48,6 @@ def tearDown(self): os.chdir(self.old_dir) - gc.collect() for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: 
os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -162,6 +162,7 @@ # Remark: Do not perform more than one test per open file, # since that does NOT catch the readline error on Windows. data = 'xxx' + self.f.close() for mode in ['w', 'wb', 'a', 'ab']: for attr in ['read', 'readline', 'readlines']: self.f = open(TESTFN, mode) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -217,7 +217,7 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withmethodcache", True), + ("objspace.std.withtypeversion", True), ]), BoolOption("withrangelist", diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,10 +1,12 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py -.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ .. 
_`pypy/bin/pyinteractive.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/pyinteractive.py @@ -35,7 +37,6 @@ .. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py .. _`pypy/interpreter/pyparser`: @@ -49,21 +50,21 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. _`pypy/module/cppyy/capi/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/__init__.py +.. _`pypy/module/cppyy/capi/builtin_capi.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/builtin_capi.py +.. _`pypy/module/cppyy/include/capi.h`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/include/capi.h +.. _`pypy/module/test_lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/test_lib_pypy/ .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ -.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ .. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ -.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. 
_`pypy/objspace/std/bytesobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/bytesobject.py .. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py .. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py -.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. _`pypy/objspace/std/strbufobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/strbufobject.py .. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py -.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py -.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py .. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ -.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`rpython/annotator`: .. _`rpython/annotator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/annotator/ @@ -75,6 +76,11 @@ .. _`rpython/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/config/translationoption.py .. _`rpython/flowspace/`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/ .. _`rpython/flowspace/model.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/model.py +.. _`rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/ +.. 
_`rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/generation.py +.. _`rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/hybrid.py +.. _`rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/minimarkpage.py +.. _`rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/semispace.py .. _`rpython/rlib`: .. _`rpython/rlib/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/ .. _`rpython/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/listsort.py @@ -93,16 +99,12 @@ .. _`rpython/rtyper/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ .. _`rpython/rtyper/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/ .. _`rpython/rtyper/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/lltype.py -.. _`rpython/rtyper/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/ -.. _`rpython/rtyper/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/generation.py -.. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py -.. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py -.. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py .. 
_`rpython/rtyper/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rtyper.py .. _`rpython/rtyper/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/test/test_llinterp.py +.. _`rpython/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/rpython/tool/algo/ .. _`rpython/translator`: .. _`rpython/translator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/ .. _`rpython/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/backendopt/ diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -9,9 +9,3 @@ distribution.rst - dot-net.rst - - - - - diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -742,9 +742,9 @@ Testing modules in ``lib_pypy/`` -------------------------------- -You can go to the `lib_pypy/pypy_test/`_ directory and invoke the testing tool +You can go to the `pypy/module/test_lib_pypy/`_ directory and invoke the testing tool ("py.test" or "python ../../pypy/test_all.py") to run tests against the -lib_pypy hierarchy. Note, that tests in `lib_pypy/pypy_test/`_ are allowed +lib_pypy hierarchy. Note, that tests in `pypy/module/test_lib_pypy/`_ are allowed and encouraged to let their tests run at interpreter level although `lib_pypy/`_ modules eventually live at PyPy's application level. This allows us to quickly test our python-coded reimplementations @@ -835,15 +835,6 @@ web interface. .. _`development tracker`: https://bugs.pypy.org/ - -use your codespeak login or register ------------------------------------- - -If you have an existing codespeak account, you can use it to login within the -tracker. Else, you can `register with the tracker`_ easily. - - -.. _`register with the tracker`: https://bugs.pypy.org/user?@template=register .. 
_`roundup`: http://roundup.sourceforge.net/ diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst --- a/pypy/doc/config/opt.rst +++ b/pypy/doc/config/opt.rst @@ -46,5 +46,5 @@ The default level is `2`. -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser garbage collector`: http://hboehm.info/gc/ .. _`custom garbage collectors`: ../garbage_collection.html diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.txt --- a/pypy/doc/config/translation.backendopt.txt +++ b/pypy/doc/config/translation.backendopt.txt @@ -1,5 +1,5 @@ This group contains options about various backend optimization passes. Most of them are described in the `EU report about optimization`_ -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU report about optimization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst --- a/pypy/doc/cppyy_backend.rst +++ b/pypy/doc/cppyy_backend.rst @@ -51,3 +51,6 @@ to ``PATH``). In case of the former, include files are expected under ``$ROOTSYS/include`` and libraries under ``$ROOTSYS/lib``. + + +.. include:: _ref.txt diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -106,23 +106,43 @@ Differences related to garbage collection strategies ---------------------------------------------------- -Most of the garbage collectors used or implemented by PyPy are not based on +The garbage collectors used or implemented by PyPy are not based on reference counting, so the objects are not freed instantly when they are no longer reachable. 
The most obvious effect of this is that files are not promptly closed when they go out of scope. For files that are opened for writing, data can be left sitting in their output buffers for a while, making -the on-disk file appear empty or truncated. +the on-disk file appear empty or truncated. Moreover, you might reach your +OS's limit on the number of concurrently opened files. -Fixing this is essentially not possible without forcing a +Fixing this is essentially impossible without forcing a reference-counting approach to garbage collection. The effect that you get in CPython has clearly been described as a side-effect of the implementation and not a language design decision: programs relying on this are basically bogus. It would anyway be insane to try to enforce CPython's behavior in a language spec, given that it has no chance to be adopted by Jython or IronPython (or any other port of Python to Java or -.NET, like PyPy itself). +.NET). -This affects the precise time at which ``__del__`` methods are called, which +Even the naive idea of forcing a full GC when we're getting dangerously +close to the OS's limit can be very bad in some cases. If your program +leaks open files heavily, then it would work, but force a complete GC +cycle every n'th leaked file. The value of n is a constant, but the +program can take an arbitrary amount of memory, which makes a complete +GC cycle arbitrarily long. The end result is that PyPy would spend an +arbitrarily large fraction of its run time in the GC --- slowing down +the actual execution, not by 10% nor 100% nor 1000% but by essentially +any factor. + +To the best of our knowledge this problem has no better solution than +fixing the programs. If it occurs in 3rd-party code, this means going +to the authors and explaining the problem to them: they need to close +their open files in order to run on any non-CPython-based implementation +of Python. + +--------------------------------- + +Here are some more technical details. 
This issue affects the precise +time at which ``__del__`` methods are called, which is not reliable in PyPy (nor Jython nor IronPython). It also means that weak references may stay alive for a bit longer than expected. This makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less @@ -315,6 +335,15 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* on CPython, ``[].__add__`` is a ``method-wrapper``, and + ``list.__add__`` is a ``slot wrapper``. On PyPy these are normal + bound or unbound method objects. This can occasionally confuse some + tools that inspect built-in types. For example, the standard + library ``inspect`` module has a function ``ismethod()`` that returns + True on unbound method objects but False on method-wrappers or slot + wrappers. On PyPy we can't tell the difference, so + ``ismethod([].__add__) == ismethod(list.__add__) == True``. + * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. Mutating the dict will change the type and vice versa. For builtin types, a dictionary will be returned that diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -47,7 +47,7 @@ `pypy/tool/`_ various utilities and hacks used from various places -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic +`rpython/tool/algo/`_ general-purpose algorithmic and mathematic tools `pypy/tool/pytest/`_ support code for our `testing methods`_ @@ -129,3 +129,4 @@ .. _Mono: http://www.mono-project.com/ .. _`"standard library"`: rlib.html .. _`graph viewer`: getting-started-dev.html#try-out-the-translator +.. 
include:: _ref.txt diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,5 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - ============================= lib_pypy/distributed features ============================= diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -17,7 +17,7 @@ Read more in the `EuroPython 2006 sprint report`_. -.. _`EuroPython 2006 sprint report`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt +.. _`EuroPython 2006 sprint report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt PyPy at XP 2006 and Agile 2006 ================================================================== @@ -41,8 +41,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. _report: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.html -.. 
_announcement: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.txt PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -114,7 +114,7 @@ said they were interested in the outcome and would keep an eye on its progress. Read the `talk slides`_. -.. _`talk slides`: http://codespeak.net/pypy/extradoc/talk/solutions-linux-paris-2006.html +.. _`talk slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/solutions-linux-paris-2006.html PyPy Sprint in Palma De Mallorca 23rd - 29th January 2006 @@ -129,9 +129,9 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/sprint-announcement.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q1/002746.html -.. _`one for the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2006q1/002749.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.txt +.. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html +.. _`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html Preliminary EU reports released =============================== @@ -155,8 +155,8 @@ Michael and Carl have written a `report about the first half`_ and `one about the second half`_ of the sprint. *(12/18/2005)* -.. _`report about the first half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002656.html -.. _`one about the second half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002660.html +.. _`report about the first half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002656.html +.. 
_`one about the second half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002660.html PyPy release 0.8.0 =================== @@ -187,12 +187,12 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: http://codespeak.net/pypy/extradoc/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.txt .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style -.. _`report about day one`: http://codespeak.net/pipermail/pypy-dev/2005q4/002510.html -.. _`one about day two and three`: http://codespeak.net/pipermail/pypy-dev/2005q4/002512.html -.. _`the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2005q4/002514.html +.. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html +.. _`one about day two and three`: https://mail.python.org/pipermail/pypy-dev/2005-October/002512.html +.. _`the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2005-October/002514.html PyPy release 0.7.0 =================== @@ -217,15 +217,13 @@ Its main focus is translation of the whole PyPy interpreter to a low level language and reaching 2.4.1 Python compliance. The goal of the sprint is to release a first self-contained -PyPy-0.7 version. Carl has written a report about `day 1 - 3`_, -there are `some pictures`_ online and a `heidelberg summary report`_ -detailing some of the works that led to the successful release -of `pypy-0.7.0`_! +PyPy-0.7 version. Carl has written a report about `day 1 - 3`_ +and a `heidelberg summary report`_ detailing some of the works +that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.html -.. _`PyPy sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-sprint.html -.. 
_`day 1 - 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002287.html -.. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt +.. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html PyPy Hildesheim2 finished: first self-contained PyPy run! =========================================================== @@ -233,20 +231,16 @@ Up until 31st August we were in a PyPy sprint at `Trillke-Gut`_. Carl has written a `report about day 1`_, Holger about `day 2 and day 3`_ and Carl again about `day 4 and day 5`_, -On `day 6`_ Holger reports the `breakthrough`_: PyPy runs -on its own! Hurray_!. And Carl finally reports about the winding +On `day 6`_ Holger reports the breakthrough: PyPy runs +on its own! Hurray!. And Carl finally reports about the winding down of `day 7`_ which saw us relaxing, discussing and generally -having a good time. You might want to look at the selected -`pictures from the sprint`_. +having a good time. -.. _`report about day 1`: http://codespeak.net/pipermail/pypy-dev/2005q3/002217.html -.. _`day 2 and day 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002220.html -.. _`day 4 and day 5`: http://codespeak.net/pipermail/pypy-dev/2005q3/002234.html -.. _`day 6`: http://codespeak.net/pipermail/pypy-dev/2005q3/002239.html -.. _`day 7`: http://codespeak.net/pipermail/pypy-dev/2005q3/002245.html -.. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg -.. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html -.. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ +.. _`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html +.. 
_`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html +.. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html +.. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html +.. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html .. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished @@ -264,15 +258,15 @@ the LLVM backends and type inference in general. *(07/13/2005)* -.. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html -.. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html -.. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev +.. _`day 1`: https://mail.python.org/pipermail/pypy-dev/2005-June/002169.html +.. _`day 2`: https://mail.python.org/pipermail/pypy-dev/2005-June/002171.html +.. _`day 3`: https://mail.python.org/pipermail/pypy-dev/2005-June/002172.html +.. _`pypy-dev`: https://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-announcement.html -.. _`list of people coming`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-people.html +.. _`sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-announcement.html +.. _`list of people coming`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-people.html Duesseldorf PyPy sprint 2-9 June 2006 ================================================================== @@ -285,8 +279,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. 
_`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.txt +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy at XP 2006 and Agile 2006 diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -79,7 +79,7 @@ .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf -.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 +.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf .. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf @@ -356,8 +356,6 @@ .. _`transparent dynamic optimization`: http://www.hpl.hp.com/techreports/1999/HPL-1999-77.pdf .. _Dynamo: http://www.hpl.hp.com/techreports/1999/HPL-1999-78.pdf .. _testdesign: coding-guide.html#test-design -.. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html -.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ .. _IronPython: http://ironpython.codeplex.com/ .. 
_`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -459,6 +459,19 @@ .. _`getting-started`: getting-started-python.html +------------------------------------------ +Compiling PyPy swaps or runs out of memory +------------------------------------------ + +This is documented (here__ and here__). It needs 4 GB of RAM to run +"rpython targetpypystandalone" on top of PyPy, a bit more when running +on CPython. If you have less than 4 GB it will just swap forever (or +fail if you don't have enough swap). On 32-bit, divide the numbers by +two. + +.. __: http://pypy.org/download.html#building-from-source +.. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter + .. _`how do I compile my own interpreters`: ------------------------------------- diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -222,7 +222,7 @@ PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use for other projects. +`_ and use for other projects. The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -264,7 +264,7 @@ interpreter. .. _`py.test testing tool`: http://pytest.org -.. _`py.test usage and invocations`: http://pytest.org/usage.html#usage +.. _`py.test usage and invocations`: http://pytest.org/latest/usage.html#usage Special Introspection Features of the Untranslated Python Interpreter --------------------------------------------------------------------- diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - .. 
_glossary: ******** diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -256,7 +256,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + + diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -185,6 +185,6 @@ .. _`standard object space`: objspace.html#the-standard-object-space .. [D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy - EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf + EU-Report, 2007, https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf .. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -339,44 +339,35 @@ Object types ------------ -The larger part of the `pypy/objspace/std/`_ package defines and implements the -library of Python's standard built-in object types. Each type (int, float, -list, tuple, str, type, etc.) is typically implemented by two modules: +The larger part of the `pypy/objspace/std/`_ package defines and +implements the library of Python's standard built-in object types. Each +type ``xxx`` (int, float, list, tuple, str, type, etc.) is typically +implemented in the module ``xxxobject.py``. 
-* the *type specification* module, which for a type ``xxx`` is called ``xxxtype.py``; +The ``W_AbstractXxxObject`` class, when present, is the abstract base +class, which mainly defines what appears on the Python-level type +object. There are then actual implementations as subclasses, which are +called ``W_XxxObject`` or some variant for the cases where we have +several different implementations. For example, +`pypy/objspace/std/bytesobject.py`_ defines ``W_AbstractBytesObject``, +which contains everything needed to build the ``str`` app-level type; +and there are subclasses ``W_BytesObject`` (the usual string) and +``W_StringBufferObject`` (a special implementation tweaked for repeated +additions, in `pypy/objspace/std/strbufobject.py`_). For mutable data +types like lists and dictionaries, we have a single class +``W_ListObject`` or ``W_DictMultiObject`` which has an indirection to +the real data and a strategy; the strategy can change as the content of +the object changes. -* the *implementation* module, called ``xxxobject.py``. - -The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods -specific to lists, like ``append()``. - -A particular method implemented by all types is the ``__new__()`` special -method, which in Python's new-style-classes world is responsible for creating -an instance of the type. In PyPy, ``__new__()`` locates and imports the module -implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ -defines ``__new__()`` to import the class ``W_TupleObject`` from -`pypy/objspace/std/tupleobject.py`_ and instantiate it. 
The `pypy/objspace/std/tupleobject.py`_ then contains a -"real" implementation of tuples: the way the data is stored in the -``W_TupleObject`` class, how the operations work, etc. - -The goal of the above module layout is to cleanly separate the Python -type object, visible to the user, and the actual implementation of its -instances. It is possible to provide *several* implementations of the -instances of the same Python type, by writing several ``W_XxxObject`` -classes. Every place that instantiates a new object of that Python type -can decide which ``W_XxxObject`` class to instantiate. - -From the user's point of view, the multiple internal ``W_XxxObject`` -classes are not visible: they are still all instances of exactly the -same Python type. PyPy knows that (e.g.) the application-level type of -its interpreter-level ``W_StringObject`` instances is str because -there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all -other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringtype.py`_. +From the user's point of view, even when there are several +``W_AbstractXxxObject`` subclasses, this is not visible: at the +app-level, they are still all instances of exactly the same Python type. +PyPy knows that (e.g.) the application-level type of its +interpreter-level ``W_BytesObject`` instances is str because there is a +``typedef`` class attribute in ``W_BytesObject`` which points back to +the string type specification from `pypy/objspace/std/bytesobject.py`_; +all other implementations of strings use the same ``typedef`` from +`pypy/objspace/std/bytesobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -387,6 +378,9 @@ Multimethods ------------ +*Note: multimethods are on the way out. 
Although they look cool, +they failed to provide enough benefits.* + The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. For a description of the multimethod variant that we implemented and which features it supports, @@ -491,7 +485,7 @@ Introduction ------------ -The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `rpython/flowspace/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py +.. 
_`extfunc.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/extfunc.py diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,30 +42,30 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): http://codespeak.net/pypy/extradoc/sprintinfo/HildesheimReport.html - .. _Gothenburg (May 2003): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2003-sprintreport.txt - .. _LovainLaNeuve (June 2003): http://codespeak.net/pypy/extradoc/sprintinfo/LouvainLaNeuveReport.txt - .. _Berlin (Sept 2003): http://codespeak.net/pypy/extradoc/sprintinfo/BerlinReport.txt - .. _Amsterdam (Dec 2003): http://codespeak.net/pypy/extradoc/sprintinfo/AmsterdamReport.txt - .. _Vilnius (Nov 2004): http://codespeak.net/pypy/extradoc/sprintinfo/vilnius-2004-sprintreport.txt - .. _Leysin (Jan 2005): http://codespeak.net/pypy/extradoc/sprintinfo/LeysinReport.txt - .. _PyCon/Washington (March 2005): http://codespeak.net/pypy/extradoc/sprintinfo/pycon_sprint_report.txt - .. _Europython/Gothenburg (June 2005): http://codespeak.net/pypy/extradoc/sprintinfo/ep2005-sprintreport.txt - .. _Hildesheim (July 2005): http://codespeak.net/pypy/extradoc/sprintinfo/hildesheim2005-sprintreport.txt - .. _Heidelberg (Aug 2005): http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.txt - .. _Paris (Oct 2005): http://codespeak.net/pypy/extradoc/sprintinfo/paris/paris-report.txt - .. _Gothenburg (Dec 2005): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt - .. _Mallorca (Jan 2006): http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/mallorca-sprintreport.txt - .. _LouvainLaNeuve (March 2006): http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.txt - .. _Leysin (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2006-sprintreport.txt - .. 
_Tokyo (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/sprint-report.txt - .. _Düsseldorf (June 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/report1.txt - .. _Europython/Geneva (July 2006): http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt - .. _Düsseldorf (October 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/report.txt - .. _`Leysin (January 2007)`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/report.txt - .. _Hildesheim (Feb 2007): http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/sprint-report.txt - .. _`EU report writing sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/eu-report-sprint-report.txt - .. _`PyCon/Dallas (Feb 2006)`: http://codespeak.net/pypy/extradoc/sprintinfo/pycon06/sprint-report.txt + .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.txt + .. _Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt + .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt + .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt + .. _Amsterdam (Dec 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/AmsterdamReport.txt + .. _Vilnius (Nov 2004): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/vilnius-2004-sprintreport.txt + .. _Leysin (Jan 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LeysinReport.txt + .. _PyCon/Washington (March 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon_sprint_report.txt + .. _Europython/Gothenburg (June 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ep2005-sprintreport.txt + .. _Hildesheim (July 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/hildesheim2005-sprintreport.txt + .. 
_Heidelberg (Aug 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt + .. _Paris (Oct 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris/paris-report.txt + .. _Gothenburg (Dec 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt + .. _Mallorca (Jan 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/mallorca-sprintreport.txt + .. _LouvainLaNeuve (March 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt + .. _Leysin (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2006-sprintreport.txt + .. _Tokyo (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/tokyo/sprint-report.txt + .. _Düsseldorf (June 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/report1.txt + .. _Europython/Geneva (July 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt + .. _Düsseldorf (October 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006b/report.txt + .. _`Leysin (January 2007)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2007/report.txt + .. _Hildesheim (Feb 2007): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/sprint-report.txt + .. _`EU report writing sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/eu-report-sprint-report.txt + .. _`PyCon/Dallas (Feb 2006)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon06/sprint-report.txt .. _`Göteborg (November 2007)`: http://morepypy.blogspot.com/2007_11_01_archive.html .. _`Leysin (January 2008)`: http://morepypy.blogspot.com/2008/01/leysin-winter-sport-sprint-started.html .. 
_`Berlin (May 2008)`: http://morepypy.blogspot.com/2008_05_01_archive.html diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst --- a/pypy/doc/statistic/index.rst +++ b/pypy/doc/statistic/index.rst @@ -1,3 +1,7 @@ +.. warning:: + + This page is no longer updated, of historical interest only. + ======================= PyPy Project Statistics ======================= diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -15,11 +15,11 @@ user, describes work in progress, and finally gives references to more implementation details. -This work was done by Remi Meier and Armin Rigo. Thanks to all donors -for crowd-funding the work so far! Please have a look at the 2nd call -for donation (*not ready yet*) +This work was done mostly by Remi Meier and Armin Rigo. Thanks to all +donors for crowd-funding the work so far! Please have a look at the +`2nd call for donation`_. -.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html +.. _`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction @@ -80,6 +80,10 @@ * So far, small examples work fine, but there are still a number of bugs. We're busy fixing them. +* Currently limited to 1.5 GB of RAM (this is just a parameter in + `core.h`__). Memory overflows are not detected correctly, so may + cause segmentation faults. + * The JIT warm-up time is abysmal (as opposed to the regular PyPy's, which is "only" bad). Moreover, you should run it with a command like ``pypy-stm --jit trace_limit=60000 args...``; the default value of @@ -95,9 +99,11 @@ programs that modify large lists or dicts, suffer from these missing optimizations. -* The GC has no support for destructors: the ``__del__`` method is - never called (including on file objects, which won't be closed for - you). This is of course temporary. +* The GC has no support for destructors: the ``__del__`` method is never + called (including on file objects, which won't be closed for you). 
+ This is of course temporary. Also, weakrefs might appear to work a + bit strangely for now (staying alive even though ``gc.collect()``, or + even dying but then un-dying for a short time before dying again). * The STM system is based on very efficient read/write barriers, which are mostly done (their placement could be improved a bit in @@ -120,6 +126,7 @@ probably, several days or more. .. _`report bugs`: https://bugs.pypy.org/ +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h @@ -194,9 +201,9 @@ unchanged. This capability can be hidden in a library or in the framework you use; the end user's code does not need to be explicitly aware of using threads. For a simple example of this, see -`lib_pypy/transaction.py`_. The idea is that if you have a program -where the function ``f(key, value)`` runs on every item of some big -dictionary, you can replace the loop with:: +`transaction.py`_ in ``lib_pypy``. The idea is that if you have a +program where the function ``f(key, value)`` runs on every item of some +big dictionary, you can replace the loop with:: for key, value in bigdict.items(): transaction.add(f, key, value) @@ -217,7 +224,7 @@ is likely to be found, and communicates it to the system, using for example the ``transaction.add()`` scheme. -.. _`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +.. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py .. 
_OpenMP: http://en.wikipedia.org/wiki/OpenMP ================== diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy @@ -51,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines)) + content = ".. This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -380,7 +380,7 @@ The RPython Typer ================= -https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ The RTyper is the first place where the choice of backend makes a difference; as outlined above we are assuming that ANSI C is the target. @@ -603,7 +603,7 @@ - using the `Boehm-Demers-Weiser conservative garbage collector`_ - using one of our custom `exact GCs implemented in RPython`_ -.. _`Boehm-Demers-Weiser conservative garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser conservative garbage collector`: http://hboehm.info/gc/ .. 
_`exact GCs implemented in RPython`: garbage_collection.html Almost all application-level Python code allocates objects at a very fast @@ -621,7 +621,7 @@ The C Back-End ============== -https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -86,7 +86,7 @@ option (this is the default at some optimization levels like ``-O1``, but unneeded for high-performance translations like ``-O2``). You may get it at -http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_source/gc-7.1.tar.gz +http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with pypy. Unpack this folder in the base directory. Then open a command diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1091,7 +1091,7 @@ def test_read_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1101,7 +1101,7 @@ def test_read_variable_as_unknown_length_array(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): 
py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1113,7 +1113,7 @@ def test_write_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -48,11 +48,9 @@ raises(OverflowError, _cffi_backend.newp, SHORT_ARRAY, [-40000]) def test_fast_init_longlong_from_list(self): - if type(2 ** 50) is long: - large_int = 2 ** 30 - else: - large_int = 2 ** 50 import _cffi_backend + import sys + large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) LONGLONG = _cffi_backend.new_primitive_type('long long') P_LONGLONG = _cffi_backend.new_pointer_type(LONGLONG) LONGLONG_ARRAY = _cffi_backend.new_array_type(P_LONGLONG, None) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -137,6 +137,7 @@ self.check_mode_ok(mode) stream = streamio.fdopen_as_stream(fd, mode, buffering, signal_checker(self.space)) + self.check_not_dir(fd) self.fdopenstream(stream, fd, mode) def direct_close(self): diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -528,7 +528,7 @@ f = open(fn) exc = raises(EnvironmentError, f.truncate, 3) if sys.platform == 'win32': - assert exc.value.winerror == 5 # ERROR_ACCESS_DENIED + assert exc.value.errno == 5 # 
ERROR_ACCESS_DENIED else: # CPython explicitely checks the file mode # PyPy relies on the libc to raise the error diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -19,7 +19,7 @@ usemodules += ['fcntl'] else: # On windows, os.popen uses the subprocess module - usemodules += ['_rawffi', 'thread'] + usemodules += ['_rawffi', 'thread', 'signal'] mod.space = gettestobjspace(usemodules=usemodules) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") @@ -305,6 +305,17 @@ finally: __builtins__.file = _file + def test_fdopen_directory(self): + import errno + os = self.posix + try: + fd = os.open('.', os.O_RDONLY) + except OSError as e: + assert e.errno == errno.EACCES + skip("system cannot open directories") + exc = raises(IOError, os.fdopen, fd, 'r') + assert exc.value.errno == errno.EISDIR + def test_getcwd(self): assert isinstance(self.posix.getcwd(), str) assert isinstance(self.posix.getcwdu(), unicode) @@ -340,7 +351,6 @@ else: assert (unicode, u) in typed_result - def test_access(self): pdir = self.pdir + '/file1' posix = self.posix @@ -351,7 +361,6 @@ if sys.platform != "win32": assert not posix.access(pdir, posix.X_OK) - def test_times(self): """ posix.times() should return a five-tuple giving float-representations @@ -1156,8 +1165,8 @@ res = os.system(cmd) assert res == 0 + class AppTestPosixUnicode: - def setup_class(cls): cls.space = space cls.w_posix = space.appexec([], GET_POSIX) @@ -1198,6 +1207,7 @@ except OSError: pass + class AppTestUnicodeFilename: def setup_class(cls): ufilename = (unicode(udir.join('test_unicode_filename_')) + diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -2,27 +2,33 @@ Logic to find sys.executable and the initial sys.path containing the stdlib """ -import sys +import errno import os import 
stat -import errno +import sys + from rpython.rlib import rpath from rpython.rlib.objectmodel import we_are_translated + from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state -platform = sys.platform -IS_WINDOWS = sys.platform == 'win32' +PLATFORM = sys.platform +_MACOSX = sys.platform == 'darwin' +_WIN32 = sys.platform == 'win32' + def find_executable(executable): """ - Return the absolute path of the executable, by looking into PATH and the - current directory. If it cannot be found, return ''. + Return the absolute path of the executable, by looking into PATH and + the current directory. If it cannot be found, return ''. """ - if we_are_translated() and IS_WINDOWS and not executable.lower().endswith('.exe'): + if (we_are_translated() and _WIN32 and + not executable.lower().endswith('.exe')): executable += '.exe' - if os.sep in executable or (IS_WINDOWS and ':' in executable): - pass # the path is already more than just an executable name + if os.sep in executable or (_WIN32 and ':' in executable): + # the path is already more than just an executable name + pass else: path = os.environ.get('PATH') if path: @@ -35,15 +41,15 @@ # 'sys.executable' should not end up being an non-existing file; # just use '' in this case. (CPython issue #7774) - if not os.path.isfile(executable): - executable = '' - return executable + return executable if os.path.isfile(executable) else '' + def _readlink_maybe(filename): - if not IS_WINDOWS: + if not _WIN32: return os.readlink(filename) raise NotImplementedError + def resolvedirof(filename): filename = rpath.rabspath(filename) dirname = rpath.rabspath(os.path.join(filename, '..')) @@ -56,36 +62,37 @@ return resolvedirof(os.path.join(dirname, link)) return dirname + def find_stdlib(state, executable): """ Find and compute the stdlib path, starting from the directory where - ``executable`` is and going one level up until we find it. 
Return a tuple - (path, prefix), where ``prefix`` is the root directory which contains the - stdlib. - If it cannot be found, return (None, None). + ``executable`` is and going one level up until we find it. Return a + tuple (path, prefix), where ``prefix`` is the root directory which + contains the stdlib. If it cannot be found, return (None, None). """ - if executable == '': - executable = 'pypy-c' - search = executable + search = 'pypy-c' if executable == '' else executable while True: dirname = resolvedirof(search) if dirname == search: - return None, None # not found :-( + return None, None # not found :-( newpath = compute_stdlib_path_maybe(state, dirname) if newpath is not None: return newpath, dirname search = dirname # walk to the parent directory + def _checkdir(path): st = os.stat(path) if not stat.S_ISDIR(st[0]): raise OSError(errno.ENOTDIR, path) + def compute_stdlib_path(state, prefix): """ - Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` must at - least contain a directory called ``lib-python/X.Y`` and another one called - ``lib_pypy``. If they cannot be found, it raises OSError. + Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` + must at least contain a directory called ``lib-python/X.Y`` and + another one called ``lib_pypy``. If they cannot be found, it raises + OSError. """ from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d.%d' % (CPYTHON_VERSION[0], @@ -111,41 +118,42 @@ importlist.append(lib_tk) # List here the extra platform-specific paths. 
- if platform != 'win32': - importlist.append(os.path.join(python_std_lib, 'plat-'+platform)) - if platform == 'darwin': + if not _WIN32: + importlist.append(os.path.join(python_std_lib, 'plat-' + PLATFORM)) + if _MACOSX: platmac = os.path.join(python_std_lib, 'plat-mac') importlist.append(platmac) importlist.append(os.path.join(platmac, 'lib-scriptpackages')) return importlist + def compute_stdlib_path_maybe(state, prefix): - """ - Return the stdlib path rooted at ``prefix``, or None if it cannot be - found. + """Return the stdlib path rooted at ``prefix``, or None if it cannot + be found. """ try: return compute_stdlib_path(state, prefix) except OSError: return None + @unwrap_spec(executable='str0') def pypy_find_executable(space, executable): return space.wrap(find_executable(executable)) + @unwrap_spec(filename='str0') def pypy_resolvedirof(space, filename): return space.wrap(resolvedirof(filename)) + @unwrap_spec(executable='str0') def pypy_find_stdlib(space, executable): path, prefix = find_stdlib(get_state(space), executable) if path is None: return space.w_None - else: - space.setitem(space.sys.w_dict, space.wrap('prefix'), - space.wrap(prefix)) - space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), - space.wrap(prefix)) - return space.newlist([space.wrap(p) for p in path]) + w_prefix = space.wrap(prefix) + space.setitem(space.sys.w_dict, space.wrap('prefix'), w_prefix) + space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), w_prefix) + return space.newlist([space.wrap(p) for p in path]) diff --git a/pypy/module/sys/test/test_initpath.py b/pypy/module/sys/test/test_initpath.py --- a/pypy/module/sys/test/test_initpath.py +++ b/pypy/module/sys/test/test_initpath.py @@ -84,7 +84,7 @@ assert find_executable('pypy') == a.join('pypy') # monkeypatch.setattr(initpath, 'we_are_translated', lambda: True) - monkeypatch.setattr(initpath, 'IS_WINDOWS', True) + monkeypatch.setattr(initpath, '_WIN32', True) monkeypatch.setenv('PATH', str(a)) 
a.join('pypy.exe').ensure(file=True) assert find_executable('pypy') == a.join('pypy.exe') diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -656,6 +656,10 @@ return self.len - self.pos return 0 + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class BaseKeyIterator(BaseIteratorImplementation): next_key = _new_next('key') @@ -1191,6 +1195,10 @@ w_ret = space.newtuple([new_inst, space.newtuple([w_res])]) return w_ret + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class W_DictMultiIterKeysObject(W_BaseDictMultiIterObject): def descr_next(self, space): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -873,8 +873,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a TypeCell, which may change without changing the version_tag - assert space.config.objspace.std.withmethodcache - _, w_descr = w_type._pure_lookup_where_with_method_cache( + _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # selector = ("", INVALID) @@ -932,9 +931,8 @@ # in the class, this time taking care of the result: it can be either a # quasi-constant class attribute, or actually a TypeCell --- which we # must not cache. (It should not be None here, but you never know...) 
- assert space.config.objspace.std.withmethodcache - _, w_method = w_type._pure_lookup_where_with_method_cache(name, - version_tag) + _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( + name, version_tag) if w_method is None or isinstance(w_method, TypeCell): return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -371,6 +371,12 @@ w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag) return w_class, unwrap_cell(space, w_value) + def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): + if w_self.space.config.objspace.std.withmethodcache: + return w_self._pure_lookup_where_with_method_cache(name, version_tag) + else: + return w_self._lookup_where_all_typeobjects(name) + @elidable def _pure_lookup_where_with_method_cache(w_self, name, version_tag): space = w_self.space diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -1,10 +1,17 @@ import py from rpython.config.translationoption import get_combined_translation_config from rpython.config.translationoption import set_opt_level -from rpython.config.config import ConflictConfigError +from rpython.config.config import ConflictConfigError, ConfigError +from rpython.translator.platform import platform as compiler def test_no_gcrootfinder_with_boehm(): config = get_combined_translation_config() config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') + +if compiler.name == 'msvc': + def test_no_asmgcrot_on_msvc(): + config = get_combined_translation_config() + py.test.raises(ConfigError, config.translation.setoption, + 'gcrootfinder', 'asmgcc', 'user') diff --git 
a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -1,9 +1,10 @@ import sys import os from rpython.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption, FloatOption -from rpython.config.config import ChoiceOption, StrOption, Config +from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError from rpython.config.support import detect_number_of_processors +from rpython.translator.platform import platform as compiler DEFL_INLINE_THRESHOLD = 32.4 # just enough to inline add__Int_Int() # and just small enough to prevend inlining of some rlist functions. @@ -16,8 +17,13 @@ if sys.platform.startswith("linux"): DEFL_ROOTFINDER_WITHJIT = "asmgcc" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] +elif compiler.name == 'msvc': + DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack"] else: DEFL_ROOTFINDER_WITHJIT = "shadowstack" + ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] IS_64_BITS = sys.maxint > 2147483647 @@ -85,7 +91,7 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ROOTFINDERS, "shadowstack", cmdline="--gcrootfinder", requires={ diff --git a/rpython/flowspace/test/test_unroll.py b/rpython/flowspace/test/test_unroll.py --- a/rpython/flowspace/test/test_unroll.py +++ b/rpython/flowspace/test/test_unroll.py @@ -1,23 +1,10 @@ import operator + from rpython.flowspace.test.test_objspace import Base -from rpython.rlib.unroll import unrolling_zero, unrolling_iterable +from rpython.rlib.unroll import unrolling_iterable + class TestUnroll(Base): - - def test_unrolling_int(self): - l = range(10) - def f(tot): - i = unrolling_zero - while i < len(l): - tot += l[i] - i = i + 1 - return tot*2 - assert f(0) == sum(l)*2 - - 
graph = self.codetest(f) - ops = self.all_operations(graph) - assert ops == {'inplace_add': 10, 'mul': 1} - def test_unroller(self): l = unrolling_iterable(range(10)) def f(tot): diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -5,7 +5,7 @@ """ import weakref -import os +import os, py from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype from rpython.rlib.jit import JitDriver, dont_look_inside @@ -13,6 +13,7 @@ from rpython.jit.backend.llsupport.gc import GcLLDescr_framework from rpython.tool.udir import udir from rpython.config.translationoption import DEFL_GC +from rpython.config.config import ConfigError class X(object): @@ -166,6 +167,9 @@ cls.cbuilder = compile(get_entry(allfuncs), cls.gc, gcrootfinder=cls.gcrootfinder, jit=True, thread=True) + except ConfigError, e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: GcLLDescr_framework.DEBUG = OLD_DEBUG diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -1,4 +1,4 @@ -import os, sys +import os, sys, py from rpython.tool.udir import udir from rpython.rlib.jit import JitDriver, unroll_parameters, set_param from rpython.rlib.jit import PARAMETERS, dont_look_inside @@ -7,7 +7,7 @@ from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.test.support import CCompiledMixin from rpython.jit.codewriter.policy import StopAtXPolicy - +from rpython.config.config import ConfigError class TranslationTest(CCompiledMixin): CPUClass = getcpuclass() @@ -252,6 +252,9 @@ try: res = self.meta_interp(main, [400]) assert res == main(400) + except 
ConfigError,e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: del os.environ['PYPYLOG'] diff --git a/rpython/jit/backend/x86/support.py b/rpython/jit/backend/x86/support.py --- a/rpython/jit/backend/x86/support.py +++ b/rpython/jit/backend/x86/support.py @@ -7,11 +7,12 @@ extra = ['-DPYPY_X86_CHECK_SSE2'] if sys.platform != 'win32': extra += ['-msse2', '-mfpmath=sse'] + else: + extra += ['/arch:SSE2'] else: extra = [] # the -m options above are always on by default on x86-64 -if sys.platform != 'win32': - extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra +extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = extra, diff --git a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py --- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py @@ -1,4 +1,9 @@ +import py from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests +from rpython.translator.platform import platform as compiler + +if compiler.name == 'msvc': + py.test.skip('asmgcc buggy on msvc') class TestAsmGcc(CompileFrameworkTests): gcrootfinder = "asmgcc" diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -567,7 +567,8 @@ # common case: this is not a guard_value, and we are not # already busy tracing. The rest of self.status stores a # valid per-guard index in the jitcounter. - hash = self.status & self.ST_SHIFT_MASK + hash = self.status + assert hash == (self.status & self.ST_SHIFT_MASK) # # do we have the BUSY flag? If so, we're tracing right now, e.g. in an # outer invocation of the same function, so don't trace again for now. 
diff --git a/rpython/jit/metainterp/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py rename from rpython/jit/metainterp/test/test_intbound.py rename to rpython/jit/metainterp/optimizeopt/test/test_intbound.py --- a/rpython/jit/metainterp/test/test_intbound.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py @@ -1,5 +1,7 @@ from rpython.jit.metainterp.optimizeopt.intutils import IntBound, IntUpperBound, \ IntLowerBound, IntUnbounded +from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 + from copy import copy import sys from rpython.rlib.rarithmetic import LONG_BIT @@ -235,8 +237,8 @@ for shift_count_bound in (IntBound(7, LONG_BIT), IntBound(-7, 7)): #assert not b.lshift_bound(shift_count_bound).has_upper assert not b.rshift_bound(shift_count_bound).has_upper - - + + def test_div_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -258,7 +260,6 @@ assert a.contains(0) - def test_sub_bound(): for _, _, b1 in some_bounds(): for _, _, b2 in some_bounds(): @@ -271,3 +272,14 @@ a=bound(2, 4).sub_bound(bound(1, 2)) assert not a.contains(-1) assert not a.contains(4) + + +def test_next_pow2_m1(): + assert next_pow2_m1(0) == 0 + assert next_pow2_m1(1) == 1 + assert next_pow2_m1(7) == 7 + assert next_pow2_m1(256) == 511 + assert next_pow2_m1(255) == 255 + assert next_pow2_m1(80) == 127 + assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 + assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py deleted file mode 100644 --- a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py +++ /dev/null @@ -1,12 +0,0 @@ -from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 - - -def test_next_pow2_m1(): - assert next_pow2_m1(0) == 0 - assert next_pow2_m1(1) == 1 - assert next_pow2_m1(7) == 7 - assert next_pow2_m1(256) == 511 - assert next_pow2_m1(255) == 255 
- assert next_pow2_m1(80) == 127 - assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 - assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -0,0 +1,1181 @@ +from __future__ import with_statement +import py +from rpython.jit.metainterp.optimize import InvalidLoop +from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \ + VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes +from rpython.jit.metainterp.optimizeopt.optimizer import OptValue +from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ + equaloplists, FakeDescrWithSnapshot +from rpython.jit.metainterp.optimizeopt.intutils import IntBound +from rpython.jit.metainterp.history import TreeLoop, JitCellToken +from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData +from rpython.jit.metainterp.resoperation import ResOperation, rop + +class TestBasic: + someptr1 = LLtypeMixin.myptr + someptr2 = LLtypeMixin.myptr2 + + def test_position_generalization(self): + def postest(info1, info2): + info1.position = 0 + assert info1.generalization_of(info1, {}, {}) + info2.position = 0 + assert info1.generalization_of(info2, {}, {}) + info2.position = 1 + renum = {} + assert info1.generalization_of(info2, renum, {}) + assert renum == {0:1} + assert info1.generalization_of(info2, {0:1}, {}) + assert info1.generalization_of(info2, {1:1}, {}) + bad = {} + assert not info1.generalization_of(info2, {0:0}, bad) + assert info1 in bad and info2 in bad + + for BoxType in (BoxInt, BoxFloat, BoxPtr): + info1 = 
NotVirtualStateInfo(OptValue(BoxType())) + info2 = NotVirtualStateInfo(OptValue(BoxType())) + postest(info1, info2) + + info1, info2 = VArrayStateInfo(42), VArrayStateInfo(42) + info1.fieldstate = info2.fieldstate = [] + postest(info1, info2) + + info1, info2 = VStructStateInfo(42, []), VStructStateInfo(42, []) + info1.fieldstate = info2.fieldstate = [] + postest(info1, info2) + + info1, info2 = VirtualStateInfo(ConstInt(42), []), VirtualStateInfo(ConstInt(42), []) + info1.fieldstate = info2.fieldstate = [] + postest(info1, info2) + + def test_NotVirtualStateInfo_generalization(self): + def isgeneral(value1, value2): + info1 = NotVirtualStateInfo(value1) + info1.position = 0 + info2 = NotVirtualStateInfo(value2) + info2.position = 0 + return info1.generalization_of(info2, {}, {}) + + assert isgeneral(OptValue(BoxInt()), OptValue(ConstInt(7))) + assert not isgeneral(OptValue(ConstInt(7)), OptValue(BoxInt())) + + ptr = OptValue(BoxPtr()) + nonnull = OptValue(BoxPtr()) + nonnull.make_nonnull(0) + knownclass = OptValue(BoxPtr()) + knownclass.make_constant_class(ConstPtr(self.someptr1), 0) + const = OptValue(BoxPtr) + const.make_constant_class(ConstPtr(self.someptr1), 0) + const.make_constant(ConstPtr(self.someptr1)) + inorder = [ptr, nonnull, knownclass, const] + for i in range(len(inorder)): + for j in range(i, len(inorder)): + assert isgeneral(inorder[i], inorder[j]) + if i != j: + assert not isgeneral(inorder[j], inorder[i]) + + value1 = OptValue(BoxInt()) + value2 = OptValue(BoxInt()) + value2.intbound.make_lt(IntBound(10, 10)) + assert isgeneral(value1, value2) + assert not isgeneral(value2, value1) + + assert isgeneral(OptValue(ConstInt(7)), OptValue(ConstInt(7))) + S = lltype.GcStruct('S') + foo = lltype.malloc(S) + fooref = lltype.cast_opaque_ptr(llmemory.GCREF, foo) + assert isgeneral(OptValue(ConstPtr(fooref)), + OptValue(ConstPtr(fooref))) + + value1 = OptValue(BoxPtr()) + value1.make_nonnull(None) + value2 = OptValue(ConstPtr(LLtypeMixin.nullptr)) + assert 
not isgeneral(value1, value2) + + def test_field_matching_generalization(self): + const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) + const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) + const1.position = const2.position = 1 + assert not const1.generalization_of(const2, {}, {}) + assert not const2.generalization_of(const1, {}, {}) + + def fldtst(info1, info2): + info1.position = info2.position = 0 + info1.fieldstate = [const1] + info2.fieldstate = [const2] + assert not info1.generalization_of(info2, {}, {}) + assert not info2.generalization_of(info1, {}, {}) + assert info1.generalization_of(info1, {}, {}) + assert info2.generalization_of(info2, {}, {}) + fldtst(VArrayStateInfo(42), VArrayStateInfo(42)) + fldtst(VStructStateInfo(42, [7]), VStructStateInfo(42, [7])) + fldtst(VirtualStateInfo(ConstInt(42), [7]), VirtualStateInfo(ConstInt(42), [7])) + + def test_known_class_generalization(self): + knownclass1 = OptValue(BoxPtr()) + knownclass1.make_constant_class(ConstPtr(self.someptr1), 0) + info1 = NotVirtualStateInfo(knownclass1) + info1.position = 0 + knownclass2 = OptValue(BoxPtr()) + knownclass2.make_constant_class(ConstPtr(self.someptr1), 0) + info2 = NotVirtualStateInfo(knownclass2) + info2.position = 0 + assert info1.generalization_of(info2, {}, {}) + assert info2.generalization_of(info1, {}, {}) + + knownclass3 = OptValue(BoxPtr()) + knownclass3.make_constant_class(ConstPtr(self.someptr2), 0) + info3 = NotVirtualStateInfo(knownclass3) + info3.position = 0 + assert not info1.generalization_of(info3, {}, {}) + assert not info2.generalization_of(info3, {}, {}) + assert not info3.generalization_of(info2, {}, {}) + assert not info3.generalization_of(info1, {}, {}) + + + def test_circular_generalization(self): + for info in (VArrayStateInfo(42), VStructStateInfo(42, [7]), + VirtualStateInfo(ConstInt(42), [7])): + info.position = 0 + info.fieldstate = [info] + assert info.generalization_of(info, {}, {}) + + +class BaseTestGenerateGuards(BaseTest): + def 
guards(self, info1, info2, box, expected): + info1.position = info2.position = 0 + guards = [] + info1.generate_guards(info2, box, self.cpu, guards, {}) + self.compare(guards, expected, [box]) + + def compare(self, guards, expected, inputargs): + loop = self.parse(expected) + boxmap = {} + assert len(loop.inputargs) == len(inputargs) + for a, b in zip(loop.inputargs, inputargs): + boxmap[a] = b + for op in loop.operations: + if op.is_guard(): + op.setdescr(None) + assert equaloplists(guards, loop.operations, False, + boxmap) + def test_intbounds(self): + value1 = OptValue(BoxInt()) + value1.intbound.make_ge(IntBound(0, 10)) + value1.intbound.make_le(IntBound(20, 30)) + info1 = NotVirtualStateInfo(value1) + info2 = NotVirtualStateInfo(OptValue(BoxInt())) + expected = """ + [i0] + i1 = int_ge(i0, 0) + guard_true(i1) [] + i2 = int_le(i0, 30) + guard_true(i2) [] + """ + self.guards(info1, info2, BoxInt(15), expected) + py.test.raises(InvalidLoop, self.guards, + info1, info2, BoxInt(50), expected) + + + def test_known_class(self): + value1 = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value1.make_constant_class(classbox, -1) + info1 = NotVirtualStateInfo(value1) + info2 = NotVirtualStateInfo(OptValue(self.nodebox)) + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, self.nodebox, expected) + py.test.raises(InvalidLoop, self.guards, + info1, info2, BoxPtr(), expected) + + def test_known_class_value(self): + value1 = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value1.make_constant_class(classbox, -1) + box = self.nodebox + guards = value1.make_guards(box) + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.compare(guards, expected, [box]) + + def test_known_value(self): + value1 = OptValue(self.nodebox) + value1.make_constant(ConstInt(1)) + box = self.nodebox + guards = 
value1.make_guards(box) + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.compare(guards, expected, [box]) + + def test_equal_inputargs(self): + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + vstate1 = VirtualState([knownclass_info, knownclass_info]) + assert vstate1.generalization_of(vstate1) + + unknown_info1 = NotVirtualStateInfo(OptValue(self.nodebox)) + vstate2 = VirtualState([unknown_info1, unknown_info1]) + assert vstate2.generalization_of(vstate2) + assert not vstate1.generalization_of(vstate2) + assert vstate2.generalization_of(vstate1) + + unknown_info1 = NotVirtualStateInfo(OptValue(self.nodebox)) + unknown_info2 = NotVirtualStateInfo(OptValue(self.nodebox)) + vstate3 = VirtualState([unknown_info1, unknown_info2]) + assert vstate3.generalization_of(vstate2) + assert vstate3.generalization_of(vstate1) + assert not vstate2.generalization_of(vstate3) + assert not vstate1.generalization_of(vstate3) + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + guards = [] + vstate1.generate_guards(vstate2, [self.nodebox, self.nodebox], self.cpu, guards) + self.compare(guards, expected, [self.nodebox]) + + with py.test.raises(InvalidLoop): + guards = [] + vstate1.generate_guards(vstate3, [self.nodebox, self.nodebox], + self.cpu, guards) + with py.test.raises(InvalidLoop): + guards = [] + vstate2.generate_guards(vstate3, [self.nodebox, self.nodebox], + self.cpu, guards) + + def test_virtuals_with_equal_fields(self): + info1 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = 
VirtualStateInfo(ConstInt(42), [1, 2]) + unknown_info1 = NotVirtualStateInfo(OptValue(self.nodebox)) + info2.fieldstate = [unknown_info1, unknown_info1] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + assert not vstate1.generalization_of(vstate2) + assert vstate2.generalization_of(vstate1) + + info3 = VirtualStateInfo(ConstInt(42), [1, 2]) + unknown_info1 = NotVirtualStateInfo(OptValue(self.nodebox)) + unknown_info2 = NotVirtualStateInfo(OptValue(self.nodebox)) + info3.fieldstate = [unknown_info1, unknown_info2] + vstate3 = VirtualState([info3]) + assert vstate3.generalization_of(vstate2) + assert vstate3.generalization_of(vstate1) + assert not vstate2.generalization_of(vstate3) + assert not vstate1.generalization_of(vstate3) + + def test_virtuals_with_nonmatching_fields(self): + info1 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_virtuals_with_nonmatching_descrs(self): + info1 = VirtualStateInfo(ConstInt(42), [10, 20]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) 
+ assert vstate1.generalization_of(vstate1) + + info2 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_virtuals_with_nonmatching_classes(self): + info1 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = VirtualStateInfo(ConstInt(7), [1, 2]) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_nonvirtual_is_not_virtual(self): + info1 = VirtualStateInfo(ConstInt(42), [1, 2]) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = NotVirtualStateInfo(value) + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def 
test_arrays_with_nonmatching_fields(self): + info1 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = VArrayStateInfo(42) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_arrays_of_different_sizes(self): + info1 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_arrays_with_nonmatching_types(self): + info1 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert 
vstate1.generalization_of(vstate1) + + info2 = VArrayStateInfo(7) + value = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info2.fieldstate = [knownclass_info, knownclass_info] + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + + assert not vstate2.generalization_of(vstate1) + assert not vstate1.generalization_of(vstate2) + + def test_nonvirtual_is_not_array(self): + info1 = VArrayStateInfo(42) + value = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + value.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(value) + info1.fieldstate = [knownclass_info, knownclass_info] + vstate1 = VirtualState([info1]) + assert vstate1.generalization_of(vstate1) + + info2 = NotVirtualStateInfo(value) + vstate2 = VirtualState([info2]) + assert vstate2.generalization_of(vstate2) + From noreply at buildbot.pypy.org Fri Apr 11 17:05:57 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 11 Apr 2014 17:05:57 +0200 (CEST) Subject: [pypy-commit] pypy default: add a link to my thesis, because it seems to contain a better explanation of the JIT than the current docs Message-ID: <20140411150557.AB1761C12F3@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r70558:38c5c29e277b Date: 2014-04-11 17:05 +0200 http://bitbucket.org/pypy/pypy/changeset/38c5c29e277b/ Log: add a link to my thesis, because it seems to contain a better explanation of the JIT than the current docs diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -177,6 +177,12 @@ .. 
__: https://bitbucket.org/pypy/extradoc/src/tip/talk/icooolps2009/bolz-tracing-jit-final.pdf -as well as the `blog posts with the JIT tag.`__ +Chapters 5 and 6 of `Antonio Cuni's PhD thesis`__ contain an overview of how +Tracing JITs work in general and more informations about the concrete case of +PyPy's JIT. + +.. __: http://antocuni.eu/download/antocuni-phd-thesis.pdf + +The `blog posts with the JIT tag.`__ might also contain additional informations. .. __: http://morepypy.blogspot.com/search/label/jit From noreply at buildbot.pypy.org Fri Apr 11 17:51:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 17:51:48 +0200 (CEST) Subject: [pypy-commit] pypy default: a pair of typos Message-ID: <20140411155148.836731C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70559:18dd97fad063 Date: 2014-04-11 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/18dd97fad063/ Log: a pair of typos diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -183,6 +183,6 @@ .. __: http://antocuni.eu/download/antocuni-phd-thesis.pdf -The `blog posts with the JIT tag.`__ might also contain additional informations. +The `blog posts with the JIT tag`__ might also contain additional information. .. 
__: http://morepypy.blogspot.com/search/label/jit From noreply at buildbot.pypy.org Fri Apr 11 18:10:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 18:10:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: The next todo Message-ID: <20140411161059.80A271C0962@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70560:fae248f0ef17 Date: 2014-04-11 18:10 +0200 http://bitbucket.org/pypy/pypy/changeset/fae248f0ef17/ Log: The next todo diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -91,6 +91,11 @@ ------------------------------------------------------------ +pypy_g_BlackholeInterpBuilder_acquire_interp creates conflicts +by caching the BlackholeInterps + +------------------------------------------------------------ + From noreply at buildbot.pypy.org Fri Apr 11 19:07:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Apr 2014 19:07:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Disable all caching of the BlackholeInterpreter objects in the Message-ID: <20140411170707.3C3921C147D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70561:b2d0af2aa055 Date: 2014-04-11 18:31 +0200 http://bitbucket.org/pypy/pypy/changeset/b2d0af2aa055/ Log: Disable all caching of the BlackholeInterpreter objects in the presence of stm. Temporary solution. diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -92,7 +92,8 @@ ------------------------------------------------------------ pypy_g_BlackholeInterpBuilder_acquire_interp creates conflicts -by caching the BlackholeInterps +by caching the BlackholeInterps. It has been disabled, check +if we can re-enable it... 
------------------------------------------------------------ diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -3,7 +3,7 @@ from rpython.jit.metainterp.compile import ResumeAtPositionDescr from rpython.jit.metainterp.jitexc import get_llexception, reraise from rpython.jit.metainterp import jitexc -from rpython.rlib import longlong2float +from rpython.rlib import longlong2float, rgc from rpython.rlib.debug import ll_assert, make_sure_not_resized from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck @@ -235,6 +235,8 @@ return handler def acquire_interp(self): + if rgc.stm_is_enabled(): # XXX for now, no caching + return BlackholeInterpreter(self, 0) if len(self.blackholeinterps) > 0: return self.blackholeinterps.pop() else: @@ -242,6 +244,8 @@ return BlackholeInterpreter(self, self.num_interpreters) def release_interp(self, interp): + if rgc.stm_is_enabled(): # XXX for now, no caching + return interp.cleanup_registers() self.blackholeinterps.append(interp) From noreply at buildbot.pypy.org Fri Apr 11 23:11:47 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 11 Apr 2014 23:11:47 +0200 (CEST) Subject: [pypy-commit] pypy py3k: On Unicode wide builds (=all except win32), don't merge utf16 surrogate pairs on encoding. Message-ID: <20140411211147.80B241C022D@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r70562:5494a374d576 Date: 2014-04-11 23:10 +0200 http://bitbucket.org/pypy/pypy/changeset/5494a374d576/ Log: On Unicode wide builds (=all except win32), don't merge utf16 surrogate pairs on encoding. This only affects python3 which sets allow_surrogates=False. 
diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -331,7 +331,8 @@ ch2 = ord(s[pos]) # Check for low surrogate and combine the two to # form a UCS4 value - if ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF: + if ((allow_surrogates or MAXUNICODE < 65536) and + ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF): ch3 = ((ch - 0xD800) << 10 | (ch2 - 0xDC00)) + 0x10000 pos += 1 _encodeUCS4(result, ch3) diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -803,3 +803,20 @@ u, len(u), True) == r'\ud800\udc00' assert runicode.unicode_encode_raw_unicode_escape( u, len(u), True) == r'\ud800\udc00' + + def test_encode_surrogate_pair_utf8(self): + u = runicode.UNICHR(0xD800) + runicode.UNICHR(0xDC00) + if runicode.MAXUNICODE < 65536: + # Narrow unicode build, consider utf16 surrogate pairs + assert runicode.unicode_encode_utf_8( + u, len(u), True, allow_surrogates=True) == '\xf0\x90\x80\x80' + assert runicode.unicode_encode_utf_8( + u, len(u), True, allow_surrogates=False) == '\xf0\x90\x80\x80' + else: + # Wide unicode build, merge utf16 surrogate pairs only when allowed + assert runicode.unicode_encode_utf_8( + u, len(u), True, allow_surrogates=True) == '\xf0\x90\x80\x80' + # Surrogates not merged, encoding fails. 
+ py.test.raises( + UnicodeEncodeError, runicode.unicode_encode_utf_8, + u, len(u), True, allow_surrogates=False) From noreply at buildbot.pypy.org Fri Apr 11 23:42:28 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 11 Apr 2014 23:42:28 +0200 (CEST) Subject: [pypy-commit] pypy vendor/stdlib-3.3.5: Branch for stlib-3.3.5 Message-ID: <20140411214228.CBE751C02AF@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: vendor/stdlib-3.3.5 Changeset: r70563:f2946f4e007e Date: 2014-04-06 17:03 +0200 http://bitbucket.org/pypy/pypy/changeset/f2946f4e007e/ Log: Branch for stlib-3.3.5 From noreply at buildbot.pypy.org Fri Apr 11 23:42:34 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 11 Apr 2014 23:42:34 +0200 (CEST) Subject: [pypy-commit] pypy vendor/stdlib-3.3.5: Drop stdlib from Python-3.3.5 tarball Message-ID: <20140411214234.CA2CF1C02AF@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: vendor/stdlib-3.3.5 Changeset: r70564:73c95e7d0840 Date: 2014-04-06 17:23 +0200 http://bitbucket.org/pypy/pypy/changeset/73c95e7d0840/ Log: Drop stdlib from Python-3.3.5 tarball diff too long, truncating to 2000 out of 173214 lines diff --git a/lib-python/3/_abcoll.py b/lib-python/3/_abcoll.py deleted file mode 100644 --- a/lib-python/3/_abcoll.py +++ /dev/null @@ -1,623 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. - -DON'T USE THIS MODULE DIRECTLY! The classes here should be imported -via collections; they are defined here only to alleviate certain -bootstrapping issues. Unit tests are in test_collections. 
-""" - -from abc import ABCMeta, abstractmethod -import sys - -__all__ = ["Hashable", "Iterable", "Iterator", - "Sized", "Container", "Callable", - "Set", "MutableSet", - "Mapping", "MutableMapping", - "MappingView", "KeysView", "ItemsView", "ValuesView", - "Sequence", "MutableSequence", - "ByteString", - ] - - -### collection related types which are not exposed through builtin ### -## iterators ## -bytes_iterator = type(iter(b'')) -bytearray_iterator = type(iter(bytearray())) -#callable_iterator = ??? -dict_keyiterator = type(iter({}.keys())) -dict_valueiterator = type(iter({}.values())) -dict_itemiterator = type(iter({}.items())) -list_iterator = type(iter([])) -list_reverseiterator = type(iter(reversed([]))) -range_iterator = type(iter(range(0))) -set_iterator = type(iter(set())) -str_iterator = type(iter("")) -tuple_iterator = type(iter(())) -zip_iterator = type(iter(zip())) -## views ## -dict_keys = type({}.keys()) -dict_values = type({}.values()) -dict_items = type({}.items()) -## misc ## -dict_proxy = type(type.__dict__) - - -### ONE-TRICK PONIES ### - -class Hashable(metaclass=ABCMeta): - - @abstractmethod - def __hash__(self): - return 0 - - @classmethod - def __subclasshook__(cls, C): - if cls is Hashable: - for B in C.__mro__: - if "__hash__" in B.__dict__: - if B.__dict__["__hash__"]: - return True - break - return NotImplemented - - -class Iterable(metaclass=ABCMeta): - - @abstractmethod - def __iter__(self): - while False: - yield None - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterable: - if any("__iter__" in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - - -class Iterator(Iterable): - - @abstractmethod - def __next__(self): - raise StopIteration - - def __iter__(self): - return self - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterator: - if (any("__next__" in B.__dict__ for B in C.__mro__) and - any("__iter__" in B.__dict__ for B in C.__mro__)): - return True - return NotImplemented - 
-Iterator.register(bytes_iterator) -Iterator.register(bytearray_iterator) -#Iterator.register(callable_iterator) -Iterator.register(dict_keyiterator) -Iterator.register(dict_valueiterator) -Iterator.register(dict_itemiterator) -Iterator.register(list_iterator) -Iterator.register(list_reverseiterator) -Iterator.register(range_iterator) -Iterator.register(set_iterator) -Iterator.register(str_iterator) -Iterator.register(tuple_iterator) -Iterator.register(zip_iterator) - -class Sized(metaclass=ABCMeta): - - @abstractmethod - def __len__(self): - return 0 - - @classmethod - def __subclasshook__(cls, C): - if cls is Sized: - if any("__len__" in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - - -class Container(metaclass=ABCMeta): - - @abstractmethod - def __contains__(self, x): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Container: - if any("__contains__" in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - - -class Callable(metaclass=ABCMeta): - - @abstractmethod - def __call__(self, *args, **kwds): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Callable: - if any("__call__" in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - - -### SETS ### - - -class Set(Sized, Iterable, Container): - - """A set is a finite, iterable container. - - This class provides concrete generic implementations of all - methods except for __contains__, __iter__ and __len__. - - To override the comparisons (presumably for speed, as the - semantics are fixed), all you have to do is redefine __le__ and - then the other operations will automatically follow suit. 
- """ - - def __le__(self, other): - if not isinstance(other, Set): - return NotImplemented - if len(self) > len(other): - return False - for elem in self: - if elem not in other: - return False - return True - - def __lt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) < len(other) and self.__le__(other) - - def __gt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return other.__lt__(self) - - def __ge__(self, other): - if not isinstance(other, Set): - return NotImplemented - return other.__le__(self) - - def __eq__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) == len(other) and self.__le__(other) - - def __ne__(self, other): - return not (self == other) - - @classmethod - def _from_iterable(cls, it): - '''Construct an instance of the class from any iterable input. - - Must override this method if the class constructor signature - does not accept an iterable for an input. - ''' - return cls(it) - - def __and__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - return self._from_iterable(value for value in other if value in self) - - def isdisjoint(self, other): - for value in other: - if value in self: - return False - return True - - def __or__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - chain = (e for s in (self, other) for e in s) - return self._from_iterable(chain) - - def __sub__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return self._from_iterable(value for value in self - if value not in other) - - def __xor__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return (self - other) | (other - self) - - def _hash(self): - """Compute the hash value of a set. 
- - Note that we don't define __hash__: not all sets are hashable. - But if you define a hashable set type, its __hash__ should - call this function. - - This must be compatible __eq__. - - All sets ought to compare equal if they contain the same - elements, regardless of how they are implemented, and - regardless of the order of the elements; so there's not much - freedom for __eq__ or __hash__. We match the algorithm used - by the built-in frozenset type. - """ - MAX = sys.maxsize - MASK = 2 * MAX + 1 - n = len(self) - h = 1927868237 * (n + 1) - h &= MASK - for x in self: - hx = hash(x) - h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 - h &= MASK - h = h * 69069 + 907133923 - h &= MASK - if h > MAX: - h -= MASK + 1 - if h == -1: - h = 590923713 - return h - -Set.register(frozenset) - - -class MutableSet(Set): - - @abstractmethod - def add(self, value): - """Add an element.""" - raise NotImplementedError - - @abstractmethod - def discard(self, value): - """Remove an element. Do not raise an exception if absent.""" - raise NotImplementedError - - def remove(self, value): - """Remove an element. If not a member, raise a KeyError.""" - if value not in self: - raise KeyError(value) - self.discard(value) - - def pop(self): - """Return the popped value. Raise KeyError if empty.""" - it = iter(self) - try: - value = next(it) - except StopIteration: - raise KeyError - self.discard(value) - return value - - def clear(self): - """This is slow (creates N new iterators!) 
but effective.""" - try: - while True: - self.pop() - except KeyError: - pass - - def __ior__(self, it): - for value in it: - self.add(value) - return self - - def __iand__(self, it): - for value in (self - it): - self.discard(value) - return self - - def __ixor__(self, it): - if it is self: - self.clear() - else: - if not isinstance(it, Set): - it = self._from_iterable(it) - for value in it: - if value in self: - self.discard(value) - else: - self.add(value) - return self - - def __isub__(self, it): - if it is self: - self.clear() - else: - for value in it: - self.discard(value) - return self - -MutableSet.register(set) - - -### MAPPINGS ### - - -class Mapping(Sized, Iterable, Container): - - @abstractmethod - def __getitem__(self, key): - raise KeyError - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def __contains__(self, key): - try: - self[key] - except KeyError: - return False - else: - return True - - def keys(self): - return KeysView(self) - - def items(self): - return ItemsView(self) - - def values(self): - return ValuesView(self) - - def __eq__(self, other): - if not isinstance(other, Mapping): - return NotImplemented - return dict(self.items()) == dict(other.items()) - - def __ne__(self, other): - return not (self == other) - - -class MappingView(Sized): - - def __init__(self, mapping): - self._mapping = mapping - - def __len__(self): - return len(self._mapping) - - def __repr__(self): - return '{0.__class__.__name__}({0._mapping!r})'.format(self) - - -class KeysView(MappingView, Set): - - @classmethod - def _from_iterable(self, it): - return set(it) - - def __contains__(self, key): - return key in self._mapping - - def __iter__(self): - for key in self._mapping: - yield key - -KeysView.register(dict_keys) - - -class ItemsView(MappingView, Set): - - @classmethod - def _from_iterable(self, it): - return set(it) - - def __contains__(self, item): - key, value = item - try: - v = self._mapping[key] - 
except KeyError: - return False - else: - return v == value - - def __iter__(self): - for key in self._mapping: - yield (key, self._mapping[key]) - -ItemsView.register(dict_items) - - -class ValuesView(MappingView): - - def __contains__(self, value): - for key in self._mapping: - if value == self._mapping[key]: - return True - return False - - def __iter__(self): - for key in self._mapping: - yield self._mapping[key] - -ValuesView.register(dict_values) - - -class MutableMapping(Mapping): - - @abstractmethod - def __setitem__(self, key, value): - raise KeyError - - @abstractmethod - def __delitem__(self, key): - raise KeyError - - __marker = object() - - def pop(self, key, default=__marker): - try: - value = self[key] - except KeyError: - if default is self.__marker: - raise - return default - else: - del self[key] - return value - - def popitem(self): - try: - key = next(iter(self)) - except StopIteration: - raise KeyError - value = self[key] - del self[key] - return key, value - - def clear(self): - try: - while True: - self.popitem() - except KeyError: - pass - - def update(*args, **kwds): - if len(args) > 2: - raise TypeError("update() takes at most 2 positional " - "arguments ({} given)".format(len(args))) - elif not args: - raise TypeError("update() takes at least 1 argument (0 given)") - self = args[0] - other = args[1] if len(args) >= 2 else () - - if isinstance(other, Mapping): - for key in other: - self[key] = other[key] - elif hasattr(other, "keys"): - for key in other.keys(): - self[key] = other[key] - else: - for key, value in other: - self[key] = value - for key, value in kwds.items(): - self[key] = value - - def setdefault(self, key, default=None): - try: - return self[key] - except KeyError: - self[key] = default - return default - -MutableMapping.register(dict) - - -### SEQUENCES ### - - -class Sequence(Sized, Iterable, Container): - - """All the operations on a read-only sequence. 
- - Concrete subclasses must override __new__ or __init__, - __getitem__, and __len__. - """ - - @abstractmethod - def __getitem__(self, index): - raise IndexError - - def __iter__(self): - i = 0 - try: - while True: - v = self[i] - yield v - i += 1 - except IndexError: - return - - def __contains__(self, value): - for v in self: - if v == value: - return True - return False - - def __reversed__(self): - for i in reversed(range(len(self))): - yield self[i] - - def index(self, value): - for i, v in enumerate(self): - if v == value: - return i - raise ValueError - - def count(self, value): - return sum(1 for v in self if v == value) - -Sequence.register(tuple) -Sequence.register(str) -Sequence.register(range) - - -class ByteString(Sequence): - - """This unifies bytes and bytearray. - - XXX Should add all their methods. - """ - -ByteString.register(bytes) -ByteString.register(bytearray) - - -class MutableSequence(Sequence): - - @abstractmethod - def __setitem__(self, index, value): - raise IndexError - - @abstractmethod - def __delitem__(self, index): - raise IndexError - - @abstractmethod - def insert(self, index, value): - raise IndexError - - def append(self, value): - self.insert(len(self), value) - - def reverse(self): - n = len(self) - for i in range(n//2): - self[i], self[n-i-1] = self[n-i-1], self[i] - - def extend(self, values): - for v in values: - self.append(v) - - def pop(self, index=-1): - v = self[index] - del self[index] - return v - - def remove(self, value): - del self[self.index(value)] - - def __iadd__(self, values): - self.extend(values) - return self - -MutableSequence.register(list) -MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/lib-python/3/_compat_pickle.py b/lib-python/3/_compat_pickle.py --- a/lib-python/3/_compat_pickle.py +++ b/lib-python/3/_compat_pickle.py @@ -76,6 +76,62 @@ ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'), } +PYTHON2_EXCEPTIONS = ( + "ArithmeticError", + 
"AssertionError", + "AttributeError", + "BaseException", + "BufferError", + "BytesWarning", + "DeprecationWarning", + "EOFError", + "EnvironmentError", + "Exception", + "FloatingPointError", + "FutureWarning", + "GeneratorExit", + "IOError", + "ImportError", + "ImportWarning", + "IndentationError", + "IndexError", + "KeyError", + "KeyboardInterrupt", + "LookupError", + "MemoryError", + "NameError", + "NotImplementedError", + "OSError", + "OverflowError", + "PendingDeprecationWarning", + "ReferenceError", + "RuntimeError", + "RuntimeWarning", + # StandardError is gone in Python 3, so we map it to Exception + "StopIteration", + "SyntaxError", + "SyntaxWarning", + "SystemError", + "SystemExit", + "TabError", + "TypeError", + "UnboundLocalError", + "UnicodeDecodeError", + "UnicodeEncodeError", + "UnicodeError", + "UnicodeTranslateError", + "UnicodeWarning", + "UserWarning", + "ValueError", + "Warning", + "ZeroDivisionError", +) + +for excname in PYTHON2_EXCEPTIONS: + NAME_MAPPING[("exceptions", excname)] = ("builtins", excname) + +NAME_MAPPING[("exceptions", "StandardError")] = ("builtins", "Exception") + # Same, but for 3.x to 2.x REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items()) REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items()) diff --git a/lib-python/3/_dummy_thread.py b/lib-python/3/_dummy_thread.py --- a/lib-python/3/_dummy_thread.py +++ b/lib-python/3/_dummy_thread.py @@ -24,11 +24,7 @@ # imports are done when needed on a function-by-function basis. Since threads # are disabled, the import lock should not be an issue anyway (??). -class error(Exception): - """Dummy implementation of _thread.error.""" - - def __init__(self, *args): - self.args = args +error = RuntimeError def start_new_thread(function, args, kwargs={}): """Dummy implementation of _thread.start_new_thread(). 
diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py --- a/lib-python/3/_osx_support.py +++ b/lib-python/3/_osx_support.py @@ -53,7 +53,7 @@ def _read_output(commandstring): - """Output from succesful command execution or None""" + """Output from successful command execution or None""" # Similar to os.popen(commandstring, "r").read(), # but without actually using os.popen because that # function is not usable during python bootstrap. @@ -152,7 +152,7 @@ # are not installed. # # Futhermore, the compiler that can be used varies between - # Xcode releases. Upto Xcode 4 it was possible to use 'gcc-4.2' + # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2' # as the compiler, after that 'clang' should be used because # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that # miscompiles Python. @@ -192,7 +192,7 @@ if cc != oldcc: # Found a replacement compiler. - # Modify config vars using new compiler, if not already explictly + # Modify config vars using new compiler, if not already explicitly # overriden by an env variable, preserving additional arguments. for cv in _COMPILER_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: @@ -235,13 +235,19 @@ if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: # NOTE: Cannot use subprocess here because of bootstrap # issues when building Python itself - status = os.system("'%s' -arch ppc -x c /dev/null 2>/dev/null"%( - _config_vars['CC'].replace("'", "'\"'\"'"),)) - # The Apple compiler drivers return status 255 if no PPC - if (status >> 8) == 255: - # Compiler doesn't support PPC, remove the related - # '-arch' flags if not explicitly overridden by an - # environment variable + status = os.system( + """echo 'int main{};' | """ + """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null""" + %(_config_vars['CC'].replace("'", "'\"'\"'"),)) + if status: + # The compile failed for some reason. 
Because of differences + # across Xcode and compiler versions, there is no reliable way + # to be sure why it failed. Assume here it was due to lack of + # PPC support and remove the related '-arch' flags from each + # config variables not explicitly overriden by an environment + # variable. If the error was for some other reason, we hope the + # failure will show up again when trying to compile an extension + # module. for cv in _UNIVERSAL_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] @@ -274,7 +280,7 @@ # compile an extension using an SDK that is not present # on the current machine it is better to not use an SDK # than to fail. This is particularly important with - # the standalong Command Line Tools alternative to a + # the standalone Command Line Tools alternative to a # full-blown Xcode install since the CLT packages do not # provide SDKs. If the SDK is not present, it is assumed # that the header files and dev libs have been installed @@ -378,7 +384,7 @@ compilers are present, i.e. when installing pure Python dists. Customization of compiler paths and detection of unavailable archs is deferred - until the first extention module build is + until the first extension module build is requested (in distutils.sysconfig.customize_compiler). Currently called from distutils.sysconfig diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -5,7 +5,6 @@ import os import abc import codecs -import warnings import errno # Import _thread instead of threading to reduce startup cost try: @@ -15,7 +14,11 @@ import io from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END) -from errno import EINTR + +valid_seek_flags = {0, 1, 2} # Hardwired values +if hasattr(os, 'SEEK_HOLE') : + valid_seek_flags.add(os.SEEK_HOLE) + valid_seek_flags.add(os.SEEK_DATA) # open() uses st_blksize whenever we can DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes @@ -24,20 +27,12 @@ # defined in io.py. 
We don't use real inheritance though, because we don't # want to inherit the C implementations. - -class BlockingIOError(IOError): - - """Exception raised when I/O would block on a non-blocking I/O stream.""" - - def __init__(self, errno, strerror, characters_written=0): - super().__init__(errno, strerror) - if not isinstance(characters_written, int): - raise TypeError("characters_written must be a integer") - self.characters_written = characters_written +# Rebind for compatibility +BlockingIOError = BlockingIOError def open(file, mode="r", buffering=-1, encoding=None, errors=None, - newline=None, closefd=True): + newline=None, closefd=True, opener=None): r"""Open file and return a stream. Raise IOError upon failure. @@ -47,21 +42,22 @@ wrapped. (If a file descriptor is given, it is closed when the returned I/O object is closed, unless closefd is set to False.) - mode is an optional string that specifies the mode in which the file - is opened. It defaults to 'r' which means open for reading in text - mode. Other common values are 'w' for writing (truncating the file if - it already exists), and 'a' for appending (which on some Unix systems, - means that all writes append to the end of the file regardless of the - current seek position). In text mode, if encoding is not specified the - encoding used is platform dependent. (For reading and writing raw - bytes use binary mode and leave encoding unspecified.) The available - modes are: + mode is an optional string that specifies the mode in which the file is + opened. It defaults to 'r' which means open for reading in text mode. Other + common values are 'w' for writing (truncating the file if it already + exists), 'x' for exclusive creation of a new file, and 'a' for appending + (which on some Unix systems, means that all writes append to the end of the + file regardless of the current seek position). In text mode, if encoding is + not specified the encoding used is platform dependent. 
(For reading and + writing raw bytes use binary mode and leave encoding unspecified.) The + available modes are: ========= =============================================================== Character Meaning --------- --------------------------------------------------------------- 'r' open for reading (default) 'w' open for writing, truncating the file first + 'x' create a new file and open it for writing 'a' open for writing, appending to the end of the file if it exists 'b' binary mode 't' text mode (default) @@ -72,7 +68,8 @@ The default mode is 'rt' (open for reading text). For binary random access, the mode 'w+b' opens and truncates the file to 0 bytes, while - 'r+b' opens the file without truncation. + 'r+b' opens the file without truncation. The 'x' mode implies 'w' and + raises an `FileExistsError` if the file already exists. Python distinguishes between files opened in binary and text modes, even when the underlying operating system doesn't. Files opened in @@ -132,6 +129,12 @@ be kept open when the file is closed. This does not work when a file name is given and must be True in that case. + A custom opener can be used by passing a callable as *opener*. The + underlying file descriptor for the file object is then obtained by calling + *opener* with (*file*, *flags*). *opener* must return an open file + descriptor (passing os.open as *opener* results in functionality similar to + passing None). + open() returns a file object whose type depends on the mode, and through which the standard file operations such as reading and writing are performed. 
When open() is used to open a file in a text mode ('w', @@ -157,8 +160,9 @@ if errors is not None and not isinstance(errors, str): raise TypeError("invalid errors: %r" % errors) modes = set(mode) - if modes - set("arwb+tU") or len(mode) > len(modes): + if modes - set("axrwb+tU") or len(mode) > len(modes): raise ValueError("invalid mode: %r" % mode) + creating = "x" in modes reading = "r" in modes writing = "w" in modes appending = "a" in modes @@ -166,14 +170,14 @@ text = "t" in modes binary = "b" in modes if "U" in modes: - if writing or appending: + if creating or writing or appending: raise ValueError("can't use U and writing mode at once") reading = True if text and binary: raise ValueError("can't have text and binary mode at once") - if reading + writing + appending > 1: + if creating + reading + writing + appending > 1: raise ValueError("can't have read/write/append mode at once") - if not (reading or writing or appending): + if not (creating or reading or writing or appending): raise ValueError("must have exactly one of read/write/append mode") if binary and encoding is not None: raise ValueError("binary mode doesn't take an encoding argument") @@ -182,11 +186,12 @@ if binary and newline is not None: raise ValueError("binary mode doesn't take a newline argument") raw = FileIO(file, + (creating and "x" or "") + (reading and "r" or "") + (writing and "w" or "") + (appending and "a" or "") + (updating and "+" or ""), - closefd) + closefd, opener=opener) line_buffering = False if buffering == 1 or buffering < 0 and raw.isatty(): buffering = -1 @@ -208,7 +213,7 @@ raise ValueError("can't have unbuffered text I/O") if updating: buffer = BufferedRandom(raw, buffering) - elif writing or appending: + elif creating or writing or appending: buffer = BufferedWriter(raw, buffering) elif reading: buffer = BufferedReader(raw, buffering) @@ -305,6 +310,7 @@ * 0 -- start of stream (the default); offset should be zero or positive * 1 -- current stream position; offset may be 
negative * 2 -- end of stream; offset is usually negative + Some operating systems / file systems could provide additional values. Return an int indicating the new absolute position. """ @@ -340,8 +346,10 @@ This method has no effect if the file is already closed. """ if not self.__closed: - self.flush() - self.__closed = True + try: + self.flush() + finally: + self.__closed = True def __del__(self): """Destructor. Calls close().""" @@ -865,7 +873,7 @@ elif whence == 2: self._pos = max(0, len(self._buffer) + pos) else: - raise ValueError("invalid whence value") + raise ValueError("unsupported whence value") return self._pos def tell(self): @@ -954,15 +962,19 @@ # Special case for when the number of bytes to read is unspecified. if n is None or n == -1: self._reset_read_buf() + if hasattr(self.raw, 'readall'): + chunk = self.raw.readall() + if chunk is None: + return buf[pos:] or None + else: + return buf[pos:] + chunk chunks = [buf[pos:]] # Strip the consumed bytes. current_size = 0 while True: # Read until EOF or until read() would block. try: chunk = self.raw.read() - except IOError as e: - if e.errno != EINTR: - raise + except InterruptedError: continue if chunk in empty_values: nodata_val = chunk @@ -984,9 +996,7 @@ while avail < n: try: chunk = self.raw.read(wanted) - except IOError as e: - if e.errno != EINTR: - raise + except InterruptedError: continue if chunk in empty_values: nodata_val = chunk @@ -1019,9 +1029,7 @@ while True: try: current = self.raw.read(to_read) - except IOError as e: - if e.errno != EINTR: - raise + except InterruptedError: continue break if current: @@ -1046,7 +1054,7 @@ return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos def seek(self, pos, whence=0): - if not (0 <= whence <= 2): + if whence not in valid_seek_flags: raise ValueError("invalid whence value") with self._read_lock: if whence == 1: @@ -1064,19 +1072,13 @@ DEFAULT_BUFFER_SIZE. 
""" - _warning_stack_offset = 2 - - def __init__(self, raw, - buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None): + def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE): if not raw.writable(): raise IOError('"raw" argument must be writable.') _BufferedIOMixin.__init__(self, raw) if buffer_size <= 0: raise ValueError("invalid buffer size") - if max_buffer_size is not None: - warnings.warn("max_buffer_size is deprecated", DeprecationWarning, - self._warning_stack_offset) self.buffer_size = buffer_size self._write_buf = bytearray() self._write_lock = Lock() @@ -1126,13 +1128,11 @@ while self._write_buf: try: n = self.raw.write(self._write_buf) + except InterruptedError: + continue except BlockingIOError: raise RuntimeError("self.raw should implement RawIOBase: it " "should not raise BlockingIOError") - except IOError as e: - if e.errno != EINTR: - raise - continue if n is None: raise BlockingIOError( errno.EAGAIN, @@ -1145,8 +1145,8 @@ return _BufferedIOMixin.tell(self) + len(self._write_buf) def seek(self, pos, whence=0): - if not (0 <= whence <= 2): - raise ValueError("invalid whence") + if whence not in valid_seek_flags: + raise ValueError("invalid whence value") with self._write_lock: self._flush_unlocked() return _BufferedIOMixin.seek(self, pos, whence) @@ -1168,15 +1168,11 @@ # XXX The usefulness of this (compared to having two separate IO # objects) is questionable. - def __init__(self, reader, writer, - buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None): + def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE): """Constructor. The arguments are two RawIO instances. """ - if max_buffer_size is not None: - warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2) - if not reader.readable(): raise IOError('"reader" argument must be readable.') @@ -1233,17 +1229,14 @@ defaults to DEFAULT_BUFFER_SIZE. 
""" - _warning_stack_offset = 3 - - def __init__(self, raw, - buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None): + def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE): raw._checkSeekable() BufferedReader.__init__(self, raw, buffer_size) - BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size) + BufferedWriter.__init__(self, raw, buffer_size) def seek(self, pos, whence=0): - if not (0 <= whence <= 2): - raise ValueError("invalid whence") + if whence not in valid_seek_flags: + raise ValueError("invalid whence value") self.flush() if self._read_buf: # Undo read ahead. @@ -1455,7 +1448,7 @@ r"""Character and line based layer over a BufferedIOBase object, buffer. encoding gives the name of the encoding that the stream will be - decoded or encoded with. It defaults to locale.getpreferredencoding. + decoded or encoded with. It defaults to locale.getpreferredencoding(False). errors determines the strictness of encoding and decoding (see the codecs.register) and defaults to "strict". @@ -1476,6 +1469,9 @@ _CHUNK_SIZE = 2048 + # The write_through argument has no effect here since this + # implementation always writes through. The argument is present only + # so that the signature can match the signature of the C version. 
def __init__(self, buffer, encoding=None, errors=None, newline=None, line_buffering=False, write_through=False): if newline is not None and not isinstance(newline, str): @@ -1494,11 +1490,16 @@ # Importing locale may fail if Python is being built encoding = "ascii" else: - encoding = locale.getpreferredencoding() + encoding = locale.getpreferredencoding(False) if not isinstance(encoding, str): raise ValueError("invalid encoding: %r" % encoding) + if not codecs.lookup(encoding)._is_text_encoding: + msg = ("%r is not a text encoding; " + "use codecs.open() to handle arbitrary codecs") + raise LookupError(msg % encoding) + if errors is None: errors = "strict" else: @@ -1521,6 +1522,7 @@ self._snapshot = None # info for reconstructing decoder state self._seekable = self._telling = self.buffer.seekable() self._has_read1 = hasattr(self.buffer, 'read1') + self._b2cratio = 0.0 if self._seekable and self.writable(): position = self.buffer.tell() @@ -1589,8 +1591,10 @@ def close(self): if self.buffer is not None and not self.closed: - self.flush() - self.buffer.close() + try: + self.flush() + finally: + self.buffer.close() @property def closed(self): @@ -1693,7 +1697,12 @@ else: input_chunk = self.buffer.read(self._CHUNK_SIZE) eof = not input_chunk - self._set_decoded_chars(self._decoder.decode(input_chunk, eof)) + decoded_chars = self._decoder.decode(input_chunk, eof) + self._set_decoded_chars(decoded_chars) + if decoded_chars: + self._b2cratio = len(input_chunk) / len(self._decoded_chars) + else: + self._b2cratio = 0.0 if self._telling: # At the snapshot point, len(dec_buffer) bytes before the read, @@ -1747,20 +1756,56 @@ # forward until it gives us enough decoded characters. saved_state = decoder.getstate() try: + # Fast search for an acceptable start point, close to our + # current pos. 
+ # Rationale: calling decoder.decode() has a large overhead + # regardless of chunk size; we want the number of such calls to + # be O(1) in most situations (common decoders, non-crazy input). + # Actually, it will be exactly 1 for fixed-size codecs (all + # 8-bit codecs, also UTF-16 and UTF-32). + skip_bytes = int(self._b2cratio * chars_to_skip) + skip_back = 1 + assert skip_bytes <= len(next_input) + while skip_bytes > 0: + decoder.setstate((b'', dec_flags)) + # Decode up to temptative start point + n = len(decoder.decode(next_input[:skip_bytes])) + if n <= chars_to_skip: + b, d = decoder.getstate() + if not b: + # Before pos and no bytes buffered in decoder => OK + dec_flags = d + chars_to_skip -= n + break + # Skip back by buffered amount and reset heuristic + skip_bytes -= len(b) + skip_back = 1 + else: + # We're too far ahead, skip back a bit + skip_bytes -= skip_back + skip_back = skip_back * 2 + else: + skip_bytes = 0 + decoder.setstate((b'', dec_flags)) + # Note our initial start point. - decoder.setstate((b'', dec_flags)) - start_pos = position - start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0 - need_eof = 0 + start_pos = position + skip_bytes + start_flags = dec_flags + if chars_to_skip == 0: + # We haven't moved from the start point. + return self._pack_cookie(start_pos, start_flags) # Feed the decoder one byte at a time. As we go, note the # nearest "safe start point" before the current location # (a point where the decoder has nothing buffered, so seek() # can safely start from there and advance to this location). 
- next_byte = bytearray(1) - for next_byte[0] in next_input: + bytes_fed = 0 + need_eof = 0 + # Chars decoded since `start_pos` + chars_decoded = 0 + for i in range(skip_bytes, len(next_input)): bytes_fed += 1 - chars_decoded += len(decoder.decode(next_byte)) + chars_decoded += len(decoder.decode(next_input[i:i+1])) dec_buffer, dec_flags = decoder.getstate() if not dec_buffer and chars_decoded <= chars_to_skip: # Decoder buffer is empty, so this is a safe start point. @@ -1819,8 +1864,7 @@ self._decoder.reset() return position if whence != 0: - raise ValueError("invalid whence (%r, should be 0, 1 or 2)" % - (whence,)) + raise ValueError("unsupported whence (%r)" % (whence,)) if cookie < 0: raise ValueError("negative seek position %r" % (cookie,)) self.flush() @@ -2005,7 +2049,7 @@ def __init__(self, initial_value="", newline="\n"): super(StringIO, self).__init__(BytesIO(), encoding="utf-8", - errors="strict", + errors="surrogatepass", newline=newline) # Issue #5645: make universal newlines semantics the same as in the # C version, even under Windows. @@ -2021,7 +2065,13 @@ def getvalue(self): self.flush() - return self.buffer.getvalue().decode(self._encoding, self._errors) + decoder = self._decoder or self._get_decoder() + old_state = decoder.getstate() + decoder.reset() + try: + return decoder.decode(self.buffer.getvalue(), final=True) + finally: + decoder.setstate(old_state) def __repr__(self): # TextIOWrapper tells the encoding in its repr. In StringIO, diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -225,7 +225,7 @@ """Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. 
This - prevents the possibility of a match occuring for a value that also + prevents the possibility of a match occurring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). @@ -326,10 +326,10 @@ bad_directive = "%" del err raise ValueError("'%s' is a bad directive in format '%s'" % - (bad_directive, format)) + (bad_directive, format)) from None # IndexError only occurs when the format string is "%" except IndexError: - raise ValueError("stray %% in format '%s'" % format) + raise ValueError("stray %% in format '%s'" % format) from None _regex_cache[format] = format_regex found = format_regex.match(data_string) if not found: @@ -486,19 +486,19 @@ return (year, month, day, hour, minute, second, - weekday, julian, tz, gmtoff, tzname), fraction + weekday, julian, tz, tzname, gmtoff), fraction def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"): """Return a time struct based on the input string and the format string.""" tt = _strptime(data_string, format)[0] - return time.struct_time(tt[:9]) + return time.struct_time(tt[:time._STRUCT_TM_ITEMS]) def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"): """Return a class cls instance based on the input string and the format string.""" tt, fraction = _strptime(data_string, format) - gmtoff, tzname = tt[-2:] + tzname, gmtoff = tt[-2:] args = tt[:6] + (fraction,) if gmtoff is not None: tzdelta = datetime_timedelta(seconds=gmtoff) diff --git a/lib-python/3/_weakrefset.py b/lib-python/3/_weakrefset.py --- a/lib-python/3/_weakrefset.py +++ b/lib-python/3/_weakrefset.py @@ -60,6 +60,8 @@ for itemref in self.data: item = itemref() if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. 
yield item def __len__(self): diff --git a/lib-python/3/abc.py b/lib-python/3/abc.py --- a/lib-python/3/abc.py +++ b/lib-python/3/abc.py @@ -26,7 +26,8 @@ class abstractclassmethod(classmethod): - """A decorator indicating abstract classmethods. + """ + A decorator indicating abstract classmethods. Similar to abstractmethod. @@ -36,6 +37,9 @@ @abstractclassmethod def my_abstract_classmethod(cls, ...): ... + + 'abstractclassmethod' is deprecated. Use 'classmethod' with + 'abstractmethod' instead. """ __isabstractmethod__ = True @@ -46,7 +50,8 @@ class abstractstaticmethod(staticmethod): - """A decorator indicating abstract staticmethods. + """ + A decorator indicating abstract staticmethods. Similar to abstractmethod. @@ -56,6 +61,9 @@ @abstractstaticmethod def my_abstract_staticmethod(...): ... + + 'abstractstaticmethod' is deprecated. Use 'staticmethod' with + 'abstractmethod' instead. """ __isabstractmethod__ = True @@ -66,7 +74,8 @@ class abstractproperty(property): - """A decorator indicating abstract properties. + """ + A decorator indicating abstract properties. Requires that the metaclass is ABCMeta or derived from it. A class that has a metaclass derived from ABCMeta cannot be @@ -88,7 +97,11 @@ def getx(self): ... def setx(self, value): ... x = abstractproperty(getx, setx) + + 'abstractproperty' is deprecated. Use 'property' with 'abstractmethod' + instead. """ + __isabstractmethod__ = True @@ -133,11 +146,14 @@ return cls def register(cls, subclass): - """Register a virtual subclass of an ABC.""" + """Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + """ if not isinstance(subclass, type): raise TypeError("Can only register classes") if issubclass(subclass, cls): - return # Already a subclass + return subclass # Already a subclass # Subtle: test for cycles *after* testing for "already a subclass"; # this means we allow X.register(X) and interpret it as a no-op. 
if issubclass(cls, subclass): @@ -145,6 +161,7 @@ raise RuntimeError("Refusing to create an inheritance cycle") cls._abc_registry.add(subclass) ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache + return subclass def _dump_registry(cls, file=None): """Debug helper to print the ABC registry.""" diff --git a/lib-python/3/aifc.py b/lib-python/3/aifc.py --- a/lib-python/3/aifc.py +++ b/lib-python/3/aifc.py @@ -123,7 +123,7 @@ compression type, and then write audio frames using writeframesraw. When all frames have been written, either call writeframes('') or close() to patch up the sizes in the header. -Marks can be added anytime. If there are any marks, ypu must call +Marks can be added anytime. If there are any marks, you must call close() after all frames have been written. The close() method is called automatically when the class instance is destroyed. @@ -136,6 +136,7 @@ import struct import builtins +import warnings __all__ = ["Error", "open", "openfp"] @@ -440,7 +441,7 @@ kludge = 0 if chunk.chunksize == 18: kludge = 1 - print('Warning: bad COMM chunk size') + warnings.warn('Warning: bad COMM chunk size') chunk.chunksize = 23 #DEBUG end self._comptype = chunk.read(4) @@ -456,15 +457,13 @@ if self._comptype != b'NONE': if self._comptype == b'G722': self._convert = self._adpcm2lin - self._framesize = self._framesize // 4 elif self._comptype in (b'ulaw', b'ULAW'): self._convert = self._ulaw2lin - self._framesize = self._framesize // 2 elif self._comptype in (b'alaw', b'ALAW'): self._convert = self._alaw2lin - self._framesize = self._framesize // 2 else: raise Error('unsupported compression type') + self._sampwidth = 2 else: self._comptype = b'NONE' self._compname = b'not compressed' @@ -484,11 +483,10 @@ # a position 0 and name '' self._markers.append((id, pos, name)) except EOFError: - print('Warning: MARK chunk contains only', end=' ') - print(len(self._markers), end=' ') - if len(self._markers) == 1: print('marker', end=' ') - else: 
print('markers', end=' ') - print('instead of', nmarkers) + w = ('Warning: MARK chunk contains only %s marker%s instead of %s' % + (len(self._markers), '' if len(self._markers) == 1 else 's', + nmarkers)) + warnings.warn(w) class Aifc_write: # Variables used in this class: @@ -773,7 +771,10 @@ self._datalength = (self._datalength + 3) // 4 if self._datalength & 1: self._datalength = self._datalength + 1 - self._form_length_pos = self._file.tell() + try: + self._form_length_pos = self._file.tell() + except (AttributeError, OSError): + self._form_length_pos = None commlength = self._write_form_length(self._datalength) if self._aifc: self._file.write(b'AIFC') @@ -785,15 +786,20 @@ self._file.write(b'COMM') _write_ulong(self._file, commlength) _write_short(self._file, self._nchannels) - self._nframes_pos = self._file.tell() + if self._form_length_pos is not None: + self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) - _write_short(self._file, self._sampwidth * 8) + if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'): + _write_short(self._file, 8) + else: + _write_short(self._file, self._sampwidth * 8) _write_float(self._file, self._framerate) if self._aifc: self._file.write(self._comptype) _write_string(self._file, self._compname) self._file.write(b'SSND') - self._ssnd_length_pos = self._file.tell() + if self._form_length_pos is not None: + self._ssnd_length_pos = self._file.tell() _write_ulong(self._file, self._datalength + 8) _write_ulong(self._file, 0) _write_ulong(self._file, 0) @@ -873,23 +879,27 @@ sys.argv.append('/usr/demos/data/audio/bach.aiff') fn = sys.argv[1] f = open(fn, 'r') - print("Reading", fn) - print("nchannels =", f.getnchannels()) - print("nframes =", f.getnframes()) - print("sampwidth =", f.getsampwidth()) - print("framerate =", f.getframerate()) - print("comptype =", f.getcomptype()) - print("compname =", f.getcompname()) - if sys.argv[2:]: - gn = sys.argv[2] - print("Writing", gn) - g = open(gn, 'w') - 
g.setparams(f.getparams()) - while 1: - data = f.readframes(1024) - if not data: - break - g.writeframes(data) - g.close() + try: + print("Reading", fn) + print("nchannels =", f.getnchannels()) + print("nframes =", f.getnframes()) + print("sampwidth =", f.getsampwidth()) + print("framerate =", f.getframerate()) + print("comptype =", f.getcomptype()) + print("compname =", f.getcompname()) + if sys.argv[2:]: + gn = sys.argv[2] + print("Writing", gn) + g = open(gn, 'w') + try: + g.setparams(f.getparams()) + while 1: + data = f.readframes(1024) + if not data: + break + g.writeframes(data) + finally: + g.close() + print("Done.") + finally: f.close() - print("Done.") diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -71,6 +71,7 @@ 'ArgumentDefaultsHelpFormatter', 'RawDescriptionHelpFormatter', 'RawTextHelpFormatter', + 'MetavarTypeHelpFormatter', 'Namespace', 'Action', 'ONE_OR_MORE', @@ -164,6 +165,8 @@ self._prog = prog self._indent_increment = indent_increment self._max_help_position = max_help_position + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) self._width = width self._current_indent = 0 @@ -335,7 +338,7 @@ else: line_len = len(indent) - 1 for part in parts: - if line_len + 1 + len(part) > text_width: + if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 @@ -419,7 +422,8 @@ # produce all arg strings elif not action.option_strings: - part = self._format_args(action, action.dest) + default = self._get_default_metavar_for_positional(action) + part = self._format_args(action, default) # if it's in a group, strip the outer [] if action in group_actions: @@ -441,7 +445,7 @@ # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: - default = action.dest.upper() + default = self._get_default_metavar_for_optional(action) args_string = 
self._format_args(action, default) part = '%s %s' % (option_string, args_string) @@ -474,7 +478,7 @@ def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent + text_width = max(self._width - self._current_indent, 11) indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' @@ -482,7 +486,7 @@ # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) - help_width = self._width - help_position + help_width = max(self._width - help_position, 11) action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) @@ -527,7 +531,8 @@ def _format_action_invocation(self, action): if not action.option_strings: - metavar, = self._metavar_formatter(action, action.dest)(1) + default = self._get_default_metavar_for_positional(action) + metavar, = self._metavar_formatter(action, default)(1) return metavar else: @@ -541,7 +546,7 @@ # if the Optional takes a value, format is: # -s ARGS, --long ARGS else: - default = action.dest.upper() + default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) for option_string in action.option_strings: parts.append('%s %s' % (option_string, args_string)) @@ -619,6 +624,12 @@ def _get_help_string(self, action): return action.help + def _get_default_metavar_for_optional(self, action): + return action.dest.upper() + + def _get_default_metavar_for_positional(self, action): + return action.dest + class RawDescriptionHelpFormatter(HelpFormatter): """Help message formatter which retains any formatting in descriptions. 
@@ -628,7 +639,7 @@ """ def _fill_text(self, text, width, indent): - return ''.join([indent + line for line in text.splitlines(True)]) + return ''.join(indent + line for line in text.splitlines(keepends=True)) class RawTextHelpFormatter(RawDescriptionHelpFormatter): @@ -659,6 +670,22 @@ return help +class MetavarTypeHelpFormatter(HelpFormatter): + """Help message formatter which uses the argument 'type' as the default + metavar value (instead of the argument 'dest') + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _get_default_metavar_for_optional(self, action): + return action.type.__name__ + + def _get_default_metavar_for_positional(self, action): + return action.type.__name__ + + + # ===================== # Options and Arguments # ===================== @@ -1554,7 +1581,6 @@ usage=None, description=None, epilog=None, - version=None, parents=[], formatter_class=HelpFormatter, prefix_chars='-', @@ -1563,14 +1589,6 @@ conflict_handler='error', add_help=True): - if version is not None: - import warnings - warnings.warn( - """The "version" argument to ArgumentParser is deprecated. 
""" - """Please use """ - """"add_argument(..., action='version', version="N", ...)" """ - """instead""", DeprecationWarning) - superinit = super(ArgumentParser, self).__init__ superinit(description=description, prefix_chars=prefix_chars, @@ -1584,7 +1602,6 @@ self.prog = prog self.usage = usage self.epilog = epilog - self.version = version self.formatter_class = formatter_class self.fromfile_prefix_chars = fromfile_prefix_chars self.add_help = add_help @@ -1599,7 +1616,7 @@ return string self.register('type', None, identity) - # add help and version arguments if necessary + # add help argument if necessary # (using explicit default to override global argument_default) default_prefix = '-' if '-' in prefix_chars else prefix_chars[0] if self.add_help: @@ -1607,12 +1624,6 @@ default_prefix+'h', default_prefix*2+'help', action='help', default=SUPPRESS, help=_('show this help message and exit')) - if self.version: - self.add_argument( - default_prefix+'v', default_prefix*2+'version', - action='version', default=SUPPRESS, - version=self.version, - help=_("show program's version number and exit")) # add parent arguments and defaults for parent in parents: @@ -1632,7 +1643,6 @@ 'prog', 'usage', 'description', - 'version', 'formatter_class', 'conflict_handler', 'add_help', @@ -1940,29 +1950,29 @@ # if we didn't consume all the argument strings, there were extras extras.extend(arg_strings[stop_index:]) - # if we didn't use all the Positional objects, there were too few - # arg strings supplied. - if positionals: - self.error(_('too few arguments')) - - # make sure all required actions were present, and convert defaults. 
+ # make sure all required actions were present and also convert + # action defaults which were not given as arguments + required_actions = [] for action in self._actions: if action not in seen_actions: if action.required: - name = _get_action_name(action) - self.error(_('argument %s is required') % name) + required_actions.append(_get_action_name(action)) else: # Convert action default now instead of doing it before # parsing arguments to avoid calling convert functions # twice (which may fail) if the argument was given, but # only if it was defined already in the namespace if (action.default is not None and - isinstance(action.default, str) and - hasattr(namespace, action.dest) and - action.default is getattr(namespace, action.dest)): + isinstance(action.default, str) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): setattr(namespace, action.dest, self._get_value(action, action.default)) + if required_actions: + self.error(_('the following arguments are required: %s') % + ', '.join(required_actions)) + # make sure all required groups had one option present for group in self._mutually_exclusive_groups: if group.required: @@ -2314,16 +2324,6 @@ # determine help from format above return formatter.format_help() - def format_version(self): - import warnings - warnings.warn( - 'The format_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - formatter = self._get_formatter() - formatter.add_text(self.version) - return formatter.format_help() - def _get_formatter(self): return self.formatter_class(prog=self.prog) @@ -2340,14 +2340,6 @@ file = _sys.stdout self._print_message(self.format_help(), file) - def print_version(self, file=None): - import warnings - warnings.warn( - 'The print_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - self._print_message(self.format_version(), 
file) - def _print_message(self, message, file=None): if message: if file is None: diff --git a/lib-python/3/ast.py b/lib-python/3/ast.py --- a/lib-python/3/ast.py +++ b/lib-python/3/ast.py @@ -25,7 +25,6 @@ :license: Python License. """ from _ast import * -from _ast import __version__ def parse(source, filename='', mode='exec'): diff --git a/lib-python/3/asynchat.py b/lib-python/3/asynchat.py --- a/lib-python/3/asynchat.py +++ b/lib-python/3/asynchat.py @@ -49,18 +49,6 @@ import asyncore from collections import deque -def buffer(obj, start=None, stop=None): - # if memoryview objects gain slicing semantics, - # this function will change for the better - # memoryview used for the TypeError - memoryview(obj) - if start == None: - start = 0 - if stop == None: - stop = len(obj) - x = obj[start:stop] - ## print("buffer type is: %s"%(type(x),)) - return x class async_chat (asyncore.dispatcher): """This is an abstract class. You must derive from this class, and add @@ -75,18 +63,15 @@ # sign of an application bug that we don't want to pass silently use_encoding = 0 - encoding = 'latin1' + encoding = 'latin-1' def __init__ (self, sock=None, map=None): # for string terminator matching self.ac_in_buffer = b'' - # we use a list here rather than cStringIO for a few reasons... - # del lst[:] is faster than sio.truncate(0) - # lst = [] is faster than sio.truncate(0) - # cStringIO will be gaining unicode support in py3k, which - # will negatively affect the performance of bytes compared to - # a ''.join() equivalent + # we use a list here rather than io.BytesIO for a few reasons... 
+ # del lst[:] is faster than bio.truncate(0) + # lst = [] is faster than bio.truncate(0) self.incoming = [] # we toss the use of the "simple producer" and replace it with @@ -240,7 +225,7 @@ # handle classic producer behavior obs = self.ac_out_buffer_size try: - data = buffer(first, 0, obs) + data = first[:obs] except TypeError: data = first.more() if data: diff --git a/lib-python/3/asyncore.py b/lib-python/3/asyncore.py --- a/lib-python/3/asyncore.py +++ b/lib-python/3/asyncore.py @@ -54,7 +54,7 @@ import os from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \ - ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \ + ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \ errorcode _DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE, @@ -143,11 +143,8 @@ try: r, w, e = select.select(r, w, e, timeout) - except select.error as err: - if err.args[0] != EINTR: - raise - else: - return + except InterruptedError: + return for fd in r: obj = map.get(fd) @@ -184,15 +181,10 @@ if obj.writable() and not obj.accepting: flags |= select.POLLOUT if flags: - # Only check for exceptions if object was either readable - # or writable. 
- flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL pollster.register(fd, flags) try: r = pollster.poll(timeout) - except select.error as err: - if err.args[0] != EINTR: - raise + except InterruptedError: r = [] for fd, flags in r: obj = map.get(fd) @@ -292,7 +284,7 @@ del map[fd] self._fileno = None - def create_socket(self, family, type): + def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM): self.family_and_type = family, type sock = socket.socket(family, type) sock.setblocking(0) diff --git a/lib-python/3/base64.py b/lib-python/3/base64.py --- a/lib-python/3/base64.py +++ b/lib-python/3/base64.py @@ -29,14 +29,16 @@ bytes_types = (bytes, bytearray) # Types acceptable as binary data - -def _translate(s, altchars): - if not isinstance(s, bytes_types): - raise TypeError("expected bytes, not %s" % s.__class__.__name__) - translation = bytearray(range(256)) - for k, v in altchars.items(): - translation[ord(k)] = v[0] - return s.translate(translation) +def _bytes_from_decode_data(s): + if isinstance(s, str): + try: + return s.encode('ascii') + except UnicodeEncodeError: + raise ValueError('string argument should contain only ASCII characters') + elif isinstance(s, bytes_types): + return s + else: + raise TypeError("argument should be bytes or ASCII string, not %s" % s.__class__.__name__) @@ -61,7 +63,7 @@ raise TypeError("expected bytes, not %s" % altchars.__class__.__name__) assert len(altchars) == 2, repr(altchars) - return _translate(encoded, {'+': altchars[0:1], '/': altchars[1:2]}) + return encoded.translate(bytes.maketrans(b'+/', altchars)) return encoded @@ -79,14 +81,11 @@ discarded prior to the padding check. If validate is True, non-base64-alphabet characters in the input result in a binascii.Error. 
""" - if not isinstance(s, bytes_types): - raise TypeError("expected bytes, not %s" % s.__class__.__name__) From noreply at buildbot.pypy.org Fri Apr 11 23:42:36 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 11 Apr 2014 23:42:36 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Branch for Python 3.3 support Message-ID: <20140411214236.0AEBA1C02AF@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70565:99e3d2848c35 Date: 2014-04-06 17:24 +0200 http://bitbucket.org/pypy/pypy/changeset/99e3d2848c35/ Log: Branch for Python 3.3 support From noreply at buildbot.pypy.org Sat Apr 12 02:12:23 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 12 Apr 2014 02:12:23 +0200 (CEST) Subject: [pypy-commit] pypy default: kill remnants of the long dead MeasuringDictImplementation Message-ID: <20140412001223.32F311C022D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70596:d821161dcac8 Date: 2014-04-11 17:05 -0700 http://bitbucket.org/pypy/pypy/changeset/d821161dcac8/ Log: kill remnants of the long dead MeasuringDictImplementation diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1277,7 +1277,6 @@ class TestBytesDictImplementation(BaseTestRDictImplementation): StrategyClass = BytesDictStrategy - #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() @@ -1289,9 +1288,6 @@ self.fill_impl() assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) -## class TestMeasuringDictImplementation(BaseTestRDictImplementation): -## ImplementionClass = MeasuringDictImplementation -## DevolvedClass = MeasuringDictImplementation class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): diff --git a/pypy/tool/readdictinfo.py b/pypy/tool/readdictinfo.py deleted 
file mode 100644 --- a/pypy/tool/readdictinfo.py +++ /dev/null @@ -1,115 +0,0 @@ -# this is for use with a pypy-c build with multidicts and using the -# MeasuringDictImplementation -- it will create a file called -# 'dictinfo.txt' in the local directory and this file will turn the -# contents back into DictInfo objects. - -# run with python -i ! - -import sys - -if __name__ == '__main__': - infile = open(sys.argv[1]) - - curr = None - slots = [] - for line in infile: - if line == '------------------\n': - if curr: - break - curr = 1 - else: - attr, val = [s.strip() for s in line.split(':')] - slots.append(attr) - - class DictInfo(object): - __slots__ = slots - - infile = open(sys.argv[1]) - - infos = [] - - for line in infile: - if line == '------------------\n': - curr = object.__new__(DictInfo) - infos.append(curr) - else: - attr, val = [s.strip() for s in line.split(':')] - if '.' in val: - val = float(val) - else: - val = int(val) - setattr(curr, attr, val) - -def histogram(infos, keyattr, *attrs): - r = {} - for info in infos: - v = getattr(info, keyattr) - l = r.setdefault(v, [0, {}]) - l[0] += 1 - for a in attrs: - d2 = l[1].setdefault(a, {}) - v2 = getattr(info, a) - d2[v2] = d2.get(v2, 0) + 1 - return sorted(r.items()) - -def reportDictInfos(): - d = {} - stillAlive = 0 - totLifetime = 0.0 - for info in infos: - for attr in slots: - if attr == 'maxcontents': - continue - v = getattr(info, attr) - if not isinstance(v, int): - continue - d[attr] = d.get(attr, 0) + v - if info.lifetime != -1.0: - totLifetime += info.lifetime - else: - stillAlive += 1 - print 'read info on', len(infos), 'dictionaries' - if stillAlive != len(infos): - print 'average lifetime', totLifetime/(len(infos) - stillAlive), - print '('+str(stillAlive), 'still alive at exit)' - print d - -def Rify(fname, *attributes): - output = open(fname, 'w') - for attr in attributes: - print >>output, attr, - print >>output - for info in infos: - for attr in attributes: - print >>output, 
getattr(info, attr), - print >>output - -if __name__ == '__main__': -# reportDictInfos() - - # interactive stuff: - - import __builtin__ - - def displayhook(v): - if v is not None: - __builtin__._ = v - pprint.pprint(v) - sys.displayhook = displayhook - - import pprint - try: - import readline - except ImportError: - pass - else: - import rlcompleter - readline.parse_and_bind('tab: complete') - - if len(sys.argv) > 2: - attrs = sys.argv[2].split(',') - if attrs == ['all']: - attrs = slots - Rify("R.txt", *attrs) - - diff --git a/pypy/tool/rundictbenchmarks.py b/pypy/tool/rundictbenchmarks.py deleted file mode 100644 --- a/pypy/tool/rundictbenchmarks.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys, os - -# this file runs some benchmarks with a pypy-c that is assumed to be -# built using the MeasuringDictImplementation. - -# it should be run with pypy/goal as the cwd, and you'll -# need to hack a copy of rst2html for yourself (svn docutils -# required). - -if __name__ == '__main__': - try: - os.unlink("dictinfo.txt") - except os.error: - pass - - progs = [('pystone', ['-c', 'from test import pystone; pystone.main()']), - ('richards', ['richards.py']), - ('docutils', ['rst2html.py', '../../doc/coding-guide.txt', 'foo.html']), - ('translate', ['translate.py', '--backendopt', '--no-compile', '--batch', - 'targetrpystonedalone.py']) - ] - - EXE = sys.argv[1] - - for suffix, args in progs: - os.spawnv(os.P_WAIT, EXE, [EXE] + args) - os.rename('dictinfo.txt', 'dictinfo-%s.txt'%suffix) From noreply at buildbot.pypy.org Sat Apr 12 02:12:25 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 12 Apr 2014 02:12:25 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140412001225.0ADB01C022D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70597:fe69dee13c0f Date: 2014-04-11 17:10 -0700 http://bitbucket.org/pypy/pypy/changeset/fe69dee13c0f/ Log: merge default diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- 
a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,3 +1,6 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -106,23 +106,43 @@ Differences related to garbage collection strategies ---------------------------------------------------- -Most of the garbage collectors used or implemented by PyPy are not based on +The garbage collectors used or implemented by PyPy are not based on reference counting, so the objects are not freed instantly when they are no longer reachable. The most obvious effect of this is that files are not promptly closed when they go out of scope. For files that are opened for writing, data can be left sitting in their output buffers for a while, making -the on-disk file appear empty or truncated. +the on-disk file appear empty or truncated. Moreover, you might reach your +OS's limit on the number of concurrently opened files. -Fixing this is essentially not possible without forcing a +Fixing this is essentially impossible without forcing a reference-counting approach to garbage collection. The effect that you get in CPython has clearly been described as a side-effect of the implementation and not a language design decision: programs relying on this are basically bogus. It would anyway be insane to try to enforce CPython's behavior in a language spec, given that it has no chance to be adopted by Jython or IronPython (or any other port of Python to Java or -.NET, like PyPy itself). +.NET). 
-This affects the precise time at which ``__del__`` methods are called, which +Even the naive idea of forcing a full GC when we're getting dangerously +close to the OS's limit can be very bad in some cases. If your program +leaks open files heavily, then it would work, but force a complete GC +cycle every n'th leaked file. The value of n is a constant, but the +program can take an arbitrary amount of memory, which makes a complete +GC cycle arbitrarily long. The end result is that PyPy would spend an +arbitrarily large fraction of its run time in the GC --- slowing down +the actual execution, not by 10% nor 100% nor 1000% but by essentially +any factor. + +To the best of our knowledge this problem has no better solution than +fixing the programs. If it occurs in 3rd-party code, this means going +to the authors and explaining the problem to them: they need to close +their open files in order to run on any non-CPython-based implementation +of Python. + +--------------------------------- + +Here are some more technical details. This issue affects the precise +time at which ``__del__`` methods are called, which is not reliable in PyPy (nor Jython nor IronPython). It also means that weak references may stay alive for a bit longer than expected. This makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + + diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -177,6 +177,12 @@ .. 
__: https://bitbucket.org/pypy/extradoc/src/tip/talk/icooolps2009/bolz-tracing-jit-final.pdf -as well as the `blog posts with the JIT tag.`__ +Chapters 5 and 6 of `Antonio Cuni's PhD thesis`__ contain an overview of how +Tracing JITs work in general and more informations about the concrete case of +PyPy's JIT. + +.. __: http://antocuni.eu/download/antocuni-phd-thesis.pdf + +The `blog posts with the JIT tag`__ might also contain additional information. .. __: http://morepypy.blogspot.com/search/label/jit diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -26,8 +26,8 @@ ============ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% of the speed of a -regular PyPy, comparing the JITting version in both cases. It is called +listed below, it should be in theory within 25%-50% slower than a +regular PyPy, comparing the JIT version in both cases. It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). @@ -55,9 +55,9 @@ interested in trying it out, you can download a Ubuntu 12.04 binary here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, but not stripped of debug symbols). The current version supports four -"segments", which means that it will run up to four threads in parallel -(in other words, you get a GIL effect again, but only if trying to -execute more than 4 threads). +"segments", which means that it will run up to four threads in parallel, +in other words it is running a thread pool up to 4 threads emulating normal +threads. 
To build a version from sources, you first need to compile a custom version of clang; we recommend downloading `llvm and clang like diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy @@ -51,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines)) + content = ".. This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1091,7 +1091,7 @@ def test_read_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1101,7 +1101,7 @@ def test_read_variable_as_unknown_length_array(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if 
not sys.platform.startswith("linux"): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1113,7 +1113,7 @@ def test_write_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -51,11 +51,9 @@ def test_fast_init_longlong_from_list(self): py3k_skip('XXX: strategies are currently broken') - if type(2 ** 50) is long: - large_int = 2 ** 30 - else: - large_int = 2 ** 50 import _cffi_backend + import sys + large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) LONGLONG = _cffi_backend.new_primitive_type('long long') P_LONGLONG = _cffi_backend.new_pointer_type(LONGLONG) LONGLONG_ARRAY = _cffi_backend.new_array_type(P_LONGLONG, None) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1297,7 +1297,6 @@ class TestBytesDictImplementation(BaseTestRDictImplementation): StrategyClass = BytesDictStrategy - #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() @@ -1310,9 +1309,6 @@ self.fill_impl() assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) -## class TestMeasuringDictImplementation(BaseTestRDictImplementation): -## ImplementionClass = MeasuringDictImplementation -## DevolvedClass = MeasuringDictImplementation class 
BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): diff --git a/pypy/tool/readdictinfo.py b/pypy/tool/readdictinfo.py deleted file mode 100644 --- a/pypy/tool/readdictinfo.py +++ /dev/null @@ -1,115 +0,0 @@ -# this is for use with a pypy-c build with multidicts and using the -# MeasuringDictImplementation -- it will create a file called -# 'dictinfo.txt' in the local directory and this file will turn the -# contents back into DictInfo objects. - -# run with python -i ! - -import sys - -if __name__ == '__main__': - infile = open(sys.argv[1]) - - curr = None - slots = [] - for line in infile: - if line == '------------------\n': - if curr: - break - curr = 1 - else: - attr, val = [s.strip() for s in line.split(':')] - slots.append(attr) - - class DictInfo(object): - __slots__ = slots - - infile = open(sys.argv[1]) - - infos = [] - - for line in infile: - if line == '------------------\n': - curr = object.__new__(DictInfo) - infos.append(curr) - else: - attr, val = [s.strip() for s in line.split(':')] - if '.' 
in val: - val = float(val) - else: - val = int(val) - setattr(curr, attr, val) - -def histogram(infos, keyattr, *attrs): - r = {} - for info in infos: - v = getattr(info, keyattr) - l = r.setdefault(v, [0, {}]) - l[0] += 1 - for a in attrs: - d2 = l[1].setdefault(a, {}) - v2 = getattr(info, a) - d2[v2] = d2.get(v2, 0) + 1 - return sorted(r.items()) - -def reportDictInfos(): - d = {} - stillAlive = 0 - totLifetime = 0.0 - for info in infos: - for attr in slots: - if attr == 'maxcontents': - continue - v = getattr(info, attr) - if not isinstance(v, int): - continue - d[attr] = d.get(attr, 0) + v - if info.lifetime != -1.0: - totLifetime += info.lifetime - else: - stillAlive += 1 - print 'read info on', len(infos), 'dictionaries' - if stillAlive != len(infos): - print 'average lifetime', totLifetime/(len(infos) - stillAlive), - print '('+str(stillAlive), 'still alive at exit)' - print d - -def Rify(fname, *attributes): - output = open(fname, 'w') - for attr in attributes: - print >>output, attr, - print >>output - for info in infos: - for attr in attributes: - print >>output, getattr(info, attr), - print >>output - -if __name__ == '__main__': -# reportDictInfos() - - # interactive stuff: - - import __builtin__ - - def displayhook(v): - if v is not None: - __builtin__._ = v - pprint.pprint(v) - sys.displayhook = displayhook - - import pprint - try: - import readline - except ImportError: - pass - else: - import rlcompleter - readline.parse_and_bind('tab: complete') - - if len(sys.argv) > 2: - attrs = sys.argv[2].split(',') - if attrs == ['all']: - attrs = slots - Rify("R.txt", *attrs) - - diff --git a/pypy/tool/rundictbenchmarks.py b/pypy/tool/rundictbenchmarks.py deleted file mode 100644 --- a/pypy/tool/rundictbenchmarks.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys, os - -# this file runs some benchmarks with a pypy-c that is assumed to be -# built using the MeasuringDictImplementation. 
- -# it should be run with pypy/goal as the cwd, and you'll -# need to hack a copy of rst2html for yourself (svn docutils -# required). - -if __name__ == '__main__': - try: - os.unlink("dictinfo.txt") - except os.error: - pass - - progs = [('pystone', ['-c', 'from test import pystone; pystone.main()']), - ('richards', ['richards.py']), - ('docutils', ['rst2html.py', '../../doc/coding-guide.txt', 'foo.html']), - ('translate', ['translate.py', '--backendopt', '--no-compile', '--batch', - 'targetrpystonedalone.py']) - ] - - EXE = sys.argv[1] - - for suffix, args in progs: - os.spawnv(os.P_WAIT, EXE, [EXE] + args) - os.rename('dictinfo.txt', 'dictinfo-%s.txt'%suffix) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -387,24 +387,17 @@ @arguments("descr") def opimpl_new(self, sizedescr): - resbox = self.execute_with_descr(rop.NEW, sizedescr) - self.metainterp.heapcache.new(resbox) - return resbox + return self.metainterp.execute_new(sizedescr) @arguments("descr") def opimpl_new_with_vtable(self, sizedescr): cpu = self.metainterp.cpu cls = heaptracker.descr2vtable(cpu, sizedescr) - resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls)) - self.metainterp.heapcache.new(resbox) - self.metainterp.heapcache.class_now_known(resbox) - return resbox + return self.metainterp.execute_new_with_vtable(ConstInt(cls)) @arguments("box", "descr") def opimpl_new_array(self, lengthbox, itemsizedescr): - resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, lengthbox) - self.metainterp.heapcache.new_array(resbox, lengthbox) - return resbox + return self.metainterp.execute_new_array(itemsizedescr, lengthbox) @specialize.arg(1) def _do_getarrayitem_gc_any(self, op, arraybox, indexbox, arraydescr): @@ -467,10 +460,8 @@ @arguments("box", "box", "box", "descr") def _opimpl_setarrayitem_gc_any(self, arraybox, indexbox, itembox, arraydescr): - 
self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox, - indexbox, itembox) - self.metainterp.heapcache.setarrayitem( - arraybox, indexbox, itembox, arraydescr) + self.metainterp.execute_setarrayitem_gc(arraydescr, arraybox, + indexbox, itembox) opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any @@ -623,21 +614,22 @@ tobox = self.metainterp.heapcache.getfield(box, fielddescr) if tobox is valuebox: return - # The following test is disabled because buggy. It is supposed + self.metainterp.execute_setfield_gc(fielddescr, box, valuebox) + # The following logic is disabled because buggy. It is supposed # to be: not(we're writing null into a freshly allocated object) # but the bug is that is_unescaped() can be True even after the # field cache is cleared --- see test_ajit:test_unescaped_write_zero - if 1: # tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): - self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) - self.metainterp.heapcache.setfield(box, valuebox, fielddescr) + # + # if tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): + # self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) + # self.metainterp.heapcache.setfield(box, valuebox, fielddescr) opimpl_setfield_gc_i = _opimpl_setfield_gc_any opimpl_setfield_gc_r = _opimpl_setfield_gc_any opimpl_setfield_gc_f = _opimpl_setfield_gc_any @arguments("box", "box", "box", "descr") def _opimpl_setinteriorfield_gc_any(self, array, index, value, descr): - self.execute_with_descr(rop.SETINTERIORFIELD_GC, descr, - array, index, value) + self.metainterp.execute_setinteriorfield_gc(descr, array, index, value) opimpl_setinteriorfield_gc_i = _opimpl_setinteriorfield_gc_any opimpl_setinteriorfield_gc_f = _opimpl_setinteriorfield_gc_any opimpl_setinteriorfield_gc_r = 
_opimpl_setinteriorfield_gc_any @@ -664,8 +656,8 @@ @arguments("box", "box", "box", "descr") def _opimpl_raw_store(self, addrbox, offsetbox, valuebox, arraydescr): - self.execute_with_descr(rop.RAW_STORE, arraydescr, - addrbox, offsetbox, valuebox) + self.metainterp.execute_raw_store(arraydescr, + addrbox, offsetbox, valuebox) opimpl_raw_store_i = _opimpl_raw_store opimpl_raw_store_f = _opimpl_raw_store @@ -1891,6 +1883,41 @@ self.attach_debug_info(op) return resbox + def execute_new_with_vtable(self, known_class): + resbox = self.execute_and_record(rop.NEW_WITH_VTABLE, None, + known_class) + self.heapcache.new(resbox) + self.heapcache.class_now_known(resbox) + return resbox + + def execute_new(self, typedescr): + resbox = self.execute_and_record(rop.NEW, typedescr) + self.heapcache.new(resbox) + return resbox + + def execute_new_array(self, itemsizedescr, lengthbox): + resbox = self.execute_and_record(rop.NEW_ARRAY, itemsizedescr, + lengthbox) + self.heapcache.new_array(resbox, lengthbox) + return resbox + + def execute_setfield_gc(self, fielddescr, box, valuebox): + self.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox) + self.heapcache.setfield(box, valuebox, fielddescr) + + def execute_setarrayitem_gc(self, arraydescr, arraybox, indexbox, itembox): + self.execute_and_record(rop.SETARRAYITEM_GC, arraydescr, + arraybox, indexbox, itembox) + self.heapcache.setarrayitem(arraybox, indexbox, itembox, arraydescr) + + def execute_setinteriorfield_gc(self, descr, array, index, value): + self.execute_and_record(rop.SETINTERIORFIELD_GC, descr, + array, index, value) + + def execute_raw_store(self, arraydescr, addrbox, offsetbox, valuebox): + self.execute_and_record(rop.RAW_STORE, arraydescr, + addrbox, offsetbox, valuebox) + def attach_debug_info(self, op): if (not we_are_translated() and op is not None diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py 
@@ -954,15 +954,14 @@ return virtualizable_boxes, virtualref_boxes def allocate_with_vtable(self, known_class): - return self.metainterp.execute_and_record(rop.NEW_WITH_VTABLE, - None, known_class) + return self.metainterp.execute_new_with_vtable(known_class) def allocate_struct(self, typedescr): - return self.metainterp.execute_and_record(rop.NEW, typedescr) + return self.metainterp.execute_new(typedescr) def allocate_array(self, length, arraydescr): - return self.metainterp.execute_and_record(rop.NEW_ARRAY, - arraydescr, ConstInt(length)) + lengthbox = ConstInt(length) + return self.metainterp.execute_new_array(arraydescr, lengthbox) def allocate_raw_buffer(self, size): cic = self.metainterp.staticdata.callinfocollection @@ -1034,8 +1033,7 @@ else: kind = INT fieldbox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETFIELD_GC, descr, - structbox, fieldbox) + self.metainterp.execute_setfield_gc(descr, structbox, fieldbox) def setinteriorfield(self, index, array, fieldnum, descr): if descr.is_pointer_field(): @@ -1045,8 +1043,8 @@ else: kind = INT fieldbox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETINTERIORFIELD_GC, descr, - array, ConstInt(index), fieldbox) + self.metainterp.execute_setinteriorfield_gc(descr, array, + ConstInt(index), fieldbox) def setarrayitem_int(self, arraybox, index, fieldnum, arraydescr): self._setarrayitem(arraybox, index, fieldnum, arraydescr, INT) @@ -1059,9 +1057,8 @@ def _setarrayitem(self, arraybox, index, fieldnum, arraydescr, kind): itembox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETARRAYITEM_GC, - arraydescr, arraybox, - ConstInt(index), itembox) + self.metainterp.execute_setarrayitem_gc(arraydescr, arraybox, + ConstInt(index), itembox) def setrawbuffer_item(self, bufferbox, fieldnum, offset, arraydescr): if arraydescr.is_array_of_pointers(): @@ -1071,8 +1068,8 @@ else: kind = INT itembox = self.decode_box(fieldnum, kind) - return 
self.metainterp.execute_and_record(rop.RAW_STORE, arraydescr, bufferbox, - ConstInt(offset), itembox) + self.metainterp.execute_raw_store(arraydescr, bufferbox, + ConstInt(offset), itembox) def decode_int(self, tagged): return self.decode_box(tagged, INT) diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -14,7 +14,6 @@ from rpython.rlib.longlong2float import float2longlong, longlong2float from rpython.rlib.rarithmetic import ovfcheck, is_valid_int from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.tool.cbuild import ExternalCompilationInfo class BasicTests: @@ -3229,12 +3228,9 @@ self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): - eci = ExternalCompilationInfo() - if sys.platform == "win32": - eci = ExternalCompilationInfo(libraries=["msvcrt"]) T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True, compilation_info=eci) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) # Not a real lock, has all the same properties with respect to GIL # release though, so good for this test. 
class Lock(object): @@ -3583,6 +3579,24 @@ 'guard_true': 2, 'int_sub': 2, 'jump': 1, 'guard_false': 1}) + def test_virtual_after_bridge(self): + myjitdriver = JitDriver(greens = [], reds = ["n"]) + @look_inside_iff(lambda x: isvirtual(x)) + def g(x): + return x[0] + def f(n): + while n > 0: + myjitdriver.jit_merge_point(n=n) + x = [1] + if n & 1: # bridge + n -= g(x) + else: + n -= g(x) + return n + res = self.meta_interp(f, [10]) + assert res == 0 + self.check_resops(call=0, call_may_force=0, new_array=0) + def test_convert_from_SmallFunctionSetPBCRepr_to_FunctionsPBCRepr(self): f1 = lambda n: n+1 @@ -3922,13 +3936,10 @@ self.interp_operations(f, []) def test_external_call(self): - eci = ExternalCompilationInfo() - if sys.platform == "win32": - eci = ExternalCompilationInfo(libraries=["msvcrt"]) from rpython.rlib.objectmodel import invoke_around_extcall T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, compilation_info=eci) + external = rffi.llexternal("time", [T], rffi.TIME_T) class Oups(Exception): pass @@ -3952,9 +3963,9 @@ external(lltype.nullptr(T.TO)) return len(state.l) - res = self.interp_operations(f, []) + res = self.interp_operations(f, [], supports_longlong=True) assert res == 2 - res = self.interp_operations(f, []) + res = self.interp_operations(f, [], supports_longlong=True) assert res == 2 self.check_operations_history(call_release_gil=1, call_may_force=0) diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -93,6 +93,32 @@ self.resboxes.append(resbox) return resbox + def execute_new_with_vtable(self, known_class): + return self.execute_and_record(rop.NEW_WITH_VTABLE, None, + known_class) + + def execute_new(self, typedescr): + return self.execute_and_record(rop.NEW, typedescr) + + def execute_new_array(self, itemsizedescr, lengthbox): + return 
self.execute_and_record(rop.NEW_ARRAY, itemsizedescr, + lengthbox) + + def execute_setfield_gc(self, fielddescr, box, valuebox): + self.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox) + + def execute_setarrayitem_gc(self, arraydescr, arraybox, indexbox, itembox): + self.execute_and_record(rop.SETARRAYITEM_GC, arraydescr, + arraybox, indexbox, itembox) + + def execute_setinteriorfield_gc(self, descr, array, index, value): + self.execute_and_record(rop.SETINTERIORFIELD_GC, descr, + array, index, value) + + def execute_raw_store(self, arraydescr, addrbox, offsetbox, valuebox): + self.execute_and_record(rop.RAW_STORE, arraydescr, + addrbox, offsetbox, valuebox) + S = lltype.GcStruct('S') gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)) gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)) diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -8,7 +8,7 @@ self.source_pos = source_pos def copy(self): - return Token(self.name, self.source, self.source_pos) + return self.__class__(self.name, self.source, self.source_pos) def __eq__(self, other): # for testing only @@ -57,9 +57,9 @@ self.ignore = dict.fromkeys(ignore) self.matcher = self.automaton.make_lexing_code() - def get_runner(self, text, eof=False): + def get_runner(self, text, eof=False, token_class=None): return LexingDFARunner(self.matcher, self.automaton, text, - self.ignore, eof) + self.ignore, eof, token_class=token_class) def tokenize(self, text, eof=False): """Return a list of Token's from text.""" @@ -184,7 +184,12 @@ return self class LexingDFARunner(AbstractLexingDFARunner): - def __init__(self, matcher, automaton, text, ignore, eof=False): + def __init__(self, matcher, automaton, text, ignore, eof=False, + token_class=None): + if token_class is None: + self.token_class = Token + else: + self.token_class = token_class AbstractLexingDFARunner.__init__(self, matcher, 
automaton, text, eof) self.ignore = ignore @@ -195,6 +200,6 @@ assert (eof and state == -1) or 0 <= state < len(self.automaton.names) source_pos = SourcePos(index, self.lineno, self.columnno) if eof: - return Token("EOF", "EOF", source_pos) - return Token(self.automaton.names[self.last_matched_state], - text, source_pos) + return self.token_class("EOF", "EOF", source_pos) + return self.token_class(self.automaton.names[self.last_matched_state], + text, source_pos) From noreply at buildbot.pypy.org Sat Apr 12 02:12:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 12 Apr 2014 02:12:26 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: merge py3k Message-ID: <20140412001226.C9BD31C022D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70598:b143fe6459cf Date: 2014-04-11 17:11 -0700 http://bitbucket.org/pypy/pypy/changeset/b143fe6459cf/ Log: merge py3k diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -215,7 +215,7 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withmethodcache", True), + ("objspace.std.withtypeversion", True), ]), BoolOption("withliststrategies", diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,3 +1,6 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. 
_`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -106,23 +106,43 @@ Differences related to garbage collection strategies ---------------------------------------------------- -Most of the garbage collectors used or implemented by PyPy are not based on +The garbage collectors used or implemented by PyPy are not based on reference counting, so the objects are not freed instantly when they are no longer reachable. The most obvious effect of this is that files are not promptly closed when they go out of scope. For files that are opened for writing, data can be left sitting in their output buffers for a while, making -the on-disk file appear empty or truncated. +the on-disk file appear empty or truncated. Moreover, you might reach your +OS's limit on the number of concurrently opened files. -Fixing this is essentially not possible without forcing a +Fixing this is essentially impossible without forcing a reference-counting approach to garbage collection. The effect that you get in CPython has clearly been described as a side-effect of the implementation and not a language design decision: programs relying on this are basically bogus. It would anyway be insane to try to enforce CPython's behavior in a language spec, given that it has no chance to be adopted by Jython or IronPython (or any other port of Python to Java or -.NET, like PyPy itself). +.NET). -This affects the precise time at which ``__del__`` methods are called, which +Even the naive idea of forcing a full GC when we're getting dangerously +close to the OS's limit can be very bad in some cases. If your program +leaks open files heavily, then it would work, but force a complete GC +cycle every n'th leaked file. 
The value of n is a constant, but the +program can take an arbitrary amount of memory, which makes a complete +GC cycle arbitrarily long. The end result is that PyPy would spend an +arbitrarily large fraction of its run time in the GC --- slowing down +the actual execution, not by 10% nor 100% nor 1000% but by essentially +any factor. + +To the best of our knowledge this problem has no better solution than +fixing the programs. If it occurs in 3rd-party code, this means going +to the authors and explaining the problem to them: they need to close +their open files in order to run on any non-CPython-based implementation +of Python. + +--------------------------------- + +Here are some more technical details. This issue affects the precise +time at which ``__del__`` methods are called, which is not reliable in PyPy (nor Jython nor IronPython). It also means that weak references may stay alive for a bit longer than expected. This makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less @@ -315,6 +335,15 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* on CPython, ``[].__add__`` is a ``method-wrapper``, and + ``list.__add__`` is a ``slot wrapper``. On PyPy these are normal + bound or unbound method objects. This can occasionally confuse some + tools that inspect built-in types. For example, the standard + library ``inspect`` module has a function ``ismethod()`` that returns + True on unbound method objects but False on method-wrappers or slot + wrappers. On PyPy we can't tell the difference, so + ``ismethod([].__add__) == ismethod(list.__add__) == True``. + * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. Mutating the dict will change the type and vice versa. 
For builtin types, a dictionary will be returned that diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + + diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -177,6 +177,12 @@ .. __: https://bitbucket.org/pypy/extradoc/src/tip/talk/icooolps2009/bolz-tracing-jit-final.pdf -as well as the `blog posts with the JIT tag.`__ +Chapters 5 and 6 of `Antonio Cuni's PhD thesis`__ contain an overview of how +Tracing JITs work in general and more informations about the concrete case of +PyPy's JIT. + +.. __: http://antocuni.eu/download/antocuni-phd-thesis.pdf + +The `blog posts with the JIT tag`__ might also contain additional information. .. __: http://morepypy.blogspot.com/search/label/jit diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -26,8 +26,8 @@ ============ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% of the speed of a -regular PyPy, comparing the JITting version in both cases. It is called +listed below, it should be in theory within 25%-50% slower than a +regular PyPy, comparing the JIT version in both cases. It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). @@ -55,9 +55,9 @@ interested in trying it out, you can download a Ubuntu 12.04 binary here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, but not stripped of debug symbols). The current version supports four -"segments", which means that it will run up to four threads in parallel -(in other words, you get a GIL effect again, but only if trying to -execute more than 4 threads). 
+"segments", which means that it will run up to four threads in parallel, +in other words it is running a thread pool up to 4 threads emulating normal +threads. To build a version from sources, you first need to compile a custom version of clang; we recommend downloading `llvm and clang like diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy @@ -51,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines)) + content = ".. This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1091,7 +1091,7 @@ def test_read_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1101,7 +1101,7 @@ def test_read_variable_as_unknown_length_array(): ## FIXME: this test assumes glibc specific behavior, it's not 
compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1113,7 +1113,7 @@ def test_write_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -51,11 +51,9 @@ def test_fast_init_longlong_from_list(self): py3k_skip('XXX: strategies are currently broken') - if type(2 ** 50) is long: - large_int = 2 ** 30 - else: - large_int = 2 ** 50 import _cffi_backend + import sys + large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) LONGLONG = _cffi_backend.new_primitive_type('long long') P_LONGLONG = _cffi_backend.new_pointer_type(LONGLONG) LONGLONG_ARRAY = _cffi_backend.new_array_type(P_LONGLONG, None) diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -38,9 +38,9 @@ * taken care of by distutils.) 
*/ # ifdef _DEBUG # error("debug first with cpython") -# pragma comment(lib,"python27.lib") +# pragma comment(lib,"python32.lib") # else -# pragma comment(lib,"python27.lib") +# pragma comment(lib,"python32.lib") # endif /* _DEBUG */ # endif #endif /* _MSC_VER */ diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -586,6 +586,10 @@ return self.len - self.pos return 0 + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class BaseKeyIterator(BaseIteratorImplementation): next_key = _new_next('key') @@ -1105,6 +1109,10 @@ w_ret = space.newtuple([new_inst, space.newtuple([w_res])]) return w_ret + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class W_DictMultiIterKeysObject(W_BaseDictMultiIterObject): def descr_next(self, space): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -871,8 +871,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a TypeCell, which may change without changing the version_tag - assert space.config.objspace.std.withmethodcache - _, w_descr = w_type._pure_lookup_where_with_method_cache( + _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # selector = ("", INVALID) @@ -930,9 +929,8 @@ # in the class, this time taking care of the result: it can be either a # quasi-constant class attribute, or actually a TypeCell --- which we # must not cache. (It should not be None here, but you never know...) 
- assert space.config.objspace.std.withmethodcache - _, w_method = w_type._pure_lookup_where_with_method_cache(name, - version_tag) + _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( + name, version_tag) if w_method is None or isinstance(w_method, TypeCell): return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1316,7 +1316,6 @@ class TestUnicodeDictImplementation(BaseTestRDictImplementation): StrategyClass = UnicodeDictStrategy - #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() @@ -1330,9 +1329,6 @@ self.fill_impl() assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) -## class TestMeasuringDictImplementation(BaseTestRDictImplementation): -## ImplementionClass = MeasuringDictImplementation -## DevolvedClass = MeasuringDictImplementation class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -369,6 +369,12 @@ w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag) return w_class, unwrap_cell(space, w_value) + def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): + if w_self.space.config.objspace.std.withmethodcache: + return w_self._pure_lookup_where_with_method_cache(name, version_tag) + else: + return w_self._lookup_where_all_typeobjects(name) + @elidable def _pure_lookup_where_with_method_cache(w_self, name, version_tag): space = w_self.space diff --git a/pypy/tool/readdictinfo.py b/pypy/tool/readdictinfo.py deleted file mode 100644 --- a/pypy/tool/readdictinfo.py +++ /dev/null @@ -1,115 +0,0 @@ -# 
this is for use with a pypy-c build with multidicts and using the -# MeasuringDictImplementation -- it will create a file called -# 'dictinfo.txt' in the local directory and this file will turn the -# contents back into DictInfo objects. - -# run with python -i ! - -import sys - -if __name__ == '__main__': - infile = open(sys.argv[1]) - - curr = None - slots = [] - for line in infile: - if line == '------------------\n': - if curr: - break - curr = 1 - else: - attr, val = [s.strip() for s in line.split(':')] - slots.append(attr) - - class DictInfo(object): - __slots__ = slots - - infile = open(sys.argv[1]) - - infos = [] - - for line in infile: - if line == '------------------\n': - curr = object.__new__(DictInfo) - infos.append(curr) - else: - attr, val = [s.strip() for s in line.split(':')] - if '.' in val: - val = float(val) - else: - val = int(val) - setattr(curr, attr, val) - -def histogram(infos, keyattr, *attrs): - r = {} - for info in infos: - v = getattr(info, keyattr) - l = r.setdefault(v, [0, {}]) - l[0] += 1 - for a in attrs: - d2 = l[1].setdefault(a, {}) - v2 = getattr(info, a) - d2[v2] = d2.get(v2, 0) + 1 - return sorted(r.items()) - -def reportDictInfos(): - d = {} - stillAlive = 0 - totLifetime = 0.0 - for info in infos: - for attr in slots: - if attr == 'maxcontents': - continue - v = getattr(info, attr) - if not isinstance(v, int): - continue - d[attr] = d.get(attr, 0) + v - if info.lifetime != -1.0: - totLifetime += info.lifetime - else: - stillAlive += 1 - print 'read info on', len(infos), 'dictionaries' - if stillAlive != len(infos): - print 'average lifetime', totLifetime/(len(infos) - stillAlive), - print '('+str(stillAlive), 'still alive at exit)' - print d - -def Rify(fname, *attributes): - output = open(fname, 'w') - for attr in attributes: - print >>output, attr, - print >>output - for info in infos: - for attr in attributes: - print >>output, getattr(info, attr), - print >>output - -if __name__ == '__main__': -# reportDictInfos() - - # 
interactive stuff: - - import __builtin__ - - def displayhook(v): - if v is not None: - __builtin__._ = v - pprint.pprint(v) - sys.displayhook = displayhook - - import pprint - try: - import readline - except ImportError: - pass - else: - import rlcompleter - readline.parse_and_bind('tab: complete') - - if len(sys.argv) > 2: - attrs = sys.argv[2].split(',') - if attrs == ['all']: - attrs = slots - Rify("R.txt", *attrs) - - diff --git a/pypy/tool/rundictbenchmarks.py b/pypy/tool/rundictbenchmarks.py deleted file mode 100644 --- a/pypy/tool/rundictbenchmarks.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys, os - -# this file runs some benchmarks with a pypy-c that is assumed to be -# built using the MeasuringDictImplementation. - -# it should be run with pypy/goal as the cwd, and you'll -# need to hack a copy of rst2html for yourself (svn docutils -# required). - -if __name__ == '__main__': - try: - os.unlink("dictinfo.txt") - except os.error: - pass - - progs = [('pystone', ['-c', 'from test import pystone; pystone.main()']), - ('richards', ['richards.py']), - ('docutils', ['rst2html.py', '../../doc/coding-guide.txt', 'foo.html']), - ('translate', ['translate.py', '--backendopt', '--no-compile', '--batch', - 'targetrpystonedalone.py']) - ] - - EXE = sys.argv[1] - - for suffix, args in progs: - os.spawnv(os.P_WAIT, EXE, [EXE] + args) - os.rename('dictinfo.txt', 'dictinfo-%s.txt'%suffix) diff --git a/rpython/jit/backend/x86/support.py b/rpython/jit/backend/x86/support.py --- a/rpython/jit/backend/x86/support.py +++ b/rpython/jit/backend/x86/support.py @@ -7,11 +7,12 @@ extra = ['-DPYPY_X86_CHECK_SSE2'] if sys.platform != 'win32': extra += ['-msse2', '-mfpmath=sse'] + else: + extra += ['/arch:SSE2'] else: extra = [] # the -m options above are always on by default on x86-64 -if sys.platform != 'win32': - extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra +extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra ensure_sse2_floats = 
rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = extra, diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -387,24 +387,17 @@ @arguments("descr") def opimpl_new(self, sizedescr): - resbox = self.execute_with_descr(rop.NEW, sizedescr) - self.metainterp.heapcache.new(resbox) - return resbox + return self.metainterp.execute_new(sizedescr) @arguments("descr") def opimpl_new_with_vtable(self, sizedescr): cpu = self.metainterp.cpu cls = heaptracker.descr2vtable(cpu, sizedescr) - resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls)) - self.metainterp.heapcache.new(resbox) - self.metainterp.heapcache.class_now_known(resbox) - return resbox + return self.metainterp.execute_new_with_vtable(ConstInt(cls)) @arguments("box", "descr") def opimpl_new_array(self, lengthbox, itemsizedescr): - resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, lengthbox) - self.metainterp.heapcache.new_array(resbox, lengthbox) - return resbox + return self.metainterp.execute_new_array(itemsizedescr, lengthbox) @specialize.arg(1) def _do_getarrayitem_gc_any(self, op, arraybox, indexbox, arraydescr): @@ -467,10 +460,8 @@ @arguments("box", "box", "box", "descr") def _opimpl_setarrayitem_gc_any(self, arraybox, indexbox, itembox, arraydescr): - self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox, - indexbox, itembox) - self.metainterp.heapcache.setarrayitem( - arraybox, indexbox, itembox, arraydescr) + self.metainterp.execute_setarrayitem_gc(arraydescr, arraybox, + indexbox, itembox) opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any @@ -623,21 +614,22 @@ tobox = self.metainterp.heapcache.getfield(box, fielddescr) if tobox is valuebox: return - # The following test is disabled because buggy. 
It is supposed + self.metainterp.execute_setfield_gc(fielddescr, box, valuebox) + # The following logic is disabled because buggy. It is supposed # to be: not(we're writing null into a freshly allocated object) # but the bug is that is_unescaped() can be True even after the # field cache is cleared --- see test_ajit:test_unescaped_write_zero - if 1: # tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): - self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) - self.metainterp.heapcache.setfield(box, valuebox, fielddescr) + # + # if tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): + # self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) + # self.metainterp.heapcache.setfield(box, valuebox, fielddescr) opimpl_setfield_gc_i = _opimpl_setfield_gc_any opimpl_setfield_gc_r = _opimpl_setfield_gc_any opimpl_setfield_gc_f = _opimpl_setfield_gc_any @arguments("box", "box", "box", "descr") def _opimpl_setinteriorfield_gc_any(self, array, index, value, descr): - self.execute_with_descr(rop.SETINTERIORFIELD_GC, descr, - array, index, value) + self.metainterp.execute_setinteriorfield_gc(descr, array, index, value) opimpl_setinteriorfield_gc_i = _opimpl_setinteriorfield_gc_any opimpl_setinteriorfield_gc_f = _opimpl_setinteriorfield_gc_any opimpl_setinteriorfield_gc_r = _opimpl_setinteriorfield_gc_any @@ -664,8 +656,8 @@ @arguments("box", "box", "box", "descr") def _opimpl_raw_store(self, addrbox, offsetbox, valuebox, arraydescr): - self.execute_with_descr(rop.RAW_STORE, arraydescr, - addrbox, offsetbox, valuebox) + self.metainterp.execute_raw_store(arraydescr, + addrbox, offsetbox, valuebox) opimpl_raw_store_i = _opimpl_raw_store opimpl_raw_store_f = _opimpl_raw_store @@ -1891,6 +1883,41 @@ self.attach_debug_info(op) return resbox + def execute_new_with_vtable(self, known_class): + resbox = 
self.execute_and_record(rop.NEW_WITH_VTABLE, None, + known_class) + self.heapcache.new(resbox) + self.heapcache.class_now_known(resbox) + return resbox + + def execute_new(self, typedescr): + resbox = self.execute_and_record(rop.NEW, typedescr) + self.heapcache.new(resbox) + return resbox + + def execute_new_array(self, itemsizedescr, lengthbox): + resbox = self.execute_and_record(rop.NEW_ARRAY, itemsizedescr, + lengthbox) + self.heapcache.new_array(resbox, lengthbox) + return resbox + + def execute_setfield_gc(self, fielddescr, box, valuebox): + self.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox) + self.heapcache.setfield(box, valuebox, fielddescr) + + def execute_setarrayitem_gc(self, arraydescr, arraybox, indexbox, itembox): + self.execute_and_record(rop.SETARRAYITEM_GC, arraydescr, + arraybox, indexbox, itembox) + self.heapcache.setarrayitem(arraybox, indexbox, itembox, arraydescr) + + def execute_setinteriorfield_gc(self, descr, array, index, value): + self.execute_and_record(rop.SETINTERIORFIELD_GC, descr, + array, index, value) + + def execute_raw_store(self, arraydescr, addrbox, offsetbox, valuebox): + self.execute_and_record(rop.RAW_STORE, arraydescr, + addrbox, offsetbox, valuebox) + def attach_debug_info(self, op): if (not we_are_translated() and op is not None diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -954,15 +954,14 @@ return virtualizable_boxes, virtualref_boxes def allocate_with_vtable(self, known_class): - return self.metainterp.execute_and_record(rop.NEW_WITH_VTABLE, - None, known_class) + return self.metainterp.execute_new_with_vtable(known_class) def allocate_struct(self, typedescr): - return self.metainterp.execute_and_record(rop.NEW, typedescr) + return self.metainterp.execute_new(typedescr) def allocate_array(self, length, arraydescr): - return self.metainterp.execute_and_record(rop.NEW_ARRAY, - arraydescr, 
ConstInt(length)) + lengthbox = ConstInt(length) + return self.metainterp.execute_new_array(arraydescr, lengthbox) def allocate_raw_buffer(self, size): cic = self.metainterp.staticdata.callinfocollection @@ -1034,8 +1033,7 @@ else: kind = INT fieldbox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETFIELD_GC, descr, - structbox, fieldbox) + self.metainterp.execute_setfield_gc(descr, structbox, fieldbox) def setinteriorfield(self, index, array, fieldnum, descr): if descr.is_pointer_field(): @@ -1045,8 +1043,8 @@ else: kind = INT fieldbox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETINTERIORFIELD_GC, descr, - array, ConstInt(index), fieldbox) + self.metainterp.execute_setinteriorfield_gc(descr, array, + ConstInt(index), fieldbox) def setarrayitem_int(self, arraybox, index, fieldnum, arraydescr): self._setarrayitem(arraybox, index, fieldnum, arraydescr, INT) @@ -1059,9 +1057,8 @@ def _setarrayitem(self, arraybox, index, fieldnum, arraydescr, kind): itembox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETARRAYITEM_GC, - arraydescr, arraybox, - ConstInt(index), itembox) + self.metainterp.execute_setarrayitem_gc(arraydescr, arraybox, + ConstInt(index), itembox) def setrawbuffer_item(self, bufferbox, fieldnum, offset, arraydescr): if arraydescr.is_array_of_pointers(): @@ -1071,8 +1068,8 @@ else: kind = INT itembox = self.decode_box(fieldnum, kind) - return self.metainterp.execute_and_record(rop.RAW_STORE, arraydescr, bufferbox, - ConstInt(offset), itembox) + self.metainterp.execute_raw_store(arraydescr, bufferbox, + ConstInt(offset), itembox) def decode_int(self, tagged): return self.decode_box(tagged, INT) diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -14,7 +14,6 @@ from rpython.rlib.longlong2float import float2longlong, longlong2float 
from rpython.rlib.rarithmetic import ovfcheck, is_valid_int from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.tool.cbuild import ExternalCompilationInfo class BasicTests: @@ -3229,12 +3228,9 @@ self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): - eci = ExternalCompilationInfo() - if sys.platform == "win32": - eci = ExternalCompilationInfo(libraries=["msvcrt"]) T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True, compilation_info=eci) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) # Not a real lock, has all the same properties with respect to GIL # release though, so good for this test. class Lock(object): @@ -3583,6 +3579,24 @@ 'guard_true': 2, 'int_sub': 2, 'jump': 1, 'guard_false': 1}) + def test_virtual_after_bridge(self): + myjitdriver = JitDriver(greens = [], reds = ["n"]) + @look_inside_iff(lambda x: isvirtual(x)) + def g(x): + return x[0] + def f(n): + while n > 0: + myjitdriver.jit_merge_point(n=n) + x = [1] + if n & 1: # bridge + n -= g(x) + else: + n -= g(x) + return n + res = self.meta_interp(f, [10]) + assert res == 0 + self.check_resops(call=0, call_may_force=0, new_array=0) + def test_convert_from_SmallFunctionSetPBCRepr_to_FunctionsPBCRepr(self): f1 = lambda n: n+1 @@ -3922,13 +3936,10 @@ self.interp_operations(f, []) def test_external_call(self): - eci = ExternalCompilationInfo() - if sys.platform == "win32": - eci = ExternalCompilationInfo(libraries=["msvcrt"]) from rpython.rlib.objectmodel import invoke_around_extcall T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, compilation_info=eci) + external = rffi.llexternal("time", [T], rffi.TIME_T) class Oups(Exception): pass @@ -3952,9 +3963,9 @@ external(lltype.nullptr(T.TO)) return len(state.l) - res = self.interp_operations(f, []) + res = self.interp_operations(f, [], supports_longlong=True) assert res == 2 - res = 
self.interp_operations(f, []) + res = self.interp_operations(f, [], supports_longlong=True) assert res == 2 self.check_operations_history(call_release_gil=1, call_may_force=0) diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -93,6 +93,32 @@ self.resboxes.append(resbox) return resbox + def execute_new_with_vtable(self, known_class): + return self.execute_and_record(rop.NEW_WITH_VTABLE, None, + known_class) + + def execute_new(self, typedescr): + return self.execute_and_record(rop.NEW, typedescr) + + def execute_new_array(self, itemsizedescr, lengthbox): + return self.execute_and_record(rop.NEW_ARRAY, itemsizedescr, + lengthbox) + + def execute_setfield_gc(self, fielddescr, box, valuebox): + self.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox) + + def execute_setarrayitem_gc(self, arraydescr, arraybox, indexbox, itembox): + self.execute_and_record(rop.SETARRAYITEM_GC, arraydescr, + arraybox, indexbox, itembox) + + def execute_setinteriorfield_gc(self, descr, array, index, value): + self.execute_and_record(rop.SETINTERIORFIELD_GC, descr, + array, index, value) + + def execute_raw_store(self, arraydescr, addrbox, offsetbox, valuebox): + self.execute_and_record(rop.RAW_STORE, arraydescr, + addrbox, offsetbox, valuebox) + S = lltype.GcStruct('S') gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)) gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)) diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -8,7 +8,7 @@ self.source_pos = source_pos def copy(self): - return Token(self.name, self.source, self.source_pos) + return self.__class__(self.name, self.source, self.source_pos) def __eq__(self, other): # for testing only @@ -57,9 +57,9 @@ self.ignore = dict.fromkeys(ignore) 
self.matcher = self.automaton.make_lexing_code() - def get_runner(self, text, eof=False): + def get_runner(self, text, eof=False, token_class=None): return LexingDFARunner(self.matcher, self.automaton, text, - self.ignore, eof) + self.ignore, eof, token_class=token_class) def tokenize(self, text, eof=False): """Return a list of Token's from text.""" @@ -184,7 +184,12 @@ return self class LexingDFARunner(AbstractLexingDFARunner): - def __init__(self, matcher, automaton, text, ignore, eof=False): + def __init__(self, matcher, automaton, text, ignore, eof=False, + token_class=None): + if token_class is None: + self.token_class = Token + else: + self.token_class = token_class AbstractLexingDFARunner.__init__(self, matcher, automaton, text, eof) self.ignore = ignore @@ -195,6 +200,6 @@ assert (eof and state == -1) or 0 <= state < len(self.automaton.names) source_pos = SourcePos(index, self.lineno, self.columnno) if eof: - return Token("EOF", "EOF", source_pos) - return Token(self.automaton.names[self.last_matched_state], - text, source_pos) + return self.token_class("EOF", "EOF", source_pos) + return self.token_class(self.automaton.names[self.last_matched_state], + text, source_pos) diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -331,7 +331,8 @@ ch2 = ord(s[pos]) # Check for low surrogate and combine the two to # form a UCS4 value - if ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF: + if ((allow_surrogates or MAXUNICODE < 65536) and + ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF): ch3 = ((ch - 0xD800) << 10 | (ch2 - 0xDC00)) + 0x10000 pos += 1 _encodeUCS4(result, ch3) diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -803,3 +803,20 @@ u, len(u), True) == r'\ud800\udc00' assert runicode.unicode_encode_raw_unicode_escape( u, len(u), True) == r'\ud800\udc00' + + def 
test_encode_surrogate_pair_utf8(self): + u = runicode.UNICHR(0xD800) + runicode.UNICHR(0xDC00) + if runicode.MAXUNICODE < 65536: + # Narrow unicode build, consider utf16 surrogate pairs + assert runicode.unicode_encode_utf_8( + u, len(u), True, allow_surrogates=True) == '\xf0\x90\x80\x80' + assert runicode.unicode_encode_utf_8( + u, len(u), True, allow_surrogates=False) == '\xf0\x90\x80\x80' + else: + # Wide unicode build, merge utf16 surrogate pairs only when allowed + assert runicode.unicode_encode_utf_8( + u, len(u), True, allow_surrogates=True) == '\xf0\x90\x80\x80' + # Surrogates not merged, encoding fails. + py.test.raises( + UnicodeEncodeError, runicode.unicode_encode_utf_8, + u, len(u), True, allow_surrogates=False) From noreply at buildbot.pypy.org Sat Apr 12 09:24:47 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 12 Apr 2014 09:24:47 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: better debug prints when translated Message-ID: <20140412072447.654C01C022D@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70599:0841797a4108 Date: 2014-04-12 09:23 +0200 http://bitbucket.org/pypy/pypy/changeset/0841797a4108/ Log: better debug prints when translated diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -526,7 +526,7 @@ values = [self.getvalue(arg) for arg in jumpop.getarglist()] debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + virtual_state.debug_print("Looking for ", metainterp_sd=self.optimizer.metainterp_sd) for target in cell_token.target_tokens: if not target.virtual_state: @@ -546,7 +546,7 @@ else: debugmsg = 'Matched ' except VirtualStatesCantMatch, e: - target.virtual_state.debug_print(debugmsg, e.state.bad) + target.virtual_state.debug_print(debugmsg, e.state.bad, 
metainterp_sd=self.optimizer.metainterp_sd) continue assert patchguardop is not None or (extra_guards == [] and len(target.short_preamble) == 1) diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -1,5 +1,5 @@ from rpython.jit.metainterp import resume -from rpython.jit.metainterp.history import BoxInt, ConstInt, BoxPtr, Const +from rpython.jit.metainterp.history import BoxInt, ConstInt, BoxPtr, Const, ConstPtr from rpython.jit.metainterp.optimizeopt import virtualize from rpython.jit.metainterp.optimizeopt.intutils import IntUnbounded from rpython.jit.metainterp.optimizeopt.optimizer import (LEVEL_CONSTANT, @@ -82,7 +82,7 @@ def _enum(self, virtual_state): raise NotImplementedError - def debug_print(self, indent, seen, bad): + def debug_print(self, indent, seen, bad, metainterp_sd): mark = '' if self in bad: mark = '*' @@ -90,7 +90,7 @@ if self not in seen: seen[self] = True for s in self.fieldstate: - s.debug_print(indent + " ", seen, bad) + s.debug_print(indent + " ", seen, bad, metainterp_sd) else: debug_print(indent + " ...") @@ -405,22 +405,34 @@ self.position_in_notvirtuals = len(virtual_state.notvirtuals) virtual_state.notvirtuals.append(self) - def debug_print(self, indent, seen, bad): + def debug_print(self, indent, seen, bad, metainterp_sd=None): mark = '' if self in bad: mark = '*' - if we_are_translated(): - l = {LEVEL_UNKNOWN: 'Unknown', - LEVEL_NONNULL: 'NonNull', - LEVEL_KNOWNCLASS: 'KnownClass', - LEVEL_CONSTANT: 'Constant', - }[self.level] + if self.level == LEVEL_UNKNOWN: + l = "Unknown" + elif self.level == LEVEL_NONNULL: + l = "NonNull" + elif self.level == LEVEL_KNOWNCLASS: + addr = self.known_class.getaddr() + if metainterp_sd: + name = metainterp_sd.get_name_from_address(addr) + else: + name = "?" 
+ l = "KnownClass(%s)" % name else: - l = {LEVEL_UNKNOWN: 'Unknown', - LEVEL_NONNULL: 'NonNull', - LEVEL_KNOWNCLASS: 'KnownClass(%r)' % self.known_class, - LEVEL_CONSTANT: 'Constant(%r)' % self.constbox, - }[self.level] + assert self.level == LEVEL_CONSTANT + const = self.constbox + if isinstance(const, ConstInt): + l = "ConstInt(%s)" % (const.value, ) + elif isinstance(const, ConstPtr): + if const.value: + l = "ConstPtr" + else: + l = "ConstPtr(null)" + else: + assert isinstance(const, ConstFloat) + l = "ConstFloat(%s)" % cons.getfloat() lb = '' if self.lenbound: @@ -481,13 +493,13 @@ return inputargs - def debug_print(self, hdr='', bad=None): + def debug_print(self, hdr='', bad=None, metainterp_sd=None): if bad is None: bad = {} debug_print(hdr + "VirtualState():") seen = {} for s in self.state: - s.debug_print(" ", seen, bad) + s.debug_print(" ", seen, bad, metainterp_sd) class VirtualStateAdder(resume.ResumeDataVirtualAdder): From noreply at buildbot.pypy.org Sat Apr 12 10:36:57 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 12 Apr 2014 10:36:57 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Rewrite the DictProxy type, so that it does not depend on Message-ID: <20140412083657.8B4A71D24BB@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70600:bb19ba3f6538 Date: 2014-04-12 10:35 +0200 http://bitbucket.org/pypy/pypy/changeset/bb19ba3f6538/ Log: Rewrite the DictProxy type, so that it does not depend on collections.abc which imports (and freezes) a lot of stdlib modules. +Translation fixes. 
diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -5,8 +5,8 @@ from pypy.module.cpyext.pyobject import PyObject, PyObjectP, borrow_from from pypy.module.cpyext.pyobject import RefcountState from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall +from pypy.module.cpyext.dictproxyobject import W_DictProxyObject from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import specialize @cpython_api([], PyObject) def PyDict_New(space): @@ -195,25 +195,9 @@ return 0 return 1 - at specialize.memo() -def make_frozendict(space): - return space.appexec([], '''(): - from collections.abc import Mapping - class FrozenDict(Mapping): - def __init__(self, *args, **kwargs): - self._d = dict(*args, **kwargs) - def __iter__(self): - return iter(self._d) - def __len__(self): - return len(self._d) - def __getitem__(self, key): - return self._d[key] - return FrozenDict''') - @cpython_api([PyObject], PyObject) def PyDictProxy_New(space, w_dict): - w_frozendict = make_frozendict(space) - return space.call_function(w_frozendict, w_dict) + return space.wrap(W_DictProxyObject(w_dict)) @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyDict_HasOnlyStringKeys(space, w_dict): diff --git a/pypy/module/cpyext/dictproxyobject.py b/pypy/module/cpyext/dictproxyobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/dictproxyobject.py @@ -0,0 +1,61 @@ +# Read-only proxy for mappings. PyPy does not have a separate type for +# type.__dict__, so PyDictProxy_New has to use a custom read-only mapping. + +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +from pypy.interpreter.typedef import TypeDef, interp2app + +class W_DictProxyObject(W_Root): + "Read-only proxy for mappings." 
+ + def __init__(self, w_mapping): + self.w_mapping = w_mapping + + def descr_len(self, space): + return space.len(self.w_mapping) + + def descr_getitem(self, space, w_key): + return space.getitem(self.w_mapping, w_key) + + def descr_contains(self, space, w_key): + return space.contains(self.w_mapping, w_key) + + def descr_iter(self, space): + return space.iter(self.w_mapping) + + def descr_str(self, space): + return space.str(self.w_mapping) + + def descr_repr(self, space): + return space.repr(self.w_mapping) + + @unwrap_spec(w_default=WrappedDefault(None)) + def get_w(self, space, w_key, w_default): + return space.call_method(self.w_mapping, "get", w_key, w_default) + + def keys_w(self, space): + return space.call_method(self.w_mapping, "keys") + + def values_w(self, space): + return space.call_method(self.w_mapping, "values") + + def items_w(self, space): + return space.call_method(self.w_mapping, "items") + + def copy_w(self, space): + return space.call_method(self.w_mapping, "copy") + +W_DictProxyObject.typedef = TypeDef( + 'mappingproxy', + __len__ = interp2app(W_DictProxyObject.descr_len), + __getitem__ = interp2app(W_DictProxyObject.descr_getitem), + __contains__ = interp2app(W_DictProxyObject.descr_contains), + __iter__ = interp2app(W_DictProxyObject.descr_iter), + __str__ = interp2app(W_DictProxyObject.descr_str), + __repr__ = interp2app(W_DictProxyObject.descr_repr), + get = interp2app(W_DictProxyObject.get_w), + keys = interp2app(W_DictProxyObject.keys_w), + values = interp2app(W_DictProxyObject.values_w), + items = interp2app(W_DictProxyObject.items_w), + copy = interp2app(W_DictProxyObject.copy_w) + ) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -53,8 +53,9 @@ @cpython_api([PyObject], PyObject) def PyImport_ReloadModule(space, w_mod): - from pypy.module.imp.importing import reload - return reload(space, w_mod) + w_import = 
space.builtin.get('__import__') + w_imp = space.call_function(w_import, space.wrap('imp')) + return space.call_method(w_imp, 'reload', w_mod) @cpython_api([CONST_STRING], PyObject) def PyImport_AddModule(space, name): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -129,6 +129,14 @@ find_module=interp2app(W_NullImporter.find_module_w), ) +def _prepare_module(space, w_mod, filename, pkgdir): + w = space.wrap + space.sys.setmodule(w_mod) + space.setattr(w_mod, w('__file__'), space.wrap(filename)) + space.setattr(w_mod, w('__doc__'), space.w_None) + if pkgdir is not None: + space.setattr(w_mod, w('__path__'), space.newlist([w(pkgdir)])) + def add_module(space, w_name): w_mod = check_sys_modules(space, w_name) if w_mod is None: @@ -377,6 +385,15 @@ if const is not None and isinstance(const, PyCode): update_code_filenames(space, const, pathname, oldname) +def _get_long(s): + a = ord(s[0]) + b = ord(s[1]) + c = ord(s[2]) + d = ord(s[3]) + if d >= 0x80: + d -= 0x100 + return a | (b<<8) | (c<<16) | (d<<24) + def read_compiled_module(space, cpathname, strbuf): """ Read a code object from a file and check it for validity """ @@ -386,3 +403,25 @@ raise oefmt(space.w_ImportError, "Non-code object in %s", cpathname) return w_code + at jit.dont_look_inside +def load_compiled_module(space, w_modulename, w_mod, cpathname, magic, + timestamp, source, write_paths=True): + """ + Load a module from a compiled file, execute it, and return its + module object. 
+ """ + if magic != get_pyc_magic(space): + raise oefmt(space.w_ImportError, "Bad magic number in %s", cpathname) + #print "loading pyc file:", cpathname + code_w = read_compiled_module(space, cpathname, source) + try: + optimize = space.sys.get_flag('optimize') + except RuntimeError: + # during bootstrapping + optimize = 0 + if optimize >= 2: + code_w.remove_docstrings(space) + + exec_code_module(space, w_mod, code_w, cpathname, cpathname, write_paths) + + return w_mod diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -68,7 +68,13 @@ if not space.config.objspace.usemodules.cpyext: raise OperationError(space.w_ImportError, space.wrap( "Not implemented")) - importing.load_c_extension(space, filename, space.str_w(w_modulename)) + + # the next line is mandatory to init cpyext + space.getbuiltinmodule("cpyext") + + from pypy.module.cpyext.api import load_extension_module + load_extension_module(space, filename, space.str_w(w_modulename)) + return importing.check_sys_modules(space, w_modulename) def new_module(space, w_name): From noreply at buildbot.pypy.org Sat Apr 12 11:57:32 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 12 Apr 2014 11:57:32 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: oops Message-ID: <20140412095732.D23121C07C7@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70601:fa32db9e5db8 Date: 2014-04-12 11:37 +0200 http://bitbucket.org/pypy/pypy/changeset/fa32db9e5db8/ Log: oops diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -653,7 +653,7 @@ try: importer = hook(filename) except ImportError: - pass + continue # It's the name of a directory or a zip file. 
# put the filename in sys.path[0] and import # the module __main__ From noreply at buildbot.pypy.org Sat Apr 12 11:57:34 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 12 Apr 2014 11:57:34 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Update list of tests Message-ID: <20140412095734.101071C07C7@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70602:c6251c15f906 Date: 2014-04-12 11:55 +0200 http://bitbucket.org/pypy/pypy/changeset/c6251c15f906/ Log: Update list of tests diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -98,7 +98,7 @@ RegrTest('test___all__.py', core=True), RegrTest('test___future__.py', core=True), RegrTest('test__locale.py', usemodules='_locale'), - #RegrTest('test__osx_support.py'), + RegrTest('test__osx_support.py'), RegrTest('test_abc.py'), RegrTest('test_abstract_numbers.py'), RegrTest('test_aifc.py'), @@ -118,6 +118,7 @@ RegrTest('test_binop.py', core=True), RegrTest('test_bisect.py', core=True, usemodules='_bisect'), RegrTest('test_bool.py', core=True), + RegrTest('test_buffer.py', core=True), RegrTest('test_bufio.py', core=True), RegrTest('test_builtin.py', core=True, usemodules='binascii'), RegrTest('test_bytes.py', usemodules='struct binascii'), @@ -125,8 +126,8 @@ RegrTest('test_calendar.py'), RegrTest('test_call.py', core=True), RegrTest('test_capi.py', usemodules='cpyext'), - RegrTest('test_cfgparser.py'), RegrTest('test_cgi.py'), + RegrTest('test_cgitb.py'), RegrTest('test_charmapcodec.py', core=True), RegrTest('test_class.py', core=True), RegrTest('test_cmath.py', core=True), @@ -134,6 +135,7 @@ RegrTest('test_cmd_line.py'), RegrTest('test_cmd_line_script.py'), RegrTest('test_code.py', core=True), + RegrTest('test_code_module.py'), RegrTest('test_codeccallbacks.py', core=True), RegrTest('test_codecencodings_cn.py', usemodules='_multibytecodec'), RegrTest('test_codecencodings_hk.py', usemodules='_multibytecodec'), 
@@ -157,11 +159,13 @@ RegrTest('test_complex.py', core=True), RegrTest('test_concurrent_futures.py', skip="XXX: deadlocks" if sys.platform == 'win32' else False), + RegrTest('test_configparser.py'), RegrTest('test_contains.py', core=True), RegrTest('test_contextlib.py', usemodules="thread"), RegrTest('test_copy.py', core=True), RegrTest('test_copyreg.py', core=True), RegrTest('test_cprofile.py'), + RegrTest('test_crashers.py'), RegrTest('test_crypt.py', usemodules='crypt'), RegrTest('test_csv.py', usemodules='_csv'), RegrTest('test_ctypes.py', usemodules="_rawffi thread cpyext"), @@ -177,6 +181,7 @@ RegrTest('test_deque.py', core=True, usemodules='_collections'), RegrTest('test_descr.py', core=True, usemodules='_weakref'), RegrTest('test_descrtut.py', core=True), + RegrTest('test_devpoll.py'), RegrTest('test_dict.py', core=True), RegrTest('test_dictcomps.py', core=True), RegrTest('test_dictviews.py', core=True), @@ -189,7 +194,7 @@ RegrTest('test_dummy_thread.py', core=True), RegrTest('test_dummy_threading.py', core=True), RegrTest('test_dynamic.py'), - RegrTest('test_email.py'), + RegrTest('test_email', skip="XXX is a directory"), RegrTest('test_enumerate.py', core=True), RegrTest('test_eof.py', core=True), RegrTest('test_epoll.py'), @@ -197,8 +202,10 @@ RegrTest('test_exception_variations.py'), RegrTest('test_exceptions.py', core=True), RegrTest('test_extcall.py', core=True), + RegrTest('test_faulthandler.py'), RegrTest('test_fcntl.py', usemodules='fcntl'), RegrTest('test_file.py', usemodules="posix", core=True), + RegrTest('test_file_eintr.py'), RegrTest('test_filecmp.py', core=True), RegrTest('test_fileinput.py', core=True), RegrTest('test_fileio.py'), @@ -213,8 +220,6 @@ RegrTest('test_funcattrs.py', core=True), RegrTest('test_functools.py'), RegrTest('test_future.py', core=True), - RegrTest('test_future1.py', core=True), - RegrTest('test_future2.py', core=True), RegrTest('test_future3.py', core=True), RegrTest('test_future4.py', core=True), 
RegrTest('test_future5.py', core=True), @@ -225,6 +230,7 @@ RegrTest('test_genexps.py', core=True, usemodules='_weakref'), RegrTest('test_getargs2.py', usemodules='binascii', skip=True), RegrTest('test_getopt.py', core=True), + RegrTest('test_getpass.py'), RegrTest('test_gettext.py'), RegrTest('test_glob.py', core=True), RegrTest('test_global.py', core=True), @@ -241,22 +247,25 @@ RegrTest('test_http_cookies.py'), RegrTest('test_httplib.py'), RegrTest('test_httpservers.py'), + RegrTest('test_idle.py'), RegrTest('test_imaplib.py'), + RegrTest('test_imghdr.py'), RegrTest('test_imp.py', core=True, usemodules='thread'), RegrTest('test_import.py', core=True), RegrTest('test_importhooks.py', core=True), - RegrTest('test_importlib.py'), + RegrTest('test_importlib', 'XXX is a directory'), RegrTest('test_index.py'), RegrTest('test_inspect.py'), RegrTest('test_int.py', core=True), RegrTest('test_int_literal.py', core=True), RegrTest('test_io.py', core=True, usemodules='array binascii'), RegrTest('test_ioctl.py'), + RegrTest('test_ipaddress.py'), RegrTest('test_isinstance.py', core=True), RegrTest('test_iter.py', core=True), RegrTest('test_iterlen.py', core=True, usemodules="_collections itertools"), RegrTest('test_itertools.py', core=True, usemodules="itertools struct"), - RegrTest('test_json.py'), + RegrTest('test_json', skip="XXX is a directory"), RegrTest('test_keywordonlyarg.py'), RegrTest('test_kqueue.py'), RegrTest('test_largefile.py'), @@ -268,8 +277,11 @@ RegrTest('test_logging.py', usemodules='thread'), RegrTest('test_long.py', core=True), RegrTest('test_longexp.py', core=True), + RegrTest('test_lzma.py'), RegrTest('test_macpath.py'), + RegrTest('test_macurl2path.py'), RegrTest('test_mailbox.py'), + RegrTest('test_mailcap.py'), RegrTest('test_marshal.py', core=True), RegrTest('test_math.py', core=True, usemodules='math'), RegrTest('test_memoryio.py'), @@ -283,7 +295,7 @@ RegrTest('test_msilib.py'), RegrTest('test_multibytecodec.py', usemodules='_multibytecodec'), 
RegrTest('test_multiprocessing.py', skip="XXX: deadlocks the buildbots"), - RegrTest('test_mutants.py', core="possibly"), + RegrTest('test_namespace_pkgs.py'), RegrTest('test_netrc.py'), RegrTest('test_nis.py'), RegrTest('test_nntplib.py'), @@ -306,7 +318,9 @@ RegrTest('test_pep292.py'), RegrTest('test_pep3120.py'), RegrTest('test_pep3131.py'), + RegrTest('test_pep3151.py'), RegrTest('test_pep352.py'), + RegrTest('test_pep380.py'), RegrTest('test_pickle.py', core=True), RegrTest('test_pickletools.py', core=False), RegrTest('test_pipes.py'), @@ -327,6 +341,7 @@ RegrTest('test_property.py', core=True), RegrTest('test_pstats.py'), RegrTest('test_pty.py', usemodules='fcntl termios select'), + RegrTest('test_pulldom.py'), RegrTest('test_pwd.py', usemodules="pwd"), RegrTest('test_py_compile.py'), RegrTest('test_pyclbr.py'), @@ -367,6 +382,7 @@ RegrTest('test_sqlite.py', usemodules="thread _rawffi zlib"), RegrTest('test_ssl.py', usemodules='_ssl _socket select'), RegrTest('test_startfile.py'), + RegrTest('test_stat.py'), RegrTest('test_strftime.py'), RegrTest('test_string.py', core=True), RegrTest('test_stringprep.py'), @@ -380,6 +396,7 @@ RegrTest('test_sunau.py'), RegrTest('test_sundry.py'), RegrTest('test_super.py', core=True), + RegrTest('test_support.py'), RegrTest('test_symtable.py', skip="implementation detail"), RegrTest('test_syntax.py', core=True), RegrTest('test_sys.py', core=True, usemodules='struct'), @@ -404,7 +421,7 @@ RegrTest('test_timeout.py'), RegrTest('test_tk.py'), RegrTest('test_tokenize.py'), - #RegrTest('test_tools.py'), + RegrTest('test_tools.py'), RegrTest('test_trace.py'), RegrTest('test_traceback.py', core=True), RegrTest('test_ttk_guionly.py'), @@ -433,17 +450,20 @@ RegrTest('test_userstring.py', core=True), RegrTest('test_uu.py'), RegrTest('test_uuid.py'), + RegrTest('test_venv.py'), RegrTest('test_wait3.py', usemodules="thread"), RegrTest('test_wait4.py', usemodules="thread"), RegrTest('test_warnings.py', core=True), 
RegrTest('test_wave.py'), RegrTest('test_weakref.py', core=True, usemodules='_weakref'), RegrTest('test_weakset.py'), + RegrTest('test_webbrowser.py'), RegrTest('test_winreg.py'), RegrTest('test_winsound.py'), RegrTest('test_with.py'), RegrTest('test_wsgiref.py'), RegrTest('test_xdrlib.py'), + RegrTest('test_xml_dom_minicompat.py'), RegrTest('test_xml_etree.py'), RegrTest('test_xml_etree_c.py'), RegrTest('test_xmlrpc.py'), From noreply at buildbot.pypy.org Sat Apr 12 21:24:21 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 12 Apr 2014 21:24:21 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add gc_collect() after del, to prevent deadlock. Message-ID: <20140412192421.BDAC01C022D@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70603:6296484b2ec6 Date: 2014-04-12 13:07 +0200 http://bitbucket.org/pypy/pypy/changeset/6296484b2ec6/ Log: Add gc_collect() after del, to prevent deadlock. diff --git a/lib-python/3/test/test_concurrent_futures.py b/lib-python/3/test/test_concurrent_futures.py --- a/lib-python/3/test/test_concurrent_futures.py +++ b/lib-python/3/test/test_concurrent_futures.py @@ -143,6 +143,7 @@ executor.map(abs, range(-5, 5)) threads = executor._threads del executor + test.support.gc_collect() for t in threads: t.join() From noreply at buildbot.pypy.org Sat Apr 12 21:24:22 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 12 Apr 2014 21:24:22 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Skip a hanging test in test_sys Message-ID: <20140412192422.DDB651C022D@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70604:06c40fed05e2 Date: 2014-04-12 15:19 +0200 http://bitbucket.org/pypy/pypy/changeset/06c40fed05e2/ Log: Skip a hanging test in test_sys diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py --- a/lib-python/3/test/test_sys.py +++ b/lib-python/3/test/test_sys.py @@ -213,6 +213,7 @@ finally: sys.setrecursionlimit(oldlimit) + 
@unittest.skipIf(True, 'Fixme: hangs with pypy') def test_recursionlimit_fatalerror(self): # A fatal error occurs if a second recursion limit is hit when recovering # from a first one. From noreply at buildbot.pypy.org Sat Apr 12 21:24:24 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 12 Apr 2014 21:24:24 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Introduce gateway.Unwrapper, a convenient way to write custom unwrap_spec functions, Message-ID: <20140412192424.17A881C022D@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70605:fb069da00160 Date: 2014-04-12 17:57 +0200 http://bitbucket.org/pypy/pypy/changeset/fb069da00160/ Log: Introduce gateway.Unwrapper, a convenient way to write custom unwrap_spec functions, Similar to the "O&" spec in PyArg_ParseTuple. Use it in binascii: "a2b" functions now accept ASCII-only unicode strings. (CPython Issue #13637) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -53,10 +53,24 @@ #________________________________________________________________ + +class Unwrapper(object): + """A base class for custom unwrap_spec items. + + Subclasses must override unwrap(). 
+ """ + def _freeze_(self): + return True + + def unwrap(self, space, w_value): + """NOT_RPYTHON""" + raise NotImplementedError + + class UnwrapSpecRecipe(object): "NOT_RPYTHON" - bases_order = [W_Root, ObjSpace, Arguments, object] + bases_order = [W_Root, ObjSpace, Arguments, Unwrapper, object] def dispatch(self, el, *args): if isinstance(el, str): @@ -159,6 +173,9 @@ def visit_truncatedint_w(self, el, app_sig): self.checked_space_method(el, app_sig) + def visit__Unwrapper(self, el, app_sig): + self.checked_space_method(el, app_sig) + def visit__ObjSpace(self, el, app_sig): self.orig_arg() @@ -218,6 +235,10 @@ self.run_args.append("space.descr_self_interp_w(%s, %s)" % (self.use(typ), self.scopenext())) + def visit__Unwrapper(self, typ): + self.run_args.append("%s().unwrap(space, %s)" % + (self.use(typ), self.scopenext())) + def visit__ObjSpace(self, el): self.run_args.append('space') @@ -364,6 +385,10 @@ self.unwrap.append("space.descr_self_interp_w(%s, %s)" % (self.use(typ), self.nextarg())) + def visit__Unwrapper(self, typ): + self.unwrap.append("%s().unwrap(space, %s)" % + (self.use(typ), self.nextarg())) + def visit__ObjSpace(self, el): if self.finger > 1: raise FastFuncNotSupported diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -531,6 +531,23 @@ raises(gateway.OperationError, space.call_function, w_app_g3_u, w(42)) + def test_interp2app_unwrap_spec_unwrapper(self): + space = self.space + class Unwrapper(gateway.Unwrapper): + def unwrap(self, space, w_value): + return space.int_w(w_value) + + w = space.wrap + def g3_u(space, value): + return space.wrap(value + 1) + app_g3_u = gateway.interp2app_temp(g3_u, + unwrap_spec=[gateway.ObjSpace, + Unwrapper]) + assert self.space.eq_w( + space.call_function(w(app_g3_u), w(42)), w(43)) + raises(gateway.OperationError, space.call_function, + w(app_g3_u), w(None)) + def 
test_interp2app_classmethod(self): space = self.space w = space.wrap diff --git a/pypy/module/binascii/interp_base64.py b/pypy/module/binascii/interp_base64.py --- a/pypy/module/binascii/interp_base64.py +++ b/pypy/module/binascii/interp_base64.py @@ -2,6 +2,7 @@ from pypy.interpreter.gateway import unwrap_spec from rpython.rlib.rstring import StringBuilder from pypy.module.binascii.interp_binascii import raise_Error +from pypy.module.binascii.interp_binascii import AsciiBufferUnwrapper from rpython.rlib.rarithmetic import ovfcheck # ____________________________________________________________ @@ -34,8 +35,7 @@ table_a2b_base64 = ''.join(map(_transform, table_a2b_base64)) assert len(table_a2b_base64) == 256 - - at unwrap_spec(ascii='bufferstr') + at unwrap_spec(ascii=AsciiBufferUnwrapper) def a2b_base64(space, ascii): "Decode a line of base64 data." diff --git a/pypy/module/binascii/interp_binascii.py b/pypy/module/binascii/interp_binascii.py --- a/pypy/module/binascii/interp_binascii.py +++ b/pypy/module/binascii/interp_binascii.py @@ -1,4 +1,5 @@ from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import Unwrapper class Cache: def __init__(self, space): @@ -13,3 +14,11 @@ def raise_Incomplete(space, msg): w_error = space.fromcache(Cache).w_incomplete raise OperationError(w_error, space.wrap(msg)) + +# a2b functions accept bytes and buffers, but also ASCII strings. 
+class AsciiBufferUnwrapper(Unwrapper): + def unwrap(self, space, w_value): + if space.isinstance_w(w_value, space.w_unicode): + w_value = space.call_method(w_value, "encode", space.wrap("ascii")) + return space.bufferstr_w(w_value) + diff --git a/pypy/module/binascii/interp_hexlify.py b/pypy/module/binascii/interp_hexlify.py --- a/pypy/module/binascii/interp_hexlify.py +++ b/pypy/module/binascii/interp_hexlify.py @@ -3,6 +3,7 @@ from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import ovfcheck from pypy.module.binascii.interp_binascii import raise_Error +from pypy.module.binascii.interp_binascii import AsciiBufferUnwrapper # ____________________________________________________________ @@ -42,7 +43,7 @@ raise_Error(space, 'Non-hexadecimal digit found') _char2value._always_inline_ = True - at unwrap_spec(hexstr='bufferstr') + at unwrap_spec(hexstr=AsciiBufferUnwrapper) def unhexlify(space, hexstr): '''Binary data of hexadecimal representation. hexstr must contain an even number of hex digits (upper or lower case). diff --git a/pypy/module/binascii/interp_hqx.py b/pypy/module/binascii/interp_hqx.py --- a/pypy/module/binascii/interp_hqx.py +++ b/pypy/module/binascii/interp_hqx.py @@ -2,6 +2,7 @@ from pypy.interpreter.gateway import unwrap_spec from rpython.rlib.rstring import StringBuilder from pypy.module.binascii.interp_binascii import raise_Error, raise_Incomplete +from pypy.module.binascii.interp_binascii import AsciiBufferUnwrapper from rpython.rlib.rarithmetic import ovfcheck # ____________________________________________________________ @@ -62,7 +63,7 @@ ] table_a2b_hqx = ''.join(map(chr, table_a2b_hqx)) - at unwrap_spec(ascii='bufferstr') + at unwrap_spec(ascii=AsciiBufferUnwrapper) def a2b_hqx(space, ascii): """Decode .hqx coding. 
Returns (bin, done).""" diff --git a/pypy/module/binascii/interp_qp.py b/pypy/module/binascii/interp_qp.py --- a/pypy/module/binascii/interp_qp.py +++ b/pypy/module/binascii/interp_qp.py @@ -1,5 +1,6 @@ from pypy.interpreter.gateway import unwrap_spec from rpython.rlib.rstring import StringBuilder +from pypy.module.binascii.interp_binascii import AsciiBufferUnwrapper MAXLINESIZE = 76 @@ -14,7 +15,7 @@ return ord(c) - (ord('a') - 10) hexval._always_inline_ = True - at unwrap_spec(data='bufferstr', header=int) + at unwrap_spec(data=AsciiBufferUnwrapper, header=int) def a2b_qp(space, data, header=0): "Decode a string of qp-encoded data." diff --git a/pypy/module/binascii/interp_uu.py b/pypy/module/binascii/interp_uu.py --- a/pypy/module/binascii/interp_uu.py +++ b/pypy/module/binascii/interp_uu.py @@ -1,6 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from rpython.rlib.rstring import StringBuilder from pypy.module.binascii.interp_binascii import raise_Error +from pypy.module.binascii.interp_binascii import AsciiBufferUnwrapper # ____________________________________________________________ @@ -29,7 +30,7 @@ _a2b_write._always_inline_ = True - at unwrap_spec(ascii='bufferstr') + at unwrap_spec(ascii=AsciiBufferUnwrapper) def a2b_uu(space, ascii): "Decode a line of uuencoded data." 
diff --git a/pypy/module/binascii/test/test_binascii.py b/pypy/module/binascii/test/test_binascii.py --- a/pypy/module/binascii/test/test_binascii.py +++ b/pypy/module/binascii/test/test_binascii.py @@ -58,6 +58,9 @@ raises(self.binascii.Error, self.binascii.a2b_uu, bogus + b'\n') raises(self.binascii.Error, self.binascii.a2b_uu, bogus + b'\r\n') raises(self.binascii.Error, self.binascii.a2b_uu, bogus + b' \r\n') + # + assert self.binascii.a2b_uu(u"!6") == b"X" + raises(UnicodeEncodeError, self.binascii.a2b_uu, u"caf\xe9") def test_b2a_uu(self): for input, expected in [ @@ -111,6 +114,9 @@ b"abcdefg", ]: raises(self.binascii.Error, self.binascii.a2b_base64, bogus) + # + assert self.binascii.a2b_base64(u"Yg==\n") == b"b" + raises(UnicodeEncodeError, self.binascii.a2b_base64, u"caf\xe9") def test_b2a_base64(self): for input, expected in [ @@ -149,6 +155,9 @@ (b"a_b", b"a b"), ]: assert self.binascii.a2b_qp(input, header=True) == expected + # + assert self.binascii.a2b_qp(u"a_b", header=True) == b"a b" + raises(UnicodeEncodeError, self.binascii.a2b_qp, u"caf\xe9") def test_b2a_qp(self): for input, flags, expected in [ @@ -230,6 +239,9 @@ b"AAA AAAAAA:", ]: raises(self.binascii.Error, self.binascii.a2b_hqx, bogus) + # + assert self.binascii.a2b_hqx("AAA:") == (b"]u", 1) + raises(UnicodeEncodeError, self.binascii.a2b_hqx, u"caf\xe9") def test_b2a_hqx(self): for input, expected in [ @@ -410,6 +422,9 @@ ]: assert self.binascii.unhexlify(input) == expected assert self.binascii.a2b_hex(input) == expected + assert self.binascii.unhexlify(input.decode('ascii')) == expected + assert self.binascii.a2b_hex(input.decode('ascii')) == expected + raises(UnicodeEncodeError, self.binascii.a2b_hex, u"caf\xe9") def test_errors(self): binascii = self.binascii From noreply at buildbot.pypy.org Sat Apr 12 21:24:25 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 12 Apr 2014 21:24:25 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Don't accept -1 as select() timeout. 
Message-ID: <20140412192425.3DA661C022D@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70606:f4ed04b53800 Date: 2014-04-12 18:36 +0200 http://bitbucket.org/pypy/pypy/changeset/f4ed04b53800/ Log: Don't accept -1 as select() timeout. diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -174,6 +174,9 @@ timeout = -1.0 else: timeout = space.float_w(w_timeout) + if timeout < 0.0: + raise OperationError(space.w_ValueError, + space.wrap("timeout must be non-negative")) ll_inl = lltype.nullptr(_c.fd_set.TO) ll_outl = lltype.nullptr(_c.fd_set.TO) diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -22,6 +22,7 @@ finally: readend.close() writeend.close() + raises(ValueError, select.select, [], [], [], -1) def test_list_tuple(self): import time, select From noreply at buildbot.pypy.org Sat Apr 12 21:24:26 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 12 Apr 2014 21:24:26 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Apply previous hacks to pickletester. Message-ID: <20140412192426.603181C022D@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70607:df6a6f8c4d40 Date: 2014-04-12 18:47 +0200 http://bitbucket.org/pypy/pypy/changeset/df6a6f8c4d40/ Log: Apply previous hacks to pickletester. diff --git a/lib-python/3/test/pickletester.py b/lib-python/3/test/pickletester.py --- a/lib-python/3/test/pickletester.py +++ b/lib-python/3/test/pickletester.py @@ -1157,8 +1157,14 @@ x = BadGetattr() for proto in 0, 1: self.assertRaises(RuntimeError, self.dumps, x, proto) - # protocol 2 don't raise a RuntimeError. - d = self.dumps(x, 2) + if check_impl_detail(cpython=True): + # protocol 2 don't raise a RuntimeError. 
+ d = self.dumps(x, 2) + self.assertRaises(RuntimeError, self.loads, d) + else: + # PyPy doesn't mask the exception + for proto in 2, 3: + self.assertRaises(RuntimeError, self.dumps, x, proto) def test_reduce_bad_iterator(self): # Issue4176: crash when 4th and 5th items of __reduce__() From noreply at buildbot.pypy.org Sat Apr 12 22:05:08 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 12 Apr 2014 22:05:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Move src/cjkcodecs C files from rpython/ to pypy/module/_multibytecodec. Message-ID: <20140412200508.57DEA1D24FA@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r70608:319a2085bf79 Date: 2014-04-12 22:04 +0200 http://bitbucket.org/pypy/pypy/changeset/319a2085bf79/ Log: Move src/cjkcodecs C files from rpython/ to pypy/module/_multibytecodec. They are unlikely to be reused for other kinds of interpreters, and they will slightly change in Python3.3 and Python3.4. diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py --- a/pypy/module/_multibytecodec/c_codecs.py +++ b/pypy/module/_multibytecodec/c_codecs.py @@ -1,7 +1,6 @@ import py from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir UNICODE_REPLACEMENT_CHARACTER = u'\uFFFD' @@ -15,7 +14,7 @@ return 'EncodeDecodeError(%r, %r, %r)' % (self.start, self.end, self.reason) -srcdir = py.path.local(cdir) +srcdir = py.path.local(__file__).dirpath() codecs = [ # _codecs_cn diff --git a/rpython/translator/c/src/cjkcodecs/README b/pypy/module/_multibytecodec/src/cjkcodecs/README rename from rpython/translator/c/src/cjkcodecs/README rename to pypy/module/_multibytecodec/src/cjkcodecs/README diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_cn.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c rename from rpython/translator/c/src/cjkcodecs/_codecs_cn.c rename to 
pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_hk.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c rename from rpython/translator/c/src/cjkcodecs/_codecs_hk.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c rename from rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_jp.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c rename from rpython/translator/c/src/cjkcodecs/_codecs_jp.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_kr.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c rename from rpython/translator/c/src/cjkcodecs/_codecs_kr.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_tw.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c rename from rpython/translator/c/src/cjkcodecs/_codecs_tw.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c diff --git a/rpython/translator/c/src/cjkcodecs/alg_jisx0201.h b/pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h rename from rpython/translator/c/src/cjkcodecs/alg_jisx0201.h rename to pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h diff --git a/rpython/translator/c/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h rename from rpython/translator/c/src/cjkcodecs/cjkcodecs.h rename to pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h diff --git a/rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h b/pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h rename from rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h rename to 
pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_cn.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h rename from rpython/translator/c/src/cjkcodecs/mappings_cn.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_hk.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h rename from rpython/translator/c/src/cjkcodecs/mappings_hk.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h rename from rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jp.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h rename from rpython/translator/c/src/cjkcodecs/mappings_jp.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_kr.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h rename from rpython/translator/c/src/cjkcodecs/mappings_kr.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_tw.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h rename from rpython/translator/c/src/cjkcodecs/mappings_tw.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c rename from rpython/translator/c/src/cjkcodecs/multibytecodec.c rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h rename from 
rpython/translator/c/src/cjkcodecs/multibytecodec.h rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h From noreply at buildbot.pypy.org Sun Apr 13 05:54:57 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 13 Apr 2014 05:54:57 +0200 (CEST) Subject: [pypy-commit] pypy default: fix AppTracebackEntry for compatibility with the real pytest Message-ID: <20140413035457.E9DDD1C10D2@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r70609:8663933d1efe Date: 2014-04-13 04:54 +0100 http://bitbucket.org/pypy/pypy/changeset/8663933d1efe/ Log: fix AppTracebackEntry for compatibility with the real pytest diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -25,7 +25,7 @@ # self.path = space.unwrap(space.getattr( self.path = py.path.local(space.str_w(self.w_file)) self.space = space - + def fullsource(self): filename = self.space.str_w(self.w_file) source = py.code.Source(py.std.linecache.getlines(filename)) @@ -106,27 +106,28 @@ def exconly(self, tryshort=True): return '(application-level) ' + self.operr.errorstr(self.space) - def errisinstance(self, exc): - clsname = exc.__name__ + def errisinstance(self, exc): + clsname = exc.__name__ # we can only check for builtin exceptions # as there is no canonical applevel one for custom interplevel ones if exc.__module__ != "exceptions": - return False - try: - w_exc = getattr(self.space, 'w_' + clsname) - except KeyboardInterrupt: - raise - except: - pass - else: - return self.operr.match(self.space, w_exc) - return False + return False + try: + w_exc = getattr(self.space, 'w_' + clsname) + except KeyboardInterrupt: + raise + except: + pass + else: + return self.operr.match(self.space, w_exc) + return False def __str__(self): return '(application-level) ' + self.operr.errorstr(self.space) class AppTracebackEntry(py.code.Traceback.Entry): exprinfo = None + frame = None def __init__(self, 
space, tb): self.frame = AppFrame(space, space.getattr(tb, space.wrap('tb_frame'))) @@ -142,8 +143,11 @@ # XXX this reinterpret() is only here to prevent reinterpretation. return self.exprinfo -class AppTraceback(py.code.Traceback): - Entry = AppTracebackEntry + def ishidden(self): + return False + +class AppTraceback(py.code.Traceback): + Entry = AppTracebackEntry def __init__(self, space, apptb): l = [] @@ -151,7 +155,7 @@ l.append(self.Entry(space, apptb)) apptb = space.getattr(apptb, space.wrap('tb_next')) list.__init__(self, l) - + # ____________________________________________________________ def build_pytest_assertion(space): @@ -163,10 +167,10 @@ ## # Argh! we may see app-level helpers in the frame stack! ## # that's very probably very bad... ## ^^^the above comment may be outdated, but we are not sure - + # if the assertion provided a message, don't do magic args_w, kwargs_w = __args__.unpack() - if args_w: + if args_w: w_msg = args_w[0] else: frame = space.getexecutioncontext().gettopframe() @@ -174,7 +178,7 @@ try: source = runner.statement source = str(source).strip() - except py.error.ENOENT: + except py.error.ENOENT: source = None from pypy import conftest if source and py.test.config._assertstate.mode != "off": @@ -187,7 +191,7 @@ space.setattr(w_self, space.wrap('msg'), w_msg) # build a new AssertionError class to replace the original one. - w_BuiltinAssertionError = space.getitem(space.builtin.w_dict, + w_BuiltinAssertionError = space.getitem(space.builtin.w_dict, space.wrap('AssertionError')) w_metaclass = space.type(w_BuiltinAssertionError) w_init = space.wrap(gateway.interp2app_temp(my_init)) @@ -260,9 +264,9 @@ app_raises = gateway.interp2app_temp(pypyraises) -def pypyskip(space, w_message): - """skip a test at app-level. """ - msg = space.unwrap(w_message) +def pypyskip(space, w_message): + """skip a test at app-level. 
""" + msg = space.unwrap(w_message) py.test.skip(msg) app_skip = gateway.interp2app_temp(pypyskip) From noreply at buildbot.pypy.org Sun Apr 13 16:18:48 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 13 Apr 2014 16:18:48 +0200 (CEST) Subject: [pypy-commit] pypy default: A bit more tweaking of details of specialization of some strange jmp Message-ID: <20140413141848.DF74F1D2B18@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70610:b9eded7b2b4f Date: 2014-04-13 16:17 +0200 http://bitbucket.org/pypy/pypy/changeset/b9eded7b2b4f/ Log: A bit more tweaking of details of specialization of some strange jmp diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -707,7 +707,7 @@ raise OperationError(space.w_ValueError, space.wrap("list modified during sort")) -find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') +find_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'list.find') class ListStrategy(object): @@ -733,8 +733,9 @@ space = self.space i = start # needs to be safe against eq_w mutating stuff + tp = space.type(w_item) while i < stop and i < w_list.length(): - find_jmp.jit_merge_point() + find_jmp.jit_merge_point(tp=tp) if space.eq_w(w_list.getitem(i), w_item): return i i += 1 diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1091,6 +1091,7 @@ def _intersect_wrapped(self, w_set, w_other): result = newset(self.space) for key in self.unerase(w_set.sstorage): + self.intersect_jmp.jit_merge_point() w_key = self.wrap(key) if w_other.has_key(w_key): result[w_key] = None @@ -1201,6 +1202,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(bytes).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1237,6 
+1241,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(unicode).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1273,6 +1280,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(int).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1311,6 +1321,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(object).intersect') + def get_empty_storage(self): return self.erase(self.get_empty_dict()) @@ -1355,6 +1368,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(identity).intersect') + def get_empty_storage(self): return self.erase({}) diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -27,7 +27,7 @@ jit.loop_unrolling_heuristic(other, other.length(), UNROLL_CUTOFF)) -contains_jmp = jit.JitDriver(greens = [], reds = 'auto', +contains_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'tuple.contains') class W_AbstractTupleObject(W_Root): @@ -136,8 +136,9 @@ return space.w_False def _descr_contains_jmp(self, space, w_obj): + tp = space.type(w_obj) for w_item in self.tolist(): - contains_jmp.jit_merge_point() + contains_jmp.jit_merge_point(tp=tp) if space.eq_w(w_item, w_obj): return space.w_True return space.w_False From noreply at buildbot.pypy.org Sun Apr 13 16:33:44 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 13 Apr 2014 16:33:44 +0200 (CEST) Subject: [pypy-commit] pypy py3k: do not rename windows import library in rpython, it belongs in pypy Message-ID: <20140413143344.68A151D2B1A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: py3k Changeset: 
r70611:76a42490f211 Date: 2014-04-13 15:09 +0300 http://bitbucket.org/pypy/pypy/changeset/76a42490f211/ Log: do not rename windows import library in rpython, it belongs in pypy diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -476,11 +476,11 @@ shutil_copy(str(soname), str(newsoname)) self.log.info("copied: %s" % (newsoname,)) if sys.platform == 'win32': - # the import library is named python27.lib, according - # to the pragma in pyconfig.h - libname = str(newsoname.dirpath().join('python27.lib')) - shutil.copyfile(str(soname.new(ext='lib')), libname) - self.log.info("copied: %s" % (libname,)) + # copy the import library as well + libname = soname.new(ext='lib') + newlibname = newexename.new(basename=soname.basename) + shutil.copyfile(str(libname), str(newlibname.new(ext='lib'))) + self.log.info("copied: %s" % (newlibname,)) self.c_entryp = newexename self.log.info('usession directory: %s' % (udir,)) self.log.info("created: %s" % (self.c_entryp,)) diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -72,7 +72,7 @@ td.create_exe() assert dst_name.read() == 'exe' assert dst_name.new(ext='dll').read() == 'dll' - assert dst_name.new(purebasename='python27',ext='lib').read() == 'lib' + assert dst_name.new(ext='lib').read() == 'lib' def test_shutil_copy(): a = udir.join('file_a') From noreply at buildbot.pypy.org Sun Apr 13 16:33:45 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 13 Apr 2014 16:33:45 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix for import lib name (cc1896cf5625) and prepare import lib for tests (with TODO) Message-ID: <20140413143345.A4B151D2B1A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: py3k Changeset: r70612:8eda4db82d6b Date: 2014-04-13 17:18 +0300 
http://bitbucket.org/pypy/pypy/changeset/8eda4db82d6b/ Log: fix for import lib name (cc1896cf5625) and prepare import lib for tests (with TODO) diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -115,12 +115,22 @@ continue print "Picking %s" % p binaries.append((p, p.basename)) - importlib_name = 'python27.lib' + importlib_name = 'libpypy-c.lib' if pypy_c.dirpath().join(importlib_name).check(): - shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), - str(pypydir.join('include/python27.lib'))) - print "Picking %s as %s" % (pypy_c.dirpath().join(importlib_name), - pypydir.join('include/python27.lib')) + try: + ver = subprocess.check_output([r'pypy\goal\pypy-c','-c', + "import sys;print(sys.version)"]) + importlib_target = 'python%s%s.lib' % (ver[0], ver[2]) + shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), + str(pypydir.join(importlib_target))) + # XXX fix this, either an additional build step or rename + # both DLL and LIB to versioned names, like cpython + shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), + str(pypy_c.dirpath().join(importlib_target))) + print "Picking %s as %s" % (pypy_c.dirpath().join(importlib_name), + pypydir.join('include', importlib_target)) + except: + pass else: pass # XXX users will complain that they cannot compile cpyext From noreply at buildbot.pypy.org Sun Apr 13 16:33:47 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 13 Apr 2014 16:33:47 +0200 (CEST) Subject: [pypy-commit] pypy py3k: update import library name on windows Message-ID: <20140413143347.113821D2B1A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: py3k Changeset: r70613:b8262e67caef Date: 2014-04-13 17:29 +0300 http://bitbucket.org/pypy/pypy/changeset/b8262e67caef/ Log: update import library name on windows diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ 
b/lib_pypy/_pypy_testcapi.py @@ -53,10 +53,10 @@ if sys.platform == 'win32': # XXX pyconfig.h uses a pragma to link to the import library, # which is currently python3.lib - library = os.path.join(thisdir, '..', 'include', 'python3') + library = os.path.join(thisdir, '..', 'include', 'python32') if not os.path.exists(library + '.lib'): # For a local translation or nightly build - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python3') + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python32') assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 From noreply at buildbot.pypy.org Sun Apr 13 18:25:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 13 Apr 2014 18:25:12 +0200 (CEST) Subject: [pypy-commit] stmgc default: Uniformize the lock management Message-ID: <20140413162512.3C4781C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1152:3bfb23623e74 Date: 2014-04-12 12:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/3bfb23623e74/ Log: Uniformize the lock management diff --git a/c7/stm/atomic.h b/c7/stm/atomic.h --- a/c7/stm/atomic.h +++ b/c7/stm/atomic.h @@ -36,4 +36,12 @@ #endif +#define spinlock_acquire(lock) \ + do { if (LIKELY(__sync_lock_test_and_set(&(lock), 1) == 0)) break; \ + spin_loop(); } while (1) +#define spinlock_release(lock) \ + do { assert((lock) == 1); \ + __sync_lock_release(&(lock)); } while (0) + + #endif /* _STM_ATOMIC_H */ diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -14,12 +14,13 @@ #define EVENTUALLY(condition) \ { \ if (!(condition)) { \ - while (!__sync_bool_compare_and_swap( \ - &pages_privatizing.by_segment, 0, -1)) \ - spin_loop(); \ + int _i; \ + for (_i = 1; _i <= NB_SEGMENTS; _i++) \ + spinlock_acquire(lock_pages_privatizing[_i]); \ if (!(condition)) \ stm_fatalerror("fails: " #condition); \ - 
__sync_lock_release(&pages_privatizing.by_segment); \ + for (_i = 1; _i <= NB_SEGMENTS; _i++) \ + spinlock_release(lock_pages_privatizing[_i]); \ } \ } #endif diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -90,8 +90,7 @@ } /* uncommon case: need to initialize some more pages */ - while (__sync_lock_test_and_set(&lock_growth_large, 1) != 0) - spin_loop(); + spinlock_acquire(lock_growth_large); if (addr + size > uninitialized_page_start) { uintptr_t npages; @@ -105,7 +104,7 @@ __sync_synchronize(); uninitialized_page_start += npages * 4096UL; } - __sync_lock_release(&lock_growth_large); + spinlock_release(lock_growth_large); return addr; } diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -116,14 +116,12 @@ static void lm_lock(void) { - while (UNLIKELY(__sync_lock_test_and_set(&lm.lock, 1) != 0)) - spin_loop(); + spinlock_acquire(lm.lock); } static void lm_unlock(void) { - assert(lm.lock == 1); - __sync_lock_release(&lm.lock); + spinlock_release(lm.lock); } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -115,10 +115,7 @@ } #ifndef NDEBUG - while (__sync_fetch_and_or(&pages_privatizing.by_segment, bitmask) - & bitmask) { - spin_loop(); - } + spinlock_acquire(lock_pages_privatizing[STM_SEGMENT->segment_num]); #endif /* add this thread's 'pages_privatized' bit */ @@ -137,7 +134,7 @@ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); #ifndef NDEBUG - __sync_fetch_and_sub(&pages_privatizing.by_segment, bitmask); + spinlock_release(lock_pages_privatizing[STM_SEGMENT->segment_num]); #endif } diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -74,5 +74,5 @@ } #ifndef NDEBUG -static struct page_shared_s pages_privatizing = { 0 }; +static char lock_pages_privatizing[NB_SEGMENTS + 1] = { 0 }; #endif From noreply at buildbot.pypy.org Sun Apr 13 18:25:13 2014 From: noreply at 
buildbot.pypy.org (arigo) Date: Sun, 13 Apr 2014 18:25:13 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: hg merge default Message-ID: <20140413162513.8E73E1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1153:f4c49a88012e Date: 2014-04-12 12:40 +0200 http://bitbucket.org/pypy/stmgc/changeset/f4c49a88012e/ Log: hg merge default diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -17,7 +17,7 @@ H_FILES = ../stmgc.h ../stm/*.h C_FILES = ../stmgc.c ../stm/*.c -COMMON = -I.. -pthread -lrt -g -Wall -Werror +COMMON = -I.. -pthread -lrt -g -Wall -Werror -DSTM_LARGEMALLOC_TEST # note that 'build' is partially optimized but still contains all asserts diff --git a/c7/demo/demo_largemalloc.c b/c7/demo/demo_largemalloc.c new file mode 100644 --- /dev/null +++ b/c7/demo/demo_largemalloc.c @@ -0,0 +1,72 @@ +#include +#include +#include +#include + +#include "stmgc.h" +#include "../stm/largemalloc.h" + +static inline double get_stm_time(void) +{ + struct timespec tp; + clock_gettime(CLOCK_MONOTONIC, &tp); + return tp.tv_sec + tp.tv_nsec * 0.000000001; +} + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + abort(); +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + abort(); +} + +/************************************************************/ + +#define ARENA_SIZE (1024*1024*1024) + +static char *arena_data; +extern bool (*_stm_largemalloc_keep)(char *data); /* a hook for tests */ +void _stm_mutex_pages_lock(void); + + +static bool keep_me(char *data) { + static bool last_answer = false; + last_answer = !last_answer; + return last_answer; +} + +void timing(int scale) +{ + long limit = 1L << scale; + _stm_largemalloc_init_arena(arena_data, ARENA_SIZE); + double start = get_stm_time(); + + long i; + for (i = 0; i < limit; i++) { + _stm_large_malloc(16 + 8 * (i % 4)); /* may return NULL */ + } + _stm_largemalloc_keep = keep_me; + _stm_largemalloc_sweep(); 
+ for (i = 0; i < limit; i++) { + _stm_large_malloc(16 + 8 * (i % 4)); /* may return NULL */ + } + + double stop = get_stm_time(); + printf("scale %2d: %.9f\n", scale, stop - start); +} + + + +int main(void) +{ + int i; + arena_data = malloc(ARENA_SIZE); + assert(arena_data != NULL); + _stm_mutex_pages_lock(); + for (i = 0; i < 25; i++) + timing(i); + return 0; +} diff --git a/c7/gdb/gdb_stm.py b/c7/gdb/gdb_stm.py new file mode 100644 --- /dev/null +++ b/c7/gdb/gdb_stm.py @@ -0,0 +1,49 @@ +""" Adds two built-in functions: $rfs(p=0) and $rgs(p=0). + +Returns the number or the address 'p', offset with the value of +the %fs or %gs register in the current thread. + +Usage: you can for example add this line in your ~/.gdbinit: + + python execfile('/path/to/gdb_stm.py') +""" +import gdb + +def gdb_function(func): + class Func(gdb.Function): + __doc__ = func.__doc__ + invoke = staticmethod(func) + Func(func.__name__) + +# ------------------------------------------------------- + +SEG_FS = 0x1003 +SEG_GS = 0x1004 + +def get_segment_register(which): + v = gdb.parse_and_eval('(long*)malloc(8)') + L = gdb.lookup_type('long') + gdb.parse_and_eval('arch_prctl(%d, %d)' % (which, int(v.cast(L)))) + result = int(v.dereference()) + gdb.parse_and_eval('free(%d)' % (int(v.cast(L)),)) + return result + +def rfsrgs(name, which): + seg = get_segment_register(which) + if name is None: + return seg + tp = name.type + if tp.code == gdb.TYPE_CODE_INT: + return name + seg + assert tp.code == gdb.TYPE_CODE_PTR + L = gdb.lookup_type('long') + return (name.cast(L) + seg).cast(tp) + + at gdb_function +def rfs(name=None): + return rfsrgs(name, SEG_FS) + + at gdb_function +def rgs(name=None): + return rfsrgs(name, SEG_GS) + diff --git a/c7/stm/atomic.h b/c7/stm/atomic.h --- a/c7/stm/atomic.h +++ b/c7/stm/atomic.h @@ -36,4 +36,12 @@ #endif +#define spinlock_acquire(lock) \ + do { if (LIKELY(__sync_lock_test_and_set(&(lock), 1) == 0)) break; \ + spin_loop(); } while (1) +#define 
spinlock_release(lock) \ + do { assert((lock) == 1); \ + __sync_lock_release(&(lock)); } while (0) + + #endif /* _STM_ATOMIC_H */ diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -8,6 +8,23 @@ memset(write_locks, 0, sizeof(write_locks)); } +#ifdef NDEBUG +#define EVENTUALLY(condition) /* nothing */ +#else +#define EVENTUALLY(condition) \ + { \ + if (!(condition)) { \ + int _i; \ + for (_i = 1; _i <= NB_SEGMENTS; _i++) \ + spinlock_acquire(lock_pages_privatizing[_i]); \ + if (!(condition)) \ + stm_fatalerror("fails: " #condition); \ + for (_i = 1; _i <= NB_SEGMENTS; _i++) \ + spinlock_release(lock_pages_privatizing[_i]); \ + } \ + } +#endif + static void check_flag_write_barrier(object_t *obj) { /* check that all copies of the object, apart from mine, have the @@ -21,12 +38,7 @@ if (i == STM_SEGMENT->segment_num) continue; o1 = (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); - if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) { - mutex_pages_lock(); /* try again... */ - if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) - stm_fatalerror("missing GCFLAG_WRITE_BARRIER"); - mutex_pages_unlock(); - } + EVENTUALLY(o1->stm_flags & GCFLAG_WRITE_BARRIER); } #endif } @@ -271,7 +283,6 @@ with synchronize_object_now() but I don't completely see how to improve... */ - assert(_has_mutex_pages()); assert(!_is_young(obj)); uintptr_t start = (uintptr_t)obj; @@ -326,10 +337,7 @@ /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other segments' own private pages. - - This must be called with the mutex_pages_lock! 
*/ - assert(_has_mutex_pages()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); @@ -406,7 +414,7 @@ memcpy(dst, src, copy_size); } else { - assert(memcmp(dst, src, copy_size) == 0); /* same page */ + EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ } for (i = 1; i <= NB_SEGMENTS; i++) { @@ -425,7 +433,7 @@ memcpy(dst, src, copy_size); } else { - assert(memcmp(dst, src, copy_size) == 0); /* same page */ + EVENTUALLY(!memcmp(dst, src, copy_size)); /* same page */ } } @@ -518,12 +526,10 @@ major_collection_now_at_safe_point(); /* synchronize overflow objects living in privatized pages */ - mutex_pages_lock(); push_overflow_objects_from_privatized_pages(); /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); - mutex_pages_unlock(); /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -35,8 +35,6 @@ #define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) #define WRITELOCK_END READMARKER_END -#define SHADOW_STACK_SIZE 1000 - enum /* stm_flags */ { /* This flag is set on non-nursery objects. It forces stm_write() to call _stm_write_slowpath(). 
diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -70,7 +70,6 @@ s_mutex_lock(); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); - mutex_pages_lock(); /* Make a new mmap at some other address, but of the same size as the standard mmap at stm_object_pages @@ -166,7 +165,6 @@ fork_big_copy = NULL; bool was_in_transaction = fork_was_in_transaction; - mutex_pages_unlock(); s_mutex_unlock(); if (!was_in_transaction) { @@ -203,7 +201,6 @@ /* this new process contains no other thread, so we can just release these locks early */ - mutex_pages_unlock(); s_mutex_unlock(); /* Move the copy of the mmap over the old one, overwriting it diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -32,18 +32,23 @@ pages_initialize_shared((pages_addr - stm_object_pages) / 4096UL, num); } + +static int lock_growth_large = 0; + static char *allocate_outside_nursery_large(uint64_t size) { - /* thread-safe: use the lock of pages.c to prevent any remapping - from occurring under our feet */ - mutex_pages_lock(); - increment_total_allocated(size + LARGE_MALLOC_OVERHEAD); - /* Allocate the object with largemalloc.c from the lower addresses. */ char *addr = _stm_large_malloc(size); if (addr == NULL) stm_fatalerror("not enough memory!"); + if (LIKELY(addr + size <= uninitialized_page_start)) { + return addr; + } + + /* uncommon case: need to initialize some more pages */ + spinlock_acquire(lock_growth_large); + if (addr + size > uninitialized_page_start) { uintptr_t npages; npages = (addr + size - uninitialized_page_start) / 4096UL; @@ -53,11 +58,10 @@ stm_fatalerror("out of memory!"); /* XXX */ } setup_N_pages(uninitialized_page_start, npages); + __sync_synchronize(); uninitialized_page_start += npages * 4096UL; } - - mutex_pages_unlock(); - + spinlock_release(lock_growth_large); return addr; } @@ -213,7 +217,6 @@ total_allocated by 4096. 
*/ long i; - mutex_pages_lock(); for (i = 1; i <= NB_SEGMENTS; i++) { /* The 'modified_old_objects' list gives the list of objects @@ -263,7 +266,6 @@ for (i = 1; i <= NB_SEGMENTS; i++) { major_restore_private_bits_for_modified_objects(i); } - mutex_pages_unlock(); } @@ -422,9 +424,7 @@ static void sweep_large_objects(void) { - mutex_pages_lock(); _stm_largemalloc_sweep(); - mutex_pages_unlock(); } static void clean_write_locks(void) diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -20,19 +20,25 @@ #define LAST_BIN_INDEX(sz) ((sz) >= (3 << 18)) typedef struct dlist_s { - struct dlist_s *next; /* a doubly-linked list */ + struct dlist_s *next; /* a circular doubly-linked list */ struct dlist_s *prev; } dlist_t; +typedef struct ulist_s { + struct ulist_s *up; /* a non-circular doubly-linked list */ + struct ulist_s *down; +} ulist_t; + typedef struct malloc_chunk { size_t prev_size; /* - if the previous chunk is free: size of its data - otherwise, if this chunk is free: 1 - otherwise, 0. */ - size_t size; /* size of the data in this chunk, - plus optionally the FLAG_SORTED */ + size_t size; /* size of the data in this chunk */ - dlist_t d; /* if free: a doubly-linked list */ + dlist_t d; /* if free: a doubly-linked list 'largebins' */ /* if not free: the user data starts here */ + ulist_t u; /* if free, if unsorted: up==UU_UNSORTED + if free, if sorted: a doubly-linked list */ /* The chunk has a total size of 'size'. It is immediately followed in memory by another chunk. This list ends with the last "chunk" @@ -41,29 +47,22 @@ one are considered "not free". 
*/ } mchunk_t; -#define FLAG_SORTED 1 +#define UU_UNSORTED ((ulist_t *) 1) #define THIS_CHUNK_FREE 1 #define BOTH_CHUNKS_USED 0 #define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) #define END_MARKER 0xDEADBEEF +#define MIN_ALLOC_SIZE (sizeof(struct malloc_chunk) - CHUNK_HEADER_SIZE) #define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) #define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) +#define updown2chunk(p) chunk_at_offset(p, \ + -(CHUNK_HEADER_SIZE + sizeof(dlist_t))) -static mchunk_t *next_chunk_s(mchunk_t *p) +static mchunk_t *next_chunk(mchunk_t *p) { - assert(p->size & FLAG_SORTED); - return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size - FLAG_SORTED); -} -static mchunk_t *next_chunk_u(mchunk_t *p) -{ - assert(!(p->size & FLAG_SORTED)); return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size); } -static mchunk_t *next_chunk_a(mchunk_t *p) -{ - return chunk_at_offset(p, CHUNK_HEADER_SIZE + (p->size & ~FLAG_SORTED)); -} /* The free chunks are stored in "bins". Each bin is a doubly-linked @@ -76,36 +75,73 @@ neighbors to ensure this. In each bin's doubly-linked list, chunks are sorted by their size in - decreasing order (if you start from 'd.next'). At the end of this - list are some unsorted chunks. All unsorted chunks are after all - sorted chunks. The flag 'FLAG_SORTED' distinguishes them. + decreasing order (if you follow 'largebins[n].next', + 'largebins[n].next->next', etc.). At the end of this list are some + unsorted chunks. All unsorted chunks are after all sorted chunks. + Unsorted chunks are distinguished by having 'u.up == UU_UNSORTED'. Note that if the user always calls large_malloc() with a large enough argument, then the few bins corresponding to smaller values will never be sorted at all. They are still populated with the fragments of space between bigger allocations. + + Following the 'd' linked list, we get only one chunk of every size. 
+ The additional chunks of a given size are linked "vertically" in + the secondary 'u' doubly-linked list. + + + +-----+ + | 296 | + +-----+ + ^ | + | v + +-----+ +-----+ + | 296 | | 288 | + +-----+ +-----+ + ^ | ^ | UU_UNSORTED + | v | v | + largebins +-----+ +-----+ +-----+ +-----+ largebins + [4].next <-> | 304 | <-> | 296 | <-> | 288 | <-> | 296 | <-> [4].prev + +-----+ +-----+ +-----+ +-----+ + */ -static dlist_t largebins[N_BINS]; -static mchunk_t *first_chunk, *last_chunk; + +static struct { + int lock; + mchunk_t *first_chunk, *last_chunk; + dlist_t largebins[N_BINS]; +} lm __attribute__((aligned(64))); + + +static void lm_lock(void) +{ + spinlock_acquire(lm.lock); +} + +static void lm_unlock(void) +{ + spinlock_release(lm.lock); +} static void insert_unsorted(mchunk_t *new) { size_t index = LAST_BIN_INDEX(new->size) ? N_BINS - 1 : largebin_index(new->size); - new->d.next = &largebins[index]; - new->d.prev = largebins[index].prev; + new->d.next = &lm.largebins[index]; + new->d.prev = lm.largebins[index].prev; new->d.prev->next = &new->d; - largebins[index].prev = &new->d; - assert(!(new->size & FLAG_SORTED)); + new->u.up = UU_UNSORTED; + new->u.down = NULL; + lm.largebins[index].prev = &new->d; } static int compare_chunks(const void *vchunk1, const void *vchunk2) { /* sort by size */ - const mchunk_t *chunk1 = (const mchunk_t *)vchunk1; - const mchunk_t *chunk2 = (const mchunk_t *)vchunk2; + mchunk_t *chunk1 = *(mchunk_t *const *)vchunk1; + mchunk_t *chunk2 = *(mchunk_t *const *)vchunk2; if (chunk1->size < chunk2->size) return -1; if (chunk1->size == chunk2->size) @@ -114,13 +150,15 @@ return +1; } +#define MAX_STACK_COUNT 64 + static void really_sort_bin(size_t index) { - dlist_t *unsorted = largebins[index].prev; - dlist_t *end = &largebins[index]; + dlist_t *unsorted = lm.largebins[index].prev; + dlist_t *end = &lm.largebins[index]; dlist_t *scan = unsorted->prev; size_t count = 1; - while (scan != end && !(data2chunk(scan)->size & FLAG_SORTED)) { + 
while (scan != end && data2chunk(scan)->u.up == UU_UNSORTED) { scan = scan->prev; ++count; } @@ -128,12 +166,20 @@ scan->next = end; mchunk_t *chunk1; - mchunk_t *chunks[count]; /* dynamically-sized */ + mchunk_t *chunk_array[MAX_STACK_COUNT]; + mchunk_t **chunks = chunk_array; + if (count == 1) { chunk1 = data2chunk(unsorted); /* common case */ count = 0; } else { + if (count > MAX_STACK_COUNT) { + chunks = malloc(count * sizeof(mchunk_t *)); + if (chunks == NULL) { + stm_fatalerror("out of memory"); // XXX + } + } size_t i; for (i = 0; i < count; i++) { chunks[i] = data2chunk(unsorted); @@ -144,55 +190,111 @@ chunk1 = chunks[--count]; } - chunk1->size |= FLAG_SORTED; size_t search_size = chunk1->size; - dlist_t *head = largebins[index].next; + dlist_t *head = lm.largebins[index].next; while (1) { - if (head == end || search_size >= data2chunk(head)->size) { + if (head == end || data2chunk(head)->size < search_size) { /* insert 'chunk1' here, before the current head */ head->prev->next = &chunk1->d; chunk1->d.prev = head->prev; head->prev = &chunk1->d; chunk1->d.next = head; - if (count == 0) - break; /* all done */ - chunk1 = chunks[--count]; - chunk1->size |= FLAG_SORTED; - search_size = chunk1->size; + chunk1->u.up = NULL; + chunk1->u.down = NULL; + head = &chunk1->d; + } + else if (data2chunk(head)->size == search_size) { + /* insert 'chunk1' vertically in the 'u' list */ + ulist_t *uhead = &data2chunk(head)->u; + chunk1->u.up = uhead->up; + chunk1->u.down = uhead; + if (uhead->up != NULL) + uhead->up->down = &chunk1->u; + uhead->up = &chunk1->u; +#ifndef NDEBUG + chunk1->d.next = (dlist_t *)0x42; /* not used */ + chunk1->d.prev = (dlist_t *)0x42; +#endif } else { head = head->next; + continue; } + if (count == 0) + break; /* all done */ + chunk1 = chunks[--count]; + search_size = chunk1->size; } + + if (chunks != chunk_array) + free(chunks); } static void sort_bin(size_t index) { - dlist_t *last = largebins[index].prev; - if (last != &largebins[index] && 
!(data2chunk(last)->size & FLAG_SORTED)) + dlist_t *last = lm.largebins[index].prev; + if (last != &lm.largebins[index] && data2chunk(last)->u.up == UU_UNSORTED) really_sort_bin(index); } +static void unlink_chunk(mchunk_t *mscan) +{ + if (mscan->u.down != NULL) { + /* unlink mscan from the vertical list 'u' */ + ulist_t *up = mscan->u.up; + ulist_t *down = mscan->u.down; + down->up = up; + if (up != NULL) up->down = down; + } + else { + dlist_t *prev = mscan->d.prev; + dlist_t *next = mscan->d.next; + if (mscan->u.up == NULL || mscan->u.up == UU_UNSORTED) { + /* unlink mscan from the doubly-linked list 'd' */ + next->prev = prev; + prev->next = next; + } + else { + /* relink in the 'd' list the item above me */ + mchunk_t *above = updown2chunk(mscan->u.up); + next->prev = &above->d; + prev->next = &above->d; + above->d.next = next; + above->d.prev = prev; + above->u.down = NULL; + } + } +} + char *_stm_large_malloc(size_t request_size) { /* 'request_size' should already be a multiple of the word size here */ assert((request_size & (sizeof(char *)-1)) == 0); + /* it can be very small, but we need to ensure a minimal size + (currently 32 bytes) */ + if (request_size < MIN_ALLOC_SIZE) + request_size = MIN_ALLOC_SIZE; + + lm_lock(); + size_t index = largebin_index(request_size); sort_bin(index); /* scan through the chunks of current bin in reverse order to find the smallest that fits. 
*/ - dlist_t *scan = largebins[index].prev; - dlist_t *end = &largebins[index]; + dlist_t *scan = lm.largebins[index].prev; + dlist_t *end = &lm.largebins[index]; mchunk_t *mscan; while (scan != end) { mscan = data2chunk(scan); assert(mscan->prev_size == THIS_CHUNK_FREE); - assert(next_chunk_s(mscan)->prev_size == mscan->size - FLAG_SORTED); + assert(next_chunk(mscan)->prev_size == mscan->size); + assert(IMPLY(mscan->d.prev != end, + data2chunk(mscan->d.prev)->size > mscan->size)); - if (mscan->size > request_size) + if (mscan->size >= request_size) goto found; scan = mscan->d.prev; } @@ -201,31 +303,40 @@ smallest item of the first non-empty bin, as it will be large enough. */ while (++index < N_BINS) { - if (largebins[index].prev != &largebins[index]) { + if (lm.largebins[index].prev != &lm.largebins[index]) { /* non-empty bin. */ sort_bin(index); - scan = largebins[index].prev; - end = &largebins[index]; + scan = lm.largebins[index].prev; mscan = data2chunk(scan); goto found; } } /* not enough memory. 
*/ + lm_unlock(); return NULL; found: - assert(mscan->size & FLAG_SORTED); - assert(mscan->size > request_size); + assert(mscan->size >= request_size); + assert(mscan->u.up != UU_UNSORTED); - /* unlink mscan from the doubly-linked list */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; + if (mscan->u.up != NULL) { + /* fast path: grab the item that is just above, to avoid needing + to rearrange the 'd' list */ + mchunk_t *above = updown2chunk(mscan->u.up); + ulist_t *two_above = above->u.up; + mscan->u.up = two_above; + if (two_above != NULL) two_above->down = &mscan->u; + mscan = above; + } + else { + unlink_chunk(mscan); + } - size_t remaining_size_plus_1 = mscan->size - request_size; - if (remaining_size_plus_1 <= sizeof(struct malloc_chunk)) { - next_chunk_s(mscan)->prev_size = BOTH_CHUNKS_USED; - request_size = mscan->size & ~FLAG_SORTED; + size_t remaining_size = mscan->size - request_size; + if (remaining_size < sizeof(struct malloc_chunk)) { + next_chunk(mscan)->prev_size = BOTH_CHUNKS_USED; + request_size = mscan->size; } else { /* only part of the chunk is being used; reduce the size @@ -234,27 +345,35 @@ mchunk_t *new = chunk_at_offset(mscan, CHUNK_HEADER_SIZE + request_size); new->prev_size = THIS_CHUNK_FREE; - size_t remaining_size = remaining_size_plus_1 - 1 - CHUNK_HEADER_SIZE; - new->size = remaining_size; - next_chunk_u(new)->prev_size = remaining_size; + size_t remaining_data_size = remaining_size - CHUNK_HEADER_SIZE; + new->size = remaining_data_size; + next_chunk(new)->prev_size = remaining_data_size; insert_unsorted(new); } mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; + increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); + + lm_unlock(); return (char *)&mscan->d; } -void _stm_large_free(char *data) +static void _large_free(mchunk_t *chunk) { - mchunk_t *chunk = data2chunk(data); assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); + /* 
'size' is at least MIN_ALLOC_SIZE */ + increment_total_allocated(-(chunk->size + LARGE_MALLOC_OVERHEAD)); + #ifndef NDEBUG - assert(chunk->size >= sizeof(dlist_t)); - assert(chunk->size <= (((char *)last_chunk) - (char *)data)); - memset(data, 0xDE, chunk->size); + { + char *data = (char *)&chunk->d; + assert(chunk->size >= sizeof(dlist_t)); + assert(chunk->size <= (((char *)lm.last_chunk) - data)); + memset(data, 0xDE, chunk->size); + } #endif /* try to merge with the following chunk in memory */ @@ -262,17 +381,15 @@ mchunk_t *mscan = chunk_at_offset(chunk, msize); if (mscan->prev_size == BOTH_CHUNKS_USED) { - assert((mscan->size & ((sizeof(char *) - 1) & ~FLAG_SORTED)) == 0); + assert((mscan->size & (sizeof(char *) - 1)) == 0); mscan->prev_size = chunk->size; } else { - mscan->size &= ~FLAG_SORTED; size_t fsize = mscan->size; mchunk_t *fscan = chunk_at_offset(mscan, fsize + CHUNK_HEADER_SIZE); /* unlink the following chunk */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; + unlink_chunk(mscan); #ifndef NDEBUG mscan->prev_size = (size_t)-258; /* 0xfffffffffffffefe */ mscan->size = (size_t)-515; /* 0xfffffffffffffdfd */ @@ -296,15 +413,14 @@ msize = chunk->prev_size + CHUNK_HEADER_SIZE; mscan = chunk_at_offset(chunk, -msize); assert(mscan->prev_size == THIS_CHUNK_FREE); - assert((mscan->size & ~FLAG_SORTED) == chunk->prev_size); + assert(mscan->size == chunk->prev_size); /* unlink the previous chunk */ - mscan->d.next->prev = mscan->d.prev; - mscan->d.prev->next = mscan->d.next; + unlink_chunk(mscan); /* merge the two chunks */ mscan->size = msize + chunk->size; - next_chunk_u(mscan)->prev_size = mscan->size; + next_chunk(mscan)->prev_size = mscan->size; assert(chunk->prev_size = (size_t)-1); assert(chunk->size = (size_t)-1); @@ -314,18 +430,28 @@ insert_unsorted(chunk); } +void _stm_large_free(char *data) +{ + lm_lock(); + _large_free(data2chunk(data)); + lm_unlock(); +} + void _stm_large_dump(void) { - char *data = ((char 
*)first_chunk) + 16; + lm_lock(); + char *data = ((char *)lm.first_chunk) + 16; size_t prev_size_if_free = 0; + fprintf(stderr, "\n"); while (1) { - fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); + assert((((uintptr_t)data) & 7) == 0); /* alignment */ + fprintf(stderr, "[ %p: %zu", data - 16, *(size_t*)(data - 16)); if (prev_size_if_free == 0) { assert(*(size_t*)(data - 16) == THIS_CHUNK_FREE || *(size_t*)(data - 16) == BOTH_CHUNKS_USED); if (*(size_t*)(data - 16) == THIS_CHUNK_FREE) - prev_size_if_free = (*(size_t*)(data - 8)) & ~FLAG_SORTED; + prev_size_if_free = (*(size_t*)(data - 8)); } else { assert(*(size_t*)(data - 16) == prev_size_if_free); @@ -333,30 +459,33 @@ } if (*(size_t*)(data - 8) == END_MARKER) break; - fprintf(stderr, " %p: %zu ]", data - 8, *(size_t*)(data - 8)); if (prev_size_if_free) { - fprintf(stderr, " (free %p / %p)\n", - *(void **)data, *(void **)(data + 8)); + fprintf(stderr, " \t(up %p / down %p)", + *(void **)(data + 16), *(void **)(data + 24)); + } + fprintf(stderr, "\n %p: %zu ]", data - 8, *(size_t*)(data - 8)); + if (prev_size_if_free) { + fprintf(stderr, "\t(prev %p <-> next %p)\n", + *(void **)(data + 8), *(void **)data); } else { fprintf(stderr, "\n"); } - if (!prev_size_if_free) - assert(!((*(size_t*)(data - 8)) & FLAG_SORTED)); assert(*(ssize_t*)(data - 8) >= 16); - data += (*(size_t*)(data - 8)) & ~FLAG_SORTED; + data += *(size_t*)(data - 8); data += 16; } - fprintf(stderr, " %p: end. ]\n\n", data - 8); - assert(data - 16 == (char *)last_chunk); + fprintf(stderr, "\n %p: end. 
]\n\n", data - 8); + assert(data - 16 == (char *)lm.last_chunk); + lm_unlock(); } char *_stm_largemalloc_data_start(void) { - return (char *)first_chunk; + return (char *)lm.first_chunk; } -#ifdef STM_TESTS +#ifdef STM_LARGEMALLOC_TEST bool (*_stm_largemalloc_keep)(char *data); /* a hook for tests */ #endif @@ -364,87 +493,95 @@ { int i; for (i = 0; i < N_BINS; i++) { - largebins[i].prev = &largebins[i]; - largebins[i].next = &largebins[i]; + lm.largebins[i].prev = &lm.largebins[i]; + lm.largebins[i].next = &lm.largebins[i]; } assert(data_size >= 2 * sizeof(struct malloc_chunk)); assert((data_size & 31) == 0); - first_chunk = (mchunk_t *)data_start; - first_chunk->prev_size = THIS_CHUNK_FREE; - first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE; - last_chunk = chunk_at_offset(first_chunk, data_size - CHUNK_HEADER_SIZE); - last_chunk->prev_size = first_chunk->size; - last_chunk->size = END_MARKER; - assert(last_chunk == next_chunk_u(first_chunk)); + lm.first_chunk = (mchunk_t *)data_start; + lm.first_chunk->prev_size = THIS_CHUNK_FREE; + lm.first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE; + lm.last_chunk = chunk_at_offset(lm.first_chunk, + data_size - CHUNK_HEADER_SIZE); + lm.last_chunk->prev_size = lm.first_chunk->size; + lm.last_chunk->size = END_MARKER; + assert(lm.last_chunk == next_chunk(lm.first_chunk)); + lm.lock = 0; - insert_unsorted(first_chunk); + insert_unsorted(lm.first_chunk); -#ifdef STM_TESTS +#ifdef STM_LARGEMALLOC_TEST _stm_largemalloc_keep = NULL; #endif } int _stm_largemalloc_resize_arena(size_t new_size) { + int result = 0; + lm_lock(); + if (new_size < 2 * sizeof(struct malloc_chunk)) - return 0; + goto fail; OPT_ASSERT((new_size & 31) == 0); new_size -= CHUNK_HEADER_SIZE; - mchunk_t *new_last_chunk = chunk_at_offset(first_chunk, new_size); - mchunk_t *old_last_chunk = last_chunk; - size_t old_size = ((char *)old_last_chunk) - (char *)first_chunk; + mchunk_t *new_last_chunk = chunk_at_offset(lm.first_chunk, new_size); + mchunk_t 
*old_last_chunk = lm.last_chunk; + size_t old_size = ((char *)old_last_chunk) - (char *)lm.first_chunk; if (new_size < old_size) { /* check if there is enough free space at the end to allow such a reduction */ - size_t lsize = last_chunk->prev_size; + size_t lsize = lm.last_chunk->prev_size; assert(lsize != THIS_CHUNK_FREE); if (lsize == BOTH_CHUNKS_USED) - return 0; + goto fail; lsize += CHUNK_HEADER_SIZE; - mchunk_t *prev_chunk = chunk_at_offset(last_chunk, -lsize); + mchunk_t *prev_chunk = chunk_at_offset(lm.last_chunk, -lsize); if (((char *)new_last_chunk) < ((char *)prev_chunk) + sizeof(struct malloc_chunk)) - return 0; + goto fail; /* unlink the prev_chunk from the doubly-linked list */ - prev_chunk->d.next->prev = prev_chunk->d.prev; - prev_chunk->d.prev->next = prev_chunk->d.next; + unlink_chunk(prev_chunk); /* reduce the prev_chunk */ - assert((prev_chunk->size & ~FLAG_SORTED) == last_chunk->prev_size); + assert(prev_chunk->size == lm.last_chunk->prev_size); prev_chunk->size = ((char*)new_last_chunk) - (char *)prev_chunk - CHUNK_HEADER_SIZE; /* make a fresh-new last chunk */ new_last_chunk->prev_size = prev_chunk->size; new_last_chunk->size = END_MARKER; - last_chunk = new_last_chunk; - assert(last_chunk == next_chunk_u(prev_chunk)); + lm.last_chunk = new_last_chunk; + assert(lm.last_chunk == next_chunk(prev_chunk)); insert_unsorted(prev_chunk); } else if (new_size > old_size) { /* make the new last chunk first, with only the extra size */ - mchunk_t *old_last_chunk = last_chunk; + mchunk_t *old_last_chunk = lm.last_chunk; old_last_chunk->size = (new_size - old_size) - CHUNK_HEADER_SIZE; new_last_chunk->prev_size = BOTH_CHUNKS_USED; new_last_chunk->size = END_MARKER; - last_chunk = new_last_chunk; - assert(last_chunk == next_chunk_u(old_last_chunk)); + lm.last_chunk = new_last_chunk; + assert(lm.last_chunk == next_chunk(old_last_chunk)); /* then free the last_chunk (turn it from "used" to "free) */ - _stm_large_free((char *)&old_last_chunk->d); + 
_large_free(old_last_chunk); } - return 1; + + result = 1; + fail: + lm_unlock(); + return result; } static inline bool _largemalloc_sweep_keep(mchunk_t *chunk) { -#ifdef STM_TESTS +#ifdef STM_LARGEMALLOC_TEST if (_stm_largemalloc_keep != NULL) return _stm_largemalloc_keep((char *)&chunk->d); #endif @@ -453,31 +590,32 @@ void _stm_largemalloc_sweep(void) { - /* This may be slightly optimized by inlining _stm_large_free() and + lm_lock(); + + /* This may be slightly optimized by inlining _large_free() and making cases, e.g. we might know already if the previous block was free or not. It's probably not really worth it. */ - mchunk_t *mnext, *chunk = first_chunk; + mchunk_t *mnext, *chunk = lm.first_chunk; if (chunk->prev_size == THIS_CHUNK_FREE) - chunk = next_chunk_a(chunk); /* go to the first non-free chunk */ + chunk = next_chunk(chunk); /* go to the first non-free chunk */ - while (chunk != last_chunk) { - + while (chunk != lm.last_chunk) { /* here, the chunk we're pointing to is not free */ assert(chunk->prev_size != THIS_CHUNK_FREE); /* first figure out the next non-free chunk */ - mnext = next_chunk_u(chunk); + mnext = next_chunk(chunk); if (mnext->prev_size == THIS_CHUNK_FREE) - mnext = next_chunk_a(mnext); + mnext = next_chunk(mnext); /* use the callback to know if 'chunk' contains an object that survives or dies */ if (!_largemalloc_sweep_keep(chunk)) { - size_t size = chunk->size; - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); - _stm_large_free((char *)&chunk->d); /* dies */ + _large_free(chunk); /* dies */ } chunk = mnext; } + + lm_unlock(); } diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -75,19 +75,6 @@ uint64_t _stm_total_allocated(void) { - mutex_pages_lock(); - uint64_t result = increment_total_allocated(0); - mutex_pages_unlock(); - return result; -} - -void _stm_mutex_pages_lock(void) -{ - mutex_pages_lock(); -} - -void _stm_mutex_pages_unlock(void) -{ - mutex_pages_unlock(); + return 
increment_total_allocated(0); } #endif diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -193,9 +193,7 @@ content); or add the object to 'large_overflow_objects'. */ if (STM_PSEGMENT->minor_collect_will_commit_now) { - mutex_pages_lock(); synchronize_object_now(obj); - mutex_pages_unlock(); } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); @@ -231,23 +229,13 @@ /* free any object left from 'young_outside_nursery' */ if (!tree_is_cleared(pseg->young_outside_nursery)) { - bool locked = false; wlog_t *item; + TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { assert(!_is_in_nursery((object_t *)item->addr)); - if (!locked) { - mutex_pages_lock(); - locked = true; - } - char *realobj = REAL_ADDRESS(pseg->pub.segment_base, item->addr); - ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; - if (locked) - mutex_pages_unlock(); - tree_clear(pseg->young_outside_nursery); } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -5,16 +5,12 @@ /************************************************************/ -static union { - struct { - uint8_t mutex_pages; - volatile bool major_collection_requested; - uint64_t total_allocated; /* keep track of how much memory we're - using, ignoring nurseries */ - uint64_t total_allocated_bound; - }; - char reserved[64]; -} pages_ctl __attribute__((aligned(64))); +struct { + volatile bool major_collection_requested; + uint64_t total_allocated; /* keep track of how much memory we're + using, ignoring nurseries */ + uint64_t total_allocated_bound; +} pages_ctl; static void setup_pages(void) @@ -28,37 +24,15 @@ memset(pages_privatized, 0, sizeof(pages_privatized)); } -static void mutex_pages_lock(void) -{ - if (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) == 0) - return; - - int previous = 
change_timing_state(STM_TIME_SPIN_LOOP); - while (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) != 0) { - spin_loop(); - } - change_timing_state(previous); -} - -static void mutex_pages_unlock(void) -{ - __sync_lock_release(&pages_ctl.mutex_pages); -} - -static bool _has_mutex_pages(void) -{ - return pages_ctl.mutex_pages != 0; -} - static uint64_t increment_total_allocated(ssize_t add_or_remove) { - assert(_has_mutex_pages()); - pages_ctl.total_allocated += add_or_remove; + uint64_t ta = __sync_add_and_fetch(&pages_ctl.total_allocated, + add_or_remove); - if (pages_ctl.total_allocated >= pages_ctl.total_allocated_bound) + if (ta >= pages_ctl.total_allocated_bound) pages_ctl.major_collection_requested = true; - return pages_ctl.total_allocated; + return ta; } static bool is_major_collection_requested(void) @@ -95,6 +69,17 @@ (void *)((addr - stm_object_pages) % (4096UL * NB_PAGES)), (long)pgoff / NB_PAGES, (void *)((pgoff % NB_PAGES) * 4096UL))); + assert(size % 4096 == 0); + assert(size <= TOTAL_MEMORY); + assert(((uintptr_t)addr) % 4096 == 0); + assert(addr >= stm_object_pages); + assert(addr <= stm_object_pages + TOTAL_MEMORY - size); + assert(pgoff >= 0); + assert(pgoff <= (TOTAL_MEMORY - size) / 4096UL); + + /* assert remappings follow the rule that page N in one segment + can only be remapped to page N in another segment */ + assert(((addr - stm_object_pages) / 4096UL - pgoff) % NB_PAGES == 0); int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) @@ -106,10 +91,12 @@ /* call remap_file_pages() to make all pages in the range(pagenum, pagenum+count) refer to the same physical range of pages from segment 0. 
*/ - uintptr_t i; - assert(_has_mutex_pages()); + dprintf(("pages_initialize_shared: 0x%ld - 0x%ld\n", pagenum, + pagenum + count)); + assert(pagenum < NB_PAGES); if (count == 0) return; + uintptr_t i; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, @@ -119,14 +106,20 @@ static void page_privatize(uintptr_t pagenum) { - if (is_private_page(STM_SEGMENT->segment_num, pagenum)) { - /* the page is already privatized */ + /* check this thread's 'pages_privatized' bit */ + uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); + struct page_shared_s *ps = &pages_privatized[pagenum - PAGE_FLAG_START]; + if (ps->by_segment & bitmask) { + /* the page is already privatized; nothing to do */ return; } - /* lock, to prevent concurrent threads from looking up this thread's - 'pages_privatized' bits in parallel */ - mutex_pages_lock(); +#ifndef NDEBUG + spinlock_acquire(lock_pages_privatizing[STM_SEGMENT->segment_num]); +#endif + + /* add this thread's 'pages_privatized' bit */ + __sync_fetch_and_add(&ps->by_segment, bitmask); /* "unmaps" the page to make the address space location correspond again to its underlying file offset (XXX later we should again @@ -140,11 +133,9 @@ /* copy the content from the shared (segment 0) source */ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); - /* add this thread's 'pages_privatized' bit */ - uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); - pages_privatized[pagenum - PAGE_FLAG_START].by_segment |= bitmask; - - mutex_pages_unlock(); +#ifndef NDEBUG + spinlock_release(lock_pages_privatizing[STM_SEGMENT->segment_num]); +#endif } static void _page_do_reshare(long segnum, uintptr_t pagenum) diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -34,6 +34,20 @@ }; static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; +/* Rules for concurrent access to this array, possibly with 
is_private_page(): + + - we clear bits only during major collection, when all threads are + synchronized anyway + + - we set only the bit corresponding to our segment number, using + an atomic addition; and we do it _before_ we actually make the + page private. + + - concurrently, other threads checking the bits might (rarely) + get the answer 'true' to is_private_page() even though it is not + actually private yet. This inconsistency is in the direction + that we want for synchronize_object_now(). +*/ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); @@ -41,10 +55,6 @@ static void _page_do_reshare(long segnum, uintptr_t pagenum); static void pages_setup_readmarkers_for_nursery(void); -/* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ -static void mutex_pages_lock(void); -static void mutex_pages_unlock(void); -static bool _has_mutex_pages(void) __attribute__((unused)); static uint64_t increment_total_allocated(ssize_t add_or_remove); static bool is_major_collection_requested(void); static void force_major_collection_request(void); @@ -62,3 +72,7 @@ if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) page_reshare(pagenum); } + +#ifndef NDEBUG +static char lock_pages_privatizing[NB_SEGMENTS + 1] = { 0 }; +#endif diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -9,7 +9,7 @@ PROT_READ | PROT_WRITE, MAP_PAGES_FLAGS, -1, 0); if (result == MAP_FAILED) - stm_fatalerror("%s failed: %m\n", reason); + stm_fatalerror("%s failed: %m", reason); return result; } @@ -132,17 +132,37 @@ teardown_pages(); } +static void _shadowstack_trap_page(char *start, int prot) +{ + size_t bsize = STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s); + char *end = start + bsize + 4095; + end -= (((uintptr_t)end) & 4095); + mprotect(end, 4096, prot); +} + static void _init_shadow_stack(stm_thread_local_t *tl) { - struct stm_shadowentry_s *s = 
(struct stm_shadowentry_s *) - malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s)); - assert(s); + size_t bsize = STM_SHADOW_STACK_DEPTH * sizeof(struct stm_shadowentry_s); + char *start = malloc(bsize + 8192); /* for the trap page, plus rounding */ + if (!start) + stm_fatalerror("can't allocate shadow stack"); + + /* set up a trap page: if the shadowstack overflows, it will + crash in a clean segfault */ + _shadowstack_trap_page(start, PROT_NONE); + + struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start; tl->shadowstack = s; tl->shadowstack_base = s; } static void _done_shadow_stack(stm_thread_local_t *tl) { + assert(tl->shadowstack >= tl->shadowstack_base); + + char *start = (char *)tl->shadowstack_base; + _shadowstack_trap_page(start, PROT_READ | PROT_WRITE); + free(tl->shadowstack_base); tl->shadowstack = NULL; tl->shadowstack_base = NULL; diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -55,7 +55,6 @@ "minor gc", "major gc", "sync pause", - "spin loop", }; void stm_flush_timing(stm_thread_local_t *tl, int verbose) diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -34,7 +34,7 @@ stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); long i; - for (i = 1; i <= NB_SEGMENTS; i++) { + for (i = 0; i <= NB_SEGMENTS; i++) { char *base = get_segment_base(i); object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); *ref_loc = value; @@ -57,11 +57,14 @@ a young outside nursery object. 
*/ assert(_is_in_nursery(item)); object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; + ssize_t size = 16; - /* the following checks are done like in nursery.c: */ - if (!(item->stm_flags & GCFLAG_HAS_SHADOW) - || (pforwarded_array[0] != GCWORD_MOVED)) { - /* weakref dies */ + /* check if the weakref object was moved out of the nursery */ + if (pforwarded_array[0] != GCWORD_MOVED) { + /* no: weakref dies */ +#ifndef NDEBUG + *WEAKREF_PTR(item, size) = (object_t *)-99; +#endif continue; } @@ -69,15 +72,13 @@ assert(!_is_young(item)); - ssize_t size = 16; object_t *pointing_to = *WEAKREF_PTR(item, size); assert(pointing_to != NULL); if (_is_in_nursery(pointing_to)) { object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)pointing_to; - /* the following checks are done like in nursery.c: */ - if (!(pointing_to->stm_flags & GCFLAG_HAS_SHADOW) - || (pforwarded_array[0] != GCWORD_MOVED)) { + /* check if the target was moved out of the nursery */ + if (pforwarded_array[0] != GCWORD_MOVED) { /* pointing_to dies */ _set_weakref_in_all_segments(item, NULL); continue; /* no need to remember in old_weakrefs */ @@ -96,7 +97,9 @@ _set_weakref_in_all_segments(item, NULL); continue; /* no need to remember in old_weakrefs */ } - /* pointing_to was already old */ + /* pointing_to is either a surviving young object outside + the nursery, or it was already old; in both cases keeping + the currently stored pointer is what we need */ } LIST_APPEND(STM_PSEGMENT->old_weakrefs, item); })); @@ -128,7 +131,7 @@ stm_char *wr = (stm_char *)WEAKREF_PTR(weakref, size); char *real_wr = REAL_ADDRESS(pseg->pub.segment_base, wr); object_t *pointing_to = *(object_t **)real_wr; - assert(pointing_to != NULL); + assert((uintptr_t)pointing_to >= NURSERY_END); if (!mark_visited_test(pointing_to)) { //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); _set_weakref_in_all_segments(weakref, NULL); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h 
@@ -70,7 +70,6 @@ STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, STM_TIME_SYNC_PAUSE, - STM_TIME_SPIN_LOOP, _STM_TIME_N }; @@ -136,8 +135,6 @@ object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); uint64_t _stm_total_allocated(void); -void _stm_mutex_pages_lock(void); -void _stm_mutex_pages_unlock(void); char *stm_object_pages; #endif @@ -262,6 +259,10 @@ void stm_setup(void); void stm_teardown(void); +/* The size of each shadow stack, in number of entries. + Must be big enough to accomodate all STM_PUSH_ROOTs! */ +#define STM_SHADOW_STACK_DEPTH 163840 + /* Push and pop roots from/to the shadow stack. Only allowed inside transaction. */ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -96,8 +96,6 @@ void stm_collect(long level); uint64_t _stm_total_allocated(void); -void _stm_mutex_pages_lock(void); -void _stm_mutex_pages_unlock(void); long stm_identityhash(object_t *obj); long stm_id(object_t *obj); @@ -279,6 +277,7 @@ ''', sources=source_files, define_macros=[('STM_TESTS', '1'), + ('STM_LARGEMALLOC_TEST', '1'), ('STM_NO_COND_WAIT', '1'), ('STM_DEBUGPRINT', '1'), ('GC_N_SMALL_REQUESTS', str(GC_N_SMALL_REQUESTS)), #check diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -14,10 +14,12 @@ lib.memset(self.rawmem, 0xcd, self.size) lib._stm_largemalloc_init_arena(self.rawmem, self.size) - lib._stm_mutex_pages_lock() # for this file def test_simple(self): + # + lib._stm_large_dump() d1 = lib._stm_large_malloc(7000) + lib._stm_large_dump() d2 = lib._stm_large_malloc(8000) print d1 print d2 @@ -70,7 +72,7 @@ lib._stm_large_dump() def test_resize_arena_reduce_2(self): - lib._stm_large_malloc(self.size // 2 - 64) + lib._stm_large_malloc(self.size // 2 - 80) r = lib._stm_largemalloc_resize_arena(self.size // 
2) assert r == 1 lib._stm_large_dump() @@ -120,7 +122,7 @@ p.append((d, sz, content1, content2)) lib._stm_large_dump() - def test_random_largemalloc_sweep(self): + def test_random_largemalloc_sweep(self, constrained_size_range=False): @ffi.callback("bool(char *)") def keep(data): try: @@ -138,7 +140,11 @@ r = random.Random(1000) for j in range(500): - sizes = [random.choice(range(104, 500, 8)) for i in range(20)] + if constrained_size_range: + max = 120 + else: + max = 500 + sizes = [random.choice(range(104, max, 8)) for i in range(20)] all = [lib._stm_large_malloc(size) for size in sizes] print all @@ -170,3 +176,6 @@ assert all[i][50] == chr(65 + i) else: assert all_orig[i][50] == '\xDE' + + def test_random_largemalloc_sweep_constrained_size_range(self): + self.test_random_largemalloc_sweep(constrained_size_range=True) diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -360,3 +360,40 @@ self.switch(1) make_wr() stm_major_collect() + + +class TestManyThreads(BaseTest): + NB_THREADS = NB_SEGMENTS + + def test_weakref_bug3(self): + # make an object + self.start_transaction() + lp0 = stm_allocate(16) + self.push_root(lp0) + self.commit_transaction() + lp0 = self.pop_root() + self.push_root(lp0) + # + # privatize the page in all segments + for i in range(NB_SEGMENTS-1, -1, -1): + self.switch(i) + self.start_transaction() + stm_set_char(lp0, 'A') + self.commit_transaction() + # + self.start_transaction() + lp2 = stm_allocate(16) + self.push_root(lp2) + lp1 = stm_allocate_weakref(lp2) + self.push_root(lp1) + self.commit_transaction() + lp1 = self.pop_root() + lp2 = self.pop_root() + self.push_root(lp2) + self.push_root(lp1) + # the commit copies the weakref to all segments, but misses + # segment #0 + # + self.start_transaction() + stm_major_collect() # reshare all, keeping only segment #0 + assert stm_get_weakref(lp1) == lp2 From noreply at buildbot.pypy.org Sun Apr 13 18:25:14 2014 From: 
noreply at buildbot.pypy.org (arigo) Date: Sun, 13 Apr 2014 18:25:14 +0200 (CEST) Subject: [pypy-commit] stmgc default: An obscure probable bug fix, with documentation but no test Message-ID: <20140413162514.9BF571C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1154:921c696f1903 Date: 2014-04-12 13:02 +0200 http://bitbucket.org/pypy/stmgc/changeset/921c696f1903/ Log: An obscure probable bug fix, with documentation but no test diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -384,6 +384,23 @@ EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ } + /* Do a full memory barrier. We must make sure that other + CPUs see the changes we did to the shared page ("S", + above) before we check the other segments below with + is_private_page(). Otherwise, we risk the following: + this CPU writes "S" but the writes are not visible yet; + then it checks is_private_page() and gets false, and does + nothing more; just afterwards another CPU sets its own + private_page bit and copies the page; but it risks doing + so before seeing the "S" writes. + + XXX what is the cost of this? If it's high, then we + should reorganize the code so that we buffer the second + parts and do them by bunch of N, after just one call to + __sync_synchronize()... 
+ */ + __sync_synchronize(); + for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; From noreply at buildbot.pypy.org Sun Apr 13 18:25:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 13 Apr 2014 18:25:15 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: hg merge default Message-ID: <20140413162515.B55CD1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1155:dc5ea72bcc5f Date: 2014-04-12 13:02 +0200 http://bitbucket.org/pypy/stmgc/changeset/dc5ea72bcc5f/ Log: hg merge default diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -417,6 +417,23 @@ EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ } + /* Do a full memory barrier. We must make sure that other + CPUs see the changes we did to the shared page ("S", + above) before we check the other segments below with + is_private_page(). Otherwise, we risk the following: + this CPU writes "S" but the writes are not visible yet; + then it checks is_private_page() and gets false, and does + nothing more; just afterwards another CPU sets its own + private_page bit and copies the page; but it risks doing + so before seeing the "S" writes. + + XXX what is the cost of this? If it's high, then we + should reorganize the code so that we buffer the second + parts and do them by bunch of N, after just one call to + __sync_synchronize()... 
+ */ + __sync_synchronize(); + for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; From noreply at buildbot.pypy.org Sun Apr 13 18:25:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 13 Apr 2014 18:25:16 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: in-progress Message-ID: <20140413162516.C9C2B1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1156:96ab628450d6 Date: 2014-04-12 14:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/96ab628450d6/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -332,129 +332,107 @@ } } -static void synchronize_object_now(object_t *obj) +static inline void _synchronize_fragment(stm_char *frag, ssize_t frag_size) +{ + /* First copy the object into the shared page, if needed */ + uintptr_t page = ((uintptr_t)obj) / 4096UL; + + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, frag); + char *dst = REAL_ADDRESS(stm_object_pages, frag); + if (is_private_page(STM_SEGMENT->segment_num, page)) + memcpy(dst, src, frag_size); + else + EVENTUALLY(memcmp(dst, src, frag_size) == 0); /* same page */ + + /* Then enqueue this object (or fragemnt of object) */ + if (STM_PSEGMENT->sq_len == SYNC_QUEUE_SIZE) + synchronize_objects_flush(); + STM_PSEGMENT->sq_fragments[STM_PSEGMENT->sq_len] = frag; + STM_PSEGMENT->sq_fragsizes[STM_PSEGMENT->sq_len] = frag_size; + ++STM_PSEGMENT->sq_len; +} + +static void synchronize_object_enqueue(object_t *obj) { /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other - segments' own private pages. + segments' own private pages. (The second part might be done + later; call synchronize_objects_flush() to flush this queue.) 
*/ assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)dst); + OPT_ASSERT(obj_size >= 16); + if (LIKELY(is_small_uniform(obj))) { + _synchronize_fragment((stm_char *)obj, obj_size); + return; + } + + /* else, a more complicated case for large objects, to copy + around data only within the needed pages + */ uintptr_t start = (uintptr_t)obj; - uintptr_t first_page = start / 4096UL; - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + uintptr_t end = start + obj_size; + + do { + uintptr_t copy_up_to = (start + 4096) & ~4095; /* end of page */ + if (copy_up_to >= end) { + copy_up_to = end; /* this is the last fragment */ + } + uintptr_t copy_size = copy_up_to - start; + + /* double-check that the result fits in one page */ + assert(copy_size > 0); + assert(copy_size + (start & 4095) <= 4096); + + _synchronize_fragment((stm_char *)start, copy_size); + + start = copy_up_to; + + } while (start != end); +} + +static void synchronize_objects_flush(void) +{ + + /* Do a full memory barrier. We must make sure that other + CPUs see the changes we did to the shared page ("S", in + synchronize_object_enqueue()) before we check the other segments + with is_private_page() (below). Otherwise, we risk the + following: this CPU writes "S" but the writes are not visible yet; + then it checks is_private_page() and gets false, and does nothing + more; just afterwards another CPU sets its own private_page bit + and copies the page; but it risks doing so before seeing the "S" + writes. 
+ */ + long j = STM_PSEGMENT->sq_len; + if (j == 0) + return; + STM_PSEGMENT->sq_len = 0; + + __sync_synchronize(); + long i, myself = STM_SEGMENT->segment_num; + do { + --j; + stm_char *frag = STM_PSEGMENT->sq_fragments[j]; + uintptr_t page = ((uintptr_t)frag) / 4096UL; + if (!any_other_private_page(myself, page)) + continue; - if (is_small_uniform(obj)) { - /* First copy the object into the shared page, if needed */ - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - char *dst = REAL_ADDRESS(stm_object_pages, start); - ssize_t obj_size = 0; /* computed lazily, only if needed */ - - if (is_private_page(myself, first_page)) { - obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - memcpy(dst, src, obj_size); - } - else { - assert(memcmp(dst, src, /* already identical */ - stmcb_size_rounded_up((struct object_s *)realobj)) == 0); - } + ssize_t frag_size = STM_PSEGMENT->sq_fragsizes[j]; for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; - src = REAL_ADDRESS(stm_object_pages, start); - dst = REAL_ADDRESS(get_segment_base(i), start); - if (is_private_page(i, first_page)) { - /* The page is a private page. We need to diffuse this - object from the shared page to this private page. 
*/ - if (obj_size == 0) { - obj_size = - stmcb_size_rounded_up((struct object_s *)src); - } - memcpy(dst, src, obj_size); - } - else { - assert(memcmp(dst, src, /* already identical */ - stmcb_size_rounded_up((struct object_s *)src)) == 0); - } - } - } - else { - ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - assert(obj_size >= 16); - uintptr_t end = start + obj_size; - uintptr_t last_page = (end - 1) / 4096UL; - - for (; first_page <= last_page; first_page++) { - - uintptr_t copy_size; - if (first_page == last_page) { - /* this is the final fragment */ - copy_size = end - start; - } - else { - /* this is a non-final fragment, going up to the - page's end */ - copy_size = 4096 - (start & 4095); - } - /* double-check that the result fits in one page */ - assert(copy_size > 0); - assert(copy_size + (start & 4095) <= 4096); - - /* First copy the object into the shared page, if needed */ - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - char *dst = REAL_ADDRESS(stm_object_pages, start); - if (is_private_page(myself, first_page)) { - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); - } - else { - EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ - } - - /* Do a full memory barrier. We must make sure that other - CPUs see the changes we did to the shared page ("S", - above) before we check the other segments below with - is_private_page(). Otherwise, we risk the following: - this CPU writes "S" but the writes are not visible yet; - then it checks is_private_page() and gets false, and does - nothing more; just afterwards another CPU sets its own - private_page bit and copies the page; but it risks doing - so before seeing the "S" writes. - - XXX what is the cost of this? If it's high, then we - should reorganize the code so that we buffer the second - parts and do them by bunch of N, after just one call to - __sync_synchronize()... 
- */ - __sync_synchronize(); - - for (i = 1; i <= NB_SEGMENTS; i++) { - if (i == myself) - continue; - - src = REAL_ADDRESS(stm_object_pages, start); - dst = REAL_ADDRESS(get_segment_base(i), start); - if (is_private_page(i, first_page)) { - /* The page is a private page. We need to diffuse this - fragment of object from the shared page to this private - page. */ - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); - } - else { - EVENTUALLY(!memcmp(dst, src, copy_size)); /* same page */ - } - } - - start = (start + 4096) & ~4095; + char *src = REAL_ADDRESS(stm_object_pages, frag); + char *dst = REAL_ADDRESS(get_segment_base(i), frag); + if (is_private_page(i, page)) + memcpy(dst, src, frag_size); + else + EVENTUALLY(memcmp(dst, src, frag_size) == 0); /* same page */ } } } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -59,6 +59,8 @@ GCFLAG_OVERFLOW_NUMBER_bit0 = 0x4 /* must be last */ }; +#define SYNC_QUEUE_SIZE 31 + /************************************************************/ @@ -155,6 +157,12 @@ /* This is for smallmalloc.c */ struct small_malloc_data_s small_malloc_data; + + /* The sync queue is used to minimize the number of __sync_synchronize + calls needed. 
*/ + stm_char *sq_fragments[SYNC_QUEUE_SIZE]; + int sq_fragsizes[SYNC_QUEUE_SIZE]; + int sq_len; }; enum /* safe_point */ { @@ -226,4 +234,5 @@ } static void copy_object_to_shared(object_t *obj, int source_segment_num); -static void synchronize_object_now(object_t *obj); +static void synchronize_object_enqueue(object_t *obj); +static void synchronize_objects_flush(void); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -56,7 +56,6 @@ /************************************************************/ #define GCWORD_MOVED ((object_t *) -1) -#define FLAG_SYNC_LARGE 0x01 static void minor_trace_if_young(object_t **pobj) @@ -145,8 +144,7 @@ } /* Must trace the object later */ - uintptr_t nobj_sync_now = (uintptr_t)nobj | !is_small_uniform(nobj); - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj); } static void collect_roots_in_nursery(void) @@ -179,29 +177,20 @@ static void collect_oldrefs_to_nursery(void) { struct list_s *lst = STM_PSEGMENT->objects_pointing_to_nursery; + assert(STM_PSEGMENT->minor_collect_will_commit_now); while (!list_is_empty(lst)) { - uintptr_t obj_sync_now = list_pop_item(lst); - object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE); + object_t *obj = (object_t *)list_pop_item(lst); _collect_now(obj); - if (obj_sync_now & FLAG_SYNC_LARGE) { - /* this was a large object. We must either synchronize the - object to other segments now (after we added the - WRITE_BARRIER flag and traced into it to fix its - content); or add the object to 'large_overflow_objects'. 
- */ - if (STM_PSEGMENT->minor_collect_will_commit_now) { - synchronize_object_now(obj); - } - else - LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); - } + synchronize_object_enqueue(obj); /* the list could have moved while appending */ lst = STM_PSEGMENT->objects_pointing_to_nursery; } + + synchronize_objects_flush(); } static void collect_modified_old_objects(void) @@ -284,6 +273,9 @@ following shortcut */ collect_modified_old_objects(); } + else { + abort(); // handle specially the objects_pointing_to_nursery already there + } collect_roots_in_nursery(); diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -67,6 +67,20 @@ return (pages_privatized[pagenum - PAGE_FLAG_START].by_segment & bitmask); } +static inline bool any_private_page(uintptr_t pagenum) +{ + assert(pagenum >= PAGE_FLAG_START); + return pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0; +} + +static inline bool any_other_private_page(long exclsegnum, uintptr_t pagenum) +{ + assert(pagenum >= PAGE_FLAG_START); + uint64_t bitmask = 1UL << (exclsegnum - 1); + return ((pages_privatized[pagenum - PAGE_FLAG_START].by_segment & ~bitmask) + != 0); +} + static inline void page_check_and_reshare(uintptr_t pagenum) { if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) From noreply at buildbot.pypy.org Sun Apr 13 18:25:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 13 Apr 2014 18:25:17 +0200 (CEST) Subject: [pypy-commit] stmgc default: Another TODO Message-ID: <20140413162517.C5E7E1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1157:a158a889e78b Date: 2014-04-13 18:21 +0200 http://bitbucket.org/pypy/stmgc/changeset/a158a889e78b/ Log: Another TODO diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -11,3 +11,8 @@ - fork() is done by copying the whole mmap non-lazily; improve. 
- contention.c: when pausing: should also tell other_pseg "please commit soon" + +- resharing: remap_file_pages on multiple pages at once; and madvise() + the unused pages away --- or maybe use consecutive addresses from the + lowest ones from segment N, instead of the page corresponding to the page + number in segment 0 (possibly a bit messy) From noreply at buildbot.pypy.org Sun Apr 13 18:41:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 13 Apr 2014 18:41:58 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/a158a889e78b and remove the spinlock_acquire() Message-ID: <20140413164158.D59AF1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70614:06e3816b1e7b Date: 2014-04-13 18:41 +0200 http://bitbucket.org/pypy/pypy/changeset/06e3816b1e7b/ Log: import stmgc/a158a889e78b and remove the spinlock_acquire() function from stmgcintf.h diff --git a/rpython/translator/c/src/mem.c b/rpython/translator/c/src/mem.c --- a/rpython/translator/c/src/mem.c +++ b/rpython/translator/c/src/mem.c @@ -48,7 +48,7 @@ // spinlock_acquire/spinlock_release defined in ../../stm/src_stm/stmgcintf.h static Signed pypy_debug_alloc_lock = 0; #else -# define spinlock_acquire(lock, targetvalue) /* nothing */ +# define spinlock_acquire(lock) /* nothing */ # define spinlock_release(lock) /* nothing */ #endif @@ -58,7 +58,7 @@ RPyAssert(p, "out of memory"); p->addr = addr; p->funcname = funcname; - spinlock_acquire(pypy_debug_alloc_lock, '+'); + spinlock_acquire(pypy_debug_alloc_lock); p->next = pypy_debug_alloc_list; pypy_debug_alloc_list = p; spinlock_release(pypy_debug_alloc_lock); @@ -67,7 +67,7 @@ int try_pypy_debug_alloc_stop(void *addr) { struct pypy_debug_alloc_s **p; - spinlock_acquire(pypy_debug_alloc_lock, '-'); + spinlock_acquire(pypy_debug_alloc_lock); for (p = &pypy_debug_alloc_list; *p; p = &((*p)->next)) if ((*p)->addr == addr) { @@ -92,7 +92,7 @@ { long count = 0; struct pypy_debug_alloc_s *p; - 
spinlock_acquire(pypy_debug_alloc_lock, 'R'); + spinlock_acquire(pypy_debug_alloc_lock); for (p = pypy_debug_alloc_list; p; p = p->next) count++; if (count > 0) diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -0492e398156b +a158a889e78b diff --git a/rpython/translator/stm/src_stm/stm/atomic.h b/rpython/translator/stm/src_stm/stm/atomic.h --- a/rpython/translator/stm/src_stm/stm/atomic.h +++ b/rpython/translator/stm/src_stm/stm/atomic.h @@ -37,4 +37,12 @@ #endif +#define spinlock_acquire(lock) \ + do { if (LIKELY(__sync_lock_test_and_set(&(lock), 1) == 0)) break; \ + spin_loop(); } while (1) +#define spinlock_release(lock) \ + do { assert((lock) == 1); \ + __sync_lock_release(&(lock)); } while (0) + + #endif /* _STM_ATOMIC_H */ diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -9,6 +9,23 @@ memset(write_locks, 0, sizeof(write_locks)); } +#ifdef NDEBUG +#define EVENTUALLY(condition) /* nothing */ +#else +#define EVENTUALLY(condition) \ + { \ + if (!(condition)) { \ + int _i; \ + for (_i = 1; _i <= NB_SEGMENTS; _i++) \ + spinlock_acquire(lock_pages_privatizing[_i]); \ + if (!(condition)) \ + stm_fatalerror("fails: " #condition); \ + for (_i = 1; _i <= NB_SEGMENTS; _i++) \ + spinlock_release(lock_pages_privatizing[_i]); \ + } \ + } +#endif + static void check_flag_write_barrier(object_t *obj) { /* check that all copies of the object, apart from mine, have the @@ -22,12 +39,7 @@ if (i == STM_SEGMENT->segment_num) continue; o1 = (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); - if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) { - mutex_pages_lock(); /* try again... 
*/ - if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) - stm_fatalerror("missing GCFLAG_WRITE_BARRIER"); - mutex_pages_unlock(); - } + EVENTUALLY(o1->stm_flags & GCFLAG_WRITE_BARRIER); } #endif } @@ -272,7 +284,6 @@ with synchronize_object_now() but I don't completely see how to improve... */ - assert(_has_mutex_pages()); assert(!_is_young(obj)); char *segment_base = get_segment_base(source_segment_num); @@ -327,10 +338,7 @@ /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other segments' own private pages. - - This must be called with the mutex_pages_lock! */ - assert(_has_mutex_pages()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); @@ -374,9 +382,26 @@ memcpy(dst, src, copy_size); } else { - assert(memcmp(dst, src, copy_size) == 0); /* same page */ + EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ } + /* Do a full memory barrier. We must make sure that other + CPUs see the changes we did to the shared page ("S", + above) before we check the other segments below with + is_private_page(). Otherwise, we risk the following: + this CPU writes "S" but the writes are not visible yet; + then it checks is_private_page() and gets false, and does + nothing more; just afterwards another CPU sets its own + private_page bit and copies the page; but it risks doing + so before seeing the "S" writes. + + XXX what is the cost of this? If it's high, then we + should reorganize the code so that we buffer the second + parts and do them by bunch of N, after just one call to + __sync_synchronize()... 
+ */ + __sync_synchronize(); + for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; @@ -393,7 +418,7 @@ memcpy(dst, src, copy_size); } else { - assert(memcmp(dst, src, copy_size) == 0); /* same page */ + EVENTUALLY(!memcmp(dst, src, copy_size)); /* same page */ } } @@ -486,12 +511,10 @@ major_collection_now_at_safe_point(); /* synchronize overflow objects living in privatized pages */ - mutex_pages_lock(); push_overflow_objects_from_privatized_pages(); /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); - mutex_pages_unlock(); /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -71,7 +71,6 @@ s_mutex_lock(); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); - mutex_pages_lock(); /* Make a new mmap at some other address, but of the same size as the standard mmap at stm_object_pages @@ -167,7 +166,6 @@ fork_big_copy = NULL; bool was_in_transaction = fork_was_in_transaction; - mutex_pages_unlock(); s_mutex_unlock(); if (!was_in_transaction) { @@ -204,7 +202,6 @@ /* this new process contains no other thread, so we can just release these locks early */ - mutex_pages_unlock(); s_mutex_unlock(); /* Move the copy of the mmap over the old one, overwriting it diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -39,6 +39,7 @@ static void grab_more_free_pages_for_small_allocations(void) { + abort();//XXX /* grab N (= GCPAGE_NUM_PAGES) pages out of the top addresses */ uintptr_t decrease_by = GCPAGE_NUM_PAGES * 4096; if (uninitialized_page_stop - uninitialized_page_start <= decrease_by) @@ -76,17 +77,22 @@ } 
+static int lock_growth_large = 0; + static char *allocate_outside_nursery_large(uint64_t size) { - /* thread-safe: use the lock of pages.c to prevent any remapping - from occurring under our feet */ - mutex_pages_lock(); - /* Allocate the object with largemalloc.c from the lower addresses. */ char *addr = _stm_large_malloc(size); if (addr == NULL) stm_fatalerror("not enough memory!"); + if (LIKELY(addr + size <= uninitialized_page_start)) { + return addr; + } + + /* uncommon case: need to initialize some more pages */ + spinlock_acquire(lock_growth_large); + if (addr + size > uninitialized_page_start) { uintptr_t npages; npages = (addr + size - uninitialized_page_start) / 4096UL; @@ -96,11 +102,10 @@ stm_fatalerror("out of memory!"); /* XXX */ } setup_N_pages(uninitialized_page_start, npages); + __sync_synchronize(); uninitialized_page_start += npages * 4096UL; } - - mutex_pages_unlock(); - + spinlock_release(lock_growth_large); return addr; } @@ -256,7 +261,6 @@ total_allocated by 4096. 
*/ long i; - mutex_pages_lock(); for (i = 1; i <= NB_SEGMENTS; i++) { /* The 'modified_old_objects' list gives the list of objects @@ -306,7 +310,6 @@ for (i = 1; i <= NB_SEGMENTS; i++) { major_restore_private_bits_for_modified_objects(i); } - mutex_pages_unlock(); } @@ -465,9 +468,7 @@ static void sweep_large_objects(void) { - mutex_pages_lock(); _stm_largemalloc_sweep(); - mutex_pages_unlock(); } static void clean_write_locks(void) diff --git a/rpython/translator/stm/src_stm/stm/largemalloc.c b/rpython/translator/stm/src_stm/stm/largemalloc.c --- a/rpython/translator/stm/src_stm/stm/largemalloc.c +++ b/rpython/translator/stm/src_stm/stm/largemalloc.c @@ -107,20 +107,35 @@ */ -static dlist_t largebins[N_BINS]; -static mchunk_t *first_chunk, *last_chunk; + +static struct { + int lock; + mchunk_t *first_chunk, *last_chunk; + dlist_t largebins[N_BINS]; +} lm __attribute__((aligned(64))); + + +static void lm_lock(void) +{ + spinlock_acquire(lm.lock); +} + +static void lm_unlock(void) +{ + spinlock_release(lm.lock); +} static void insert_unsorted(mchunk_t *new) { size_t index = LAST_BIN_INDEX(new->size) ? 
N_BINS - 1 : largebin_index(new->size); - new->d.next = &largebins[index]; - new->d.prev = largebins[index].prev; + new->d.next = &lm.largebins[index]; + new->d.prev = lm.largebins[index].prev; new->d.prev->next = &new->d; new->u.up = UU_UNSORTED; new->u.down = NULL; - largebins[index].prev = &new->d; + lm.largebins[index].prev = &new->d; } static int compare_chunks(const void *vchunk1, const void *vchunk2) @@ -140,8 +155,8 @@ static void really_sort_bin(size_t index) { - dlist_t *unsorted = largebins[index].prev; - dlist_t *end = &largebins[index]; + dlist_t *unsorted = lm.largebins[index].prev; + dlist_t *end = &lm.largebins[index]; dlist_t *scan = unsorted->prev; size_t count = 1; while (scan != end && data2chunk(scan)->u.up == UU_UNSORTED) { @@ -177,7 +192,7 @@ chunk1 = chunks[--count]; } size_t search_size = chunk1->size; - dlist_t *head = largebins[index].next; + dlist_t *head = lm.largebins[index].next; while (1) { if (head == end || data2chunk(head)->size < search_size) { @@ -219,8 +234,8 @@ static void sort_bin(size_t index) { - dlist_t *last = largebins[index].prev; - if (last != &largebins[index] && data2chunk(last)->u.up == UU_UNSORTED) + dlist_t *last = lm.largebins[index].prev; + if (last != &lm.largebins[index] && data2chunk(last)->u.up == UU_UNSORTED) really_sort_bin(index); } @@ -263,13 +278,15 @@ if (request_size < MIN_ALLOC_SIZE) request_size = MIN_ALLOC_SIZE; + lm_lock(); + size_t index = largebin_index(request_size); sort_bin(index); /* scan through the chunks of current bin in reverse order to find the smallest that fits. */ - dlist_t *scan = largebins[index].prev; - dlist_t *end = &largebins[index]; + dlist_t *scan = lm.largebins[index].prev; + dlist_t *end = &lm.largebins[index]; mchunk_t *mscan; while (scan != end) { mscan = data2chunk(scan); @@ -287,16 +304,17 @@ smallest item of the first non-empty bin, as it will be large enough. 
*/ while (++index < N_BINS) { - if (largebins[index].prev != &largebins[index]) { + if (lm.largebins[index].prev != &lm.largebins[index]) { /* non-empty bin. */ sort_bin(index); - scan = largebins[index].prev; + scan = lm.largebins[index].prev; mscan = data2chunk(scan); goto found; } } /* not enough memory. */ + lm_unlock(); return NULL; found: @@ -337,12 +355,13 @@ mscan->prev_size = BOTH_CHUNKS_USED; increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); + lm_unlock(); + return (char *)&mscan->d; } -void _stm_large_free(char *data) +static void _large_free(mchunk_t *chunk) { - mchunk_t *chunk = data2chunk(data); assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); @@ -350,9 +369,12 @@ increment_total_allocated(-(chunk->size + LARGE_MALLOC_OVERHEAD)); #ifndef NDEBUG - assert(chunk->size >= sizeof(dlist_t)); - assert(chunk->size <= (((char *)last_chunk) - (char *)data)); - memset(data, 0xDE, chunk->size); + { + char *data = (char *)&chunk->d; + assert(chunk->size >= sizeof(dlist_t)); + assert(chunk->size <= (((char *)lm.last_chunk) - data)); + memset(data, 0xDE, chunk->size); + } #endif /* try to merge with the following chunk in memory */ @@ -409,10 +431,18 @@ insert_unsorted(chunk); } +void _stm_large_free(char *data) +{ + lm_lock(); + _large_free(data2chunk(data)); + lm_unlock(); +} + void _stm_large_dump(void) { - char *data = ((char *)first_chunk) + 16; + lm_lock(); + char *data = ((char *)lm.first_chunk) + 16; size_t prev_size_if_free = 0; fprintf(stderr, "\n"); while (1) { @@ -447,12 +477,13 @@ data += 16; } fprintf(stderr, "\n %p: end. 
]\n\n", data - 8); - assert(data - 16 == (char *)last_chunk); + assert(data - 16 == (char *)lm.last_chunk); + lm_unlock(); } char *_stm_largemalloc_data_start(void) { - return (char *)first_chunk; + return (char *)lm.first_chunk; } #ifdef STM_LARGEMALLOC_TEST @@ -463,21 +494,23 @@ { int i; for (i = 0; i < N_BINS; i++) { - largebins[i].prev = &largebins[i]; - largebins[i].next = &largebins[i]; + lm.largebins[i].prev = &lm.largebins[i]; + lm.largebins[i].next = &lm.largebins[i]; } assert(data_size >= 2 * sizeof(struct malloc_chunk)); assert((data_size & 31) == 0); - first_chunk = (mchunk_t *)data_start; - first_chunk->prev_size = THIS_CHUNK_FREE; - first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE; - last_chunk = chunk_at_offset(first_chunk, data_size - CHUNK_HEADER_SIZE); - last_chunk->prev_size = first_chunk->size; - last_chunk->size = END_MARKER; - assert(last_chunk == next_chunk(first_chunk)); + lm.first_chunk = (mchunk_t *)data_start; + lm.first_chunk->prev_size = THIS_CHUNK_FREE; + lm.first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE; + lm.last_chunk = chunk_at_offset(lm.first_chunk, + data_size - CHUNK_HEADER_SIZE); + lm.last_chunk->prev_size = lm.first_chunk->size; + lm.last_chunk->size = END_MARKER; + assert(lm.last_chunk == next_chunk(lm.first_chunk)); + lm.lock = 0; - insert_unsorted(first_chunk); + insert_unsorted(lm.first_chunk); #ifdef STM_LARGEMALLOC_TEST _stm_largemalloc_keep = NULL; @@ -486,57 +519,64 @@ int _stm_largemalloc_resize_arena(size_t new_size) { + int result = 0; + lm_lock(); + if (new_size < 2 * sizeof(struct malloc_chunk)) - return 0; + goto fail; OPT_ASSERT((new_size & 31) == 0); new_size -= CHUNK_HEADER_SIZE; - mchunk_t *new_last_chunk = chunk_at_offset(first_chunk, new_size); - mchunk_t *old_last_chunk = last_chunk; - size_t old_size = ((char *)old_last_chunk) - (char *)first_chunk; + mchunk_t *new_last_chunk = chunk_at_offset(lm.first_chunk, new_size); + mchunk_t *old_last_chunk = lm.last_chunk; + size_t old_size = ((char 
*)old_last_chunk) - (char *)lm.first_chunk; if (new_size < old_size) { /* check if there is enough free space at the end to allow such a reduction */ - size_t lsize = last_chunk->prev_size; + size_t lsize = lm.last_chunk->prev_size; assert(lsize != THIS_CHUNK_FREE); if (lsize == BOTH_CHUNKS_USED) - return 0; + goto fail; lsize += CHUNK_HEADER_SIZE; - mchunk_t *prev_chunk = chunk_at_offset(last_chunk, -lsize); + mchunk_t *prev_chunk = chunk_at_offset(lm.last_chunk, -lsize); if (((char *)new_last_chunk) < ((char *)prev_chunk) + sizeof(struct malloc_chunk)) - return 0; + goto fail; /* unlink the prev_chunk from the doubly-linked list */ unlink_chunk(prev_chunk); /* reduce the prev_chunk */ - assert(prev_chunk->size == last_chunk->prev_size); + assert(prev_chunk->size == lm.last_chunk->prev_size); prev_chunk->size = ((char*)new_last_chunk) - (char *)prev_chunk - CHUNK_HEADER_SIZE; /* make a fresh-new last chunk */ new_last_chunk->prev_size = prev_chunk->size; new_last_chunk->size = END_MARKER; - last_chunk = new_last_chunk; - assert(last_chunk == next_chunk(prev_chunk)); + lm.last_chunk = new_last_chunk; + assert(lm.last_chunk == next_chunk(prev_chunk)); insert_unsorted(prev_chunk); } else if (new_size > old_size) { /* make the new last chunk first, with only the extra size */ - mchunk_t *old_last_chunk = last_chunk; + mchunk_t *old_last_chunk = lm.last_chunk; old_last_chunk->size = (new_size - old_size) - CHUNK_HEADER_SIZE; new_last_chunk->prev_size = BOTH_CHUNKS_USED; new_last_chunk->size = END_MARKER; - last_chunk = new_last_chunk; - assert(last_chunk == next_chunk(old_last_chunk)); + lm.last_chunk = new_last_chunk; + assert(lm.last_chunk == next_chunk(old_last_chunk)); /* then free the last_chunk (turn it from "used" to "free) */ - _stm_large_free((char *)&old_last_chunk->d); + _large_free(old_last_chunk); } - return 1; + + result = 1; + fail: + lm_unlock(); + return result; } @@ -551,15 +591,17 @@ void _stm_largemalloc_sweep(void) { - /* This may be slightly 
optimized by inlining _stm_large_free() and + lm_lock(); + + /* This may be slightly optimized by inlining _large_free() and making cases, e.g. we might know already if the previous block was free or not. It's probably not really worth it. */ - mchunk_t *mnext, *chunk = first_chunk; + mchunk_t *mnext, *chunk = lm.first_chunk; if (chunk->prev_size == THIS_CHUNK_FREE) chunk = next_chunk(chunk); /* go to the first non-free chunk */ - while (chunk != last_chunk) { + while (chunk != lm.last_chunk) { /* here, the chunk we're pointing to is not free */ assert(chunk->prev_size != THIS_CHUNK_FREE); @@ -571,8 +613,10 @@ /* use the callback to know if 'chunk' contains an object that survives or dies */ if (!_largemalloc_sweep_keep(chunk)) { - _stm_large_free((char *)&chunk->d); /* dies */ + _large_free(chunk); /* dies */ } chunk = mnext; } + + lm_unlock(); } diff --git a/rpython/translator/stm/src_stm/stm/misc.c b/rpython/translator/stm/src_stm/stm/misc.c --- a/rpython/translator/stm/src_stm/stm/misc.c +++ b/rpython/translator/stm/src_stm/stm/misc.c @@ -76,21 +76,6 @@ uint64_t _stm_total_allocated(void) { - mutex_pages_lock(); - uint64_t result = increment_total_allocated(0); - mutex_pages_unlock(); - return result; + return increment_total_allocated(0); } #endif - -#ifdef STM_LARGEMALLOC_TEST -void _stm_mutex_pages_lock(void) -{ - mutex_pages_lock(); -} - -void _stm_mutex_pages_unlock(void) -{ - mutex_pages_unlock(); -} -#endif diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -196,9 +196,7 @@ content); or add the object to 'large_overflow_objects'. 
*/ if (STM_PSEGMENT->minor_collect_will_commit_now) { - mutex_pages_lock(); synchronize_object_now(obj); - mutex_pages_unlock(); } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); @@ -234,20 +232,13 @@ /* free any object left from 'young_outside_nursery' */ if (!tree_is_cleared(pseg->young_outside_nursery)) { - bool locked = false; wlog_t *item; + TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { assert(!_is_in_nursery((object_t *)item->addr)); - if (!locked) { - mutex_pages_lock(); - locked = true; - } _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; - if (locked) - mutex_pages_unlock(); - tree_clear(pseg->young_outside_nursery); } diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -6,16 +6,12 @@ /************************************************************/ -static union { - struct { - uint8_t mutex_pages; - volatile bool major_collection_requested; - uint64_t total_allocated; /* keep track of how much memory we're - using, ignoring nurseries */ - uint64_t total_allocated_bound; - }; - char reserved[64]; -} pages_ctl __attribute__((aligned(64))); +struct { + volatile bool major_collection_requested; + uint64_t total_allocated; /* keep track of how much memory we're + using, ignoring nurseries */ + uint64_t total_allocated_bound; +} pages_ctl; static void setup_pages(void) @@ -29,37 +25,15 @@ memset(pages_privatized, 0, sizeof(pages_privatized)); } -static void mutex_pages_lock(void) -{ - if (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) == 0) - return; - - int previous = change_timing_state(STM_TIME_SPIN_LOOP); - while (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) != 0) { - spin_loop(); - } - change_timing_state(previous); -} - -static void mutex_pages_unlock(void) -{ - __sync_lock_release(&pages_ctl.mutex_pages); -} - -static bool _has_mutex_pages(void) -{ - return 
pages_ctl.mutex_pages != 0; -} - static uint64_t increment_total_allocated(ssize_t add_or_remove) { - assert(_has_mutex_pages()); - pages_ctl.total_allocated += add_or_remove; + uint64_t ta = __sync_add_and_fetch(&pages_ctl.total_allocated, + add_or_remove); - if (pages_ctl.total_allocated >= pages_ctl.total_allocated_bound) + if (ta >= pages_ctl.total_allocated_bound) pages_ctl.major_collection_requested = true; - return pages_ctl.total_allocated; + return ta; } static bool is_major_collection_requested(void) @@ -118,10 +92,12 @@ /* call remap_file_pages() to make all pages in the range(pagenum, pagenum+count) refer to the same physical range of pages from segment 0. */ - uintptr_t i; - assert(_has_mutex_pages()); + dprintf(("pages_initialize_shared: 0x%ld - 0x%ld\n", pagenum, + pagenum + count)); + assert(pagenum < NB_PAGES); if (count == 0) return; + uintptr_t i; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, @@ -131,14 +107,20 @@ static void page_privatize(uintptr_t pagenum) { - if (is_private_page(STM_SEGMENT->segment_num, pagenum)) { - /* the page is already privatized */ + /* check this thread's 'pages_privatized' bit */ + uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); + struct page_shared_s *ps = &pages_privatized[pagenum - PAGE_FLAG_START]; + if (ps->by_segment & bitmask) { + /* the page is already privatized; nothing to do */ return; } - /* lock, to prevent concurrent threads from looking up this thread's - 'pages_privatized' bits in parallel */ - mutex_pages_lock(); +#ifndef NDEBUG + spinlock_acquire(lock_pages_privatizing[STM_SEGMENT->segment_num]); +#endif + + /* add this thread's 'pages_privatized' bit */ + __sync_fetch_and_add(&ps->by_segment, bitmask); /* "unmaps" the page to make the address space location correspond again to its underlying file offset (XXX later we should again @@ -152,11 +134,9 @@ /* copy the content from the shared (segment 0) source 
*/ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); - /* add this thread's 'pages_privatized' bit */ - uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); - pages_privatized[pagenum - PAGE_FLAG_START].by_segment |= bitmask; - - mutex_pages_unlock(); +#ifndef NDEBUG + spinlock_release(lock_pages_privatizing[STM_SEGMENT->segment_num]); +#endif } static void _page_do_reshare(long segnum, uintptr_t pagenum) diff --git a/rpython/translator/stm/src_stm/stm/pages.h b/rpython/translator/stm/src_stm/stm/pages.h --- a/rpython/translator/stm/src_stm/stm/pages.h +++ b/rpython/translator/stm/src_stm/stm/pages.h @@ -35,6 +35,20 @@ }; static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; +/* Rules for concurrent access to this array, possibly with is_private_page(): + + - we clear bits only during major collection, when all threads are + synchronized anyway + + - we set only the bit corresponding to our segment number, using + an atomic addition; and we do it _before_ we actually make the + page private. + + - concurrently, other threads checking the bits might (rarely) + get the answer 'true' to is_private_page() even though it is not + actually private yet. This inconsistency is in the direction + that we want for synchronize_object_now(). 
+*/ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); @@ -42,10 +56,6 @@ static void _page_do_reshare(long segnum, uintptr_t pagenum); static void pages_setup_readmarkers_for_nursery(void); -/* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ -static void mutex_pages_lock(void); -static void mutex_pages_unlock(void); -static bool _has_mutex_pages(void) __attribute__((unused)); static uint64_t increment_total_allocated(ssize_t add_or_remove); static bool is_major_collection_requested(void); static void force_major_collection_request(void); @@ -64,4 +74,6 @@ page_reshare(pagenum); } -void _stm_mutex_pages_lock(void); +#ifndef NDEBUG +static char lock_pages_privatizing[NB_SEGMENTS + 1] = { 0 }; +#endif diff --git a/rpython/translator/stm/src_stm/stm/timing.c b/rpython/translator/stm/src_stm/stm/timing.c --- a/rpython/translator/stm/src_stm/stm/timing.c +++ b/rpython/translator/stm/src_stm/stm/timing.c @@ -56,7 +56,6 @@ "minor gc", "major gc", "sync pause", - "spin loop", }; void stm_flush_timing(stm_thread_local_t *tl, int verbose) diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -71,7 +71,6 @@ STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, STM_TIME_SYNC_PAUSE, - STM_TIME_SPIN_LOOP, _STM_TIME_N }; @@ -134,8 +133,6 @@ object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); uint64_t _stm_total_allocated(void); -void _stm_mutex_pages_lock(void); -void _stm_mutex_pages_unlock(void); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -6,7 +6,7 @@ #include #include "stmgc.h" -#include "stm/atomic.h" /* for 
spin_loop() and write_fence() */ +#include "stm/atomic.h" /* for spin_loop(), write_fence(), spinlock_xxx() */ extern __thread struct stm_thread_local_s stm_thread_local; extern __thread long pypy_stm_ready_atomic; @@ -101,25 +101,4 @@ } -#if 0 /* fprinting versions */ -# define spinlock_acquire(lock, targetvalue) \ - do { if (__sync_bool_compare_and_swap(&(lock), 0, (targetvalue))) { \ - dprintf(("<<< locked %d\n", (int)targetvalue)); \ - break; \ - } \ - do { spin_loop(); } while (lock); \ - } while (1) -# define spinlock_release(lock) \ - do { dprintf(("unlocked >>>\n")); write_fence(); \ - assert((lock) != 0); (lock) = 0; } while (0) -#else -# define spinlock_acquire(lock, targetvalue) \ - do { if (__sync_bool_compare_and_swap(&(lock), 0, (targetvalue))) break; \ - do { spin_loop(); } while (lock); \ - } while (1) -# define spinlock_release(lock) \ - do { write_fence(); assert((lock) != 0); (lock) = 0; } while (0) -#endif - - #endif /* _RPY_STMGCINTF_H */ From noreply at buildbot.pypy.org Sun Apr 13 19:10:32 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 13 Apr 2014 19:10:32 +0200 (CEST) Subject: [pypy-commit] pypy pytest-25: sync to pytest 2.5.2 and pylib 1.4.20 Message-ID: <20140413171032.801A31D2527@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: pytest-25 Changeset: r70615:74b298986682 Date: 2014-04-12 21:31 +0100 http://bitbucket.org/pypy/pypy/changeset/74b298986682/ Log: sync to pytest 2.5.2 and pylib 1.4.20 diff too long, truncating to 2000 out of 10058 lines diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic 
string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? 
+ will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. 
""") group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. + if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, 
which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. 
+ if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. -if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. 
write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. 
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. + return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion 
rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, 
left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: - """ Parser for command line arguments. """ + """ Parser for command line arguments and ini-file values. """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) @@ -35,15 +101,17 @@ if option.dest: self._processopt(option) - def addnote(self, note): - self._notes.append(note) - def getgroup(self, name, description="", after=None): """ get (or create) a named option Group. - :name: unique name of the option group. + :name: name of the option group. :description: long description for --help output. :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. """ for group in self._groups: if group.name == name: @@ -57,33 +125,222 @@ return group def addoption(self, *opts, **attrs): - """ add an optparse-style option. """ + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ self._anonymous.addoption(*opts, **attrs) def parse(self, args): - self.optparser = optparser = MyOptionParser(self) + from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() + try_argcomplete(self.optparser) + return self.optparser.parse_args([str(x) for x in args]) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name - optgroup = py.std.optparse.OptionGroup(optparser, desc) - optgroup.add_options(group.options) - optparser.add_option_group(optgroup) - return self.optparser.parse_args([str(x) for x in args]) + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs='*' + ).completer=filescompleter + return optparser def parse_setoption(self, args, option): - parsedoption, args = self.parse(args) + parsedoption = self.parse(args) for name, value in parsedoption.__dict__.items(): setattr(option, name, value) - return args + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args): + optparser = self._getparser() + args = [str(x) for x in args] + return optparser.parse_known_args(args)[0] def addini(self, name, help, type=None, default=None): - """ add an ini-file option with the given name and description. """ + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. 
+ """ assert type in (None, "pathlist", "args", "linelist") self._inidict[name] = (help, type, default) self._ininames.append(name) +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument: + """class that mimics the necessary behaviour of py.std.optparse.Option """ + _typ_map = { + 'int': int, + 'string': str, + } + # enable after some grace period for plugin writers + TYPE_WARN = False + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get('dest') + if self.TYPE_WARN: + try: + help = attrs['help'] + if '%default' in help: + py.std.warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + FutureWarning, + stacklevel=3) + except KeyError: + pass + try: + typ = attrs['type'] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, py.builtin._basestring): + if typ == 'choice': + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this is optional and when supplied ' + ' should be a type.' + ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + # argparse expects a type here take it from + # the type of the first element + attrs['type'] = type(attrs['choices'][0]) + else: + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' 
+ ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + attrs['type'] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs['type'] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs['default'] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError( + 'need a long or short option', self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = 'default dest help'.split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get('help'): + a = self._attrs['help'] + a = a.replace('%default', '%(default)s') + #a = a.replace('%prog', '%(prog)s') + self._attrs['help'] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def __repr__(self): + retval = 'Argument(' + if self._short_opts: + retval += '_short_opts: ' + repr(self._short_opts) + ', ' + if self._long_opts: + retval += '_long_opts: ' + 
repr(self._long_opts) + ', ' + retval += 'dest: ' + repr(self.dest) + ', ' + if hasattr(self, 'type'): + retval += 'type: ' + repr(self.type) + ', ' + if hasattr(self, 'default'): + retval += 'default: ' + repr(self.default) + ', ' + if retval[-2:] == ', ': # always long enough to test ("Argument(" ) + retval = retval[:-2] + retval += ')' + return retval + + class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name @@ -92,12 +349,18 @@ self.parser = parser def addoption(self, *optnames, **attrs): - """ add an option to this group. """ - option = py.std.optparse.Option(*optnames, **attrs) + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=False) def _addoption(self, *optnames, **attrs): - option = py.std.optparse.Option(*optnames, **attrs) + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=True) From noreply at buildbot.pypy.org Sun Apr 13 19:10:33 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 13 Apr 2014 19:10:33 +0200 (CEST) Subject: [pypy-commit] pypy pytest-25: sync pytest.py as well and kill pytest_cov.py Message-ID: <20140413171033.A771A1D2527@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: pytest-25 Changeset: r70616:c00584626471 Date: 2014-04-12 22:15 +0100 http://bitbucket.org/pypy/pypy/changeset/c00584626471/ Log: sync pytest.py as well and kill pytest_cov.py diff --git a/pytest.py b/pytest.py --- a/pytest.py +++ b/pytest.py @@ -1,49 +1,19 @@ #!/usr/bin/env python +# PYTHON_ARGCOMPLETE_OK """ -PyPy Test runner interface --------------------------- - -Running pytest.py starts py.test, the testing tool -we use in PyPy. 
It is distributed along with PyPy, -but you may get more information about it at -http://pytest.org/. - -Note that it makes no sense to run all tests at once. -You need to pick a particular subdirectory and run - - cd pypy/.../test - ../../../pytest.py [options] - -For more information, use pytest.py -h. +pytest: unit and functional testing with Python. """ __all__ = ['main'] -# XXX hack for win64: -# This patch must stay here until the END OF STAGE 1 -# When all tests work, this branch will be merged -# and the branch stage 2 is started, where we remove this patch. -import sys -if hasattr(sys, "maxint") and hasattr(sys, "maxsize"): - if sys.maxint != sys.maxsize: - sys.maxint = sys.maxsize - import warnings - warnings.warn("""\n ----> This win64 port is now in stage 1: sys.maxint was modified. ----> When pypy/__init__.py becomes empty again, we have reached stage 2. -""") +if __name__ == '__main__': # if run as a script or by 'python -m pytest' + # we trigger the below "else" condition by the following import + import pytest + raise SystemExit(pytest.main()) -from _pytest.core import main, UsageError, _preloadplugins -from _pytest import core as cmdline +# else we are imported + +from _pytest.config import main, UsageError, _preloadplugins, cmdline from _pytest import __version__ -if __name__ == '__main__': # if run as a script or by 'python -m pytest' - import os - if len(sys.argv) == 1 and os.path.dirname(sys.argv[0]) in '.': - print >> sys.stderr, __doc__ - sys.exit(2) +_preloadplugins() # to populate pytest.* namespace so help(pytest) works - #XXX: sync to upstream later - import pytest_cov - raise SystemExit(main(plugins=[pytest_cov])) -else: - _preloadplugins() # to populate pytest.* namespace so help(pytest) works diff --git a/pytest_cov.py b/pytest_cov.py deleted file mode 100644 --- a/pytest_cov.py +++ /dev/null @@ -1,353 +0,0 @@ -"""produce code coverage reports using the 'coverage' package, including support for distributed testing. 
- -This plugin produces coverage reports. It supports centralised testing and distributed testing in -both load and each modes. It also supports coverage of subprocesses. - -All features offered by the coverage package should be available, either through pytest-cov or -through coverage's config file. - - -Installation ------------- - -The `pytest-cov`_ package may be installed with pip or easy_install:: - - pip install pytest-cov - easy_install pytest-cov - -.. _`pytest-cov`: http://pypi.python.org/pypi/pytest-cov/ - - -Uninstallation --------------- - -Uninstalling packages is supported by pip:: - - pip uninstall pytest-cov - -However easy_install does not provide an uninstall facility. - -.. IMPORTANT:: - - Ensure that you manually delete the init_cov_core.pth file in your site-packages directory. - - This file starts coverage collection of subprocesses if appropriate during site initialisation - at python startup. - - -Usage ------ - -Centralised Testing -~~~~~~~~~~~~~~~~~~~ - -Centralised testing will report on the combined coverage of the main process and all of it's -subprocesses. - -Running centralised testing:: - - py.test --cov myproj tests/ - -Shows a terminal report:: - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -Distributed Testing: Load -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Distributed testing with dist mode set to load will report on the combined coverage of all slaves. -The slaves may be spread out over any number of hosts and each slave may be located anywhere on the -file system. Each slave will have it's subprocesses measured. 
- -Running distributed testing with dist mode set to load:: - - py.test --cov myproj -n 2 tests/ - -Shows a terminal report:: - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -Again but spread over different hosts and different directories:: - - py.test --cov myproj --dist load - --tx ssh=memedough at host1//chdir=testenv1 - --tx ssh=memedough at host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python - --rsyncdir myproj --rsyncdir tests --rsync examples - tests/ - -Shows a terminal report:: - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -Distributed Testing: Each -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Distributed testing with dist mode set to each will report on the combined coverage of all slaves. -Since each slave is running all tests this allows generating a combined coverage report for multiple -environments. 
- -Running distributed testing with dist mode set to each:: - - py.test --cov myproj --dist each - --tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python - --tx ssh=memedough at host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python - --rsyncdir myproj --rsyncdir tests --rsync examples - tests/ - -Shows a terminal report:: - - ---------------------------------------- coverage ---------------------------------------- - platform linux2, python 2.6.5-final-0 - platform linux2, python 2.7.0-final-0 - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -Reporting ---------- - -It is possible to generate any combination of the reports for a single test run. - -The available reports are terminal (with or without missing line numbers shown), HTML, XML and -annotated source code. - -The terminal report without line numbers (default):: - - py.test --cov-report term --cov myproj tests/ - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -The terminal report with line numbers:: - - py.test --cov-report term-missing --cov myproj tests/ - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover Missing - -------------------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% 24-26, 99, 149, 233-236, 297-298, 369-370 - myproj/feature4286 94 7 92% 183-188, 197 - -------------------------------------------------- - TOTAL 353 20 94% - - -The remaining three reports output to files without showing anything on the terminal (useful for -when the output is going 
to a continuous integration server):: - - py.test --cov-report html - --cov-report xml - --cov-report annotate - --cov myproj tests/ - - -Coverage Data File ------------------- - -The data file is erased at the beginning of testing to ensure clean data for each test run. - -The data file is left at the end of testing so that it is possible to use normal coverage tools to -examine it. - - -Coverage Config File --------------------- - -This plugin provides a clean minimal set of command line options that are added to pytest. For -further control of coverage use a coverage config file. - -For example if tests are contained within the directory tree being measured the tests may be -excluded if desired by using a .coveragerc file with the omit option set:: - - py.test --cov-config .coveragerc - --cov myproj - myproj/tests/ - -Where the .coveragerc file contains file globs:: - - [run] - omit = tests/* - -For full details refer to the `coverage config file`_ documentation. - -.. _`coverage config file`: http://nedbatchelder.com/code/coverage/config.html - -Note that this plugin controls some options and setting the option in the config file will have no -effect. These include specifying source to be measured (source option) and all data file handling -(data_file and parallel options). - - -Limitations ------------ - -For distributed testing the slaves must have the pytest-cov package installed. This is needed since -the plugin must be registered through setuptools / distribute for pytest to start the plugin on the -slave. - -For subprocess measurement environment variables must make it from the main process to the -subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must -do normal site initialisation so that the environment variables can be detected and coverage -started. 
- - -Acknowledgements ----------------- - -Whilst this plugin has been built fresh from the ground up it has been influenced by the work done -on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and nose-cover (Jason Pellerin) which are -other coverage plugins. - -Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs. - -Holger Krekel for pytest with its distributed testing support. - -Jason Pellerin for nose. - -Michael Foord for unittest2. - -No doubt others have contributed to these tools as well. -""" - - -def pytest_addoption(parser): - """Add options to control coverage.""" - - group = parser.getgroup('coverage reporting with distributed testing support') - group.addoption('--cov', action='append', default=[], metavar='path', - dest='cov_source', - help='measure coverage for filesystem path (multi-allowed)') - group.addoption('--cov-report', action='append', default=[], metavar='type', - choices=['term', 'term-missing', 'annotate', 'html', 'xml'], - dest='cov_report', - help='type of report to generate: term, term-missing, annotate, html, xml (multi-allowed)') - group.addoption('--cov-config', action='store', default='.coveragerc', metavar='path', - dest='cov_config', - help='config file for coverage, default: .coveragerc') - - -def pytest_configure(config): - """Activate coverage plugin if appropriate.""" - - if config.getvalue('cov_source'): - config.pluginmanager.register(CovPlugin(), '_cov') - - -class CovPlugin(object): - """Use coverage package to produce code coverage reports. - - Delegates all work to a particular implementation based on whether - this test process is centralised, a distributed master or a - distributed slave. - """ - - def __init__(self): - """Creates a coverage pytest plugin. - - We read the rc file that coverage uses to get the data file - name. This is needed since we give coverage through it's API - the data file name. - """ - - # Our implementation is unknown at this time. 
- self.cov_controller = None - - def pytest_sessionstart(self, session): - """At session start determine our implementation and delegate to it.""" - - import cov_core - - cov_source = session.config.getvalue('cov_source') - cov_report = session.config.getvalue('cov_report') or ['term'] - cov_config = session.config.getvalue('cov_config') - - session_name = session.__class__.__name__ - is_master = (session.config.pluginmanager.hasplugin('dsession') or - session_name == 'DSession') - is_slave = (hasattr(session.config, 'slaveinput') or - session_name == 'SlaveSession') - nodeid = None - - if is_master: - controller_cls = cov_core.DistMaster - elif is_slave: - controller_cls = cov_core.DistSlave - nodeid = session.config.slaveinput.get('slaveid', getattr(session, 'nodeid')) - else: - controller_cls = cov_core.Central - - self.cov_controller = controller_cls(cov_source, - cov_report, - cov_config, - session.config, - nodeid) - - self.cov_controller.start() - - def pytest_configure_node(self, node): - """Delegate to our implementation.""" - - self.cov_controller.configure_node(node) - pytest_configure_node.optionalhook = True - - def pytest_testnodedown(self, node, error): - """Delegate to our implementation.""" - - self.cov_controller.testnodedown(node, error) - pytest_testnodedown.optionalhook = True - - def pytest_sessionfinish(self, session, exitstatus): - """Delegate to our implementation.""" - - self.cov_controller.finish() - - def pytest_terminal_summary(self, terminalreporter): - """Delegate to our implementation.""" - - self.cov_controller.summary(terminalreporter._tw) - - -def pytest_funcarg__cov(request): - """A pytest funcarg that provides access to the underlying coverage object.""" - - # Check with hasplugin to avoid getplugin exception in older pytest. 
- if request.config.pluginmanager.hasplugin('_cov'): - plugin = request.config.pluginmanager.getplugin('_cov') - if plugin.cov_controller: - return plugin.cov_controller.cov - return None From noreply at buildbot.pypy.org Sun Apr 13 20:01:45 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 13 Apr 2014 20:01:45 +0200 (CEST) Subject: [pypy-commit] pypy default: make sure that cppyy tests are skipped properly Message-ID: <20140413180146.0333B1C3397@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r70617:62b426245097 Date: 2014-04-13 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/62b426245097/ Log: make sure that cppyy tests are skipped properly diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,5 +1,6 @@ import py + at py.test.mark.tryfirst def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: py.test.skip("genreflex is not installed") From noreply at buildbot.pypy.org Sun Apr 13 20:11:25 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 13 Apr 2014 20:11:25 +0200 (CEST) Subject: [pypy-commit] pypy default: argparse compatibility Message-ID: <20140413181125.881AF1D2527@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r70618:b99cd90609fe Date: 2014-04-13 19:10 +0100 http://bitbucket.org/pypy/pypy/changeset/b99cd90609fe/ Log: argparse compatibility diff --git a/rpython/jit/backend/conftest.py b/rpython/jit/backend/conftest.py --- a/rpython/jit/backend/conftest.py +++ b/rpython/jit/backend/conftest.py @@ -6,7 +6,7 @@ def pytest_addoption(parser): group = parser.getgroup('random test options') - group.addoption('--random-seed', action="store", type="int", + group.addoption('--random-seed', action="store", type=int, default=random.randrange(0, 10000), dest="randomseed", help="choose a fixed random seed") @@ -15,19 +15,19 @@ choices=['llgraph', 'cpu'], dest="backend", 
help="select the backend to run the functions with") - group.addoption('--block-length', action="store", type="int", + group.addoption('--block-length', action="store", type=int, default=30, dest="block_length", help="insert up to this many operations in each test") - group.addoption('--n-vars', action="store", type="int", + group.addoption('--n-vars', action="store", type=int, default=10, dest="n_vars", help="supply this many randomly-valued arguments to " "the function") - group.addoption('--repeat', action="store", type="int", + group.addoption('--repeat', action="store", type=int, default=15, dest="repeat", help="run the test this many times"), - group.addoption('--output', '-O', action="store", type="str", + group.addoption('--output', '-O', action="store", type=str, default="", dest="output", help="dump output to a file") From noreply at buildbot.pypy.org Sun Apr 13 20:32:50 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 13 Apr 2014 20:32:50 +0200 (CEST) Subject: [pypy-commit] pypy pytest-25: reinstate pytest_cov.py Message-ID: <20140413183250.8FFFC1D2527@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: pytest-25 Changeset: r70619:83c9ff0c0206 Date: 2014-04-13 19:31 +0100 http://bitbucket.org/pypy/pypy/changeset/83c9ff0c0206/ Log: reinstate pytest_cov.py diff --git a/pytest_cov.py b/pytest_cov.py new file mode 100644 --- /dev/null +++ b/pytest_cov.py @@ -0,0 +1,353 @@ +"""produce code coverage reports using the 'coverage' package, including support for distributed testing. + +This plugin produces coverage reports. It supports centralised testing and distributed testing in +both load and each modes. It also supports coverage of subprocesses. + +All features offered by the coverage package should be available, either through pytest-cov or +through coverage's config file. + + +Installation +------------ + +The `pytest-cov`_ package may be installed with pip or easy_install:: + + pip install pytest-cov + easy_install pytest-cov + +.. 
_`pytest-cov`: http://pypi.python.org/pypi/pytest-cov/ + + +Uninstallation +-------------- + +Uninstalling packages is supported by pip:: + + pip uninstall pytest-cov + +However easy_install does not provide an uninstall facility. + +.. IMPORTANT:: + + Ensure that you manually delete the init_cov_core.pth file in your site-packages directory. + + This file starts coverage collection of subprocesses if appropriate during site initialisation + at python startup. + + +Usage +----- + +Centralised Testing +~~~~~~~~~~~~~~~~~~~ + +Centralised testing will report on the combined coverage of the main process and all of it's +subprocesses. + +Running centralised testing:: + + py.test --cov myproj tests/ + +Shows a terminal report:: + + -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- + Name Stmts Miss Cover + ---------------------------------------- + myproj/__init__ 2 0 100% + myproj/myproj 257 13 94% + myproj/feature4286 94 7 92% + ---------------------------------------- + TOTAL 353 20 94% + + +Distributed Testing: Load +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Distributed testing with dist mode set to load will report on the combined coverage of all slaves. +The slaves may be spread out over any number of hosts and each slave may be located anywhere on the +file system. Each slave will have it's subprocesses measured. 
+ +Running distributed testing with dist mode set to load:: + + py.test --cov myproj -n 2 tests/ + +Shows a terminal report:: + + -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- + Name Stmts Miss Cover + ---------------------------------------- + myproj/__init__ 2 0 100% + myproj/myproj 257 13 94% + myproj/feature4286 94 7 92% + ---------------------------------------- + TOTAL 353 20 94% + + +Again but spread over different hosts and different directories:: + + py.test --cov myproj --dist load + --tx ssh=memedough at host1//chdir=testenv1 + --tx ssh=memedough at host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python + --rsyncdir myproj --rsyncdir tests --rsync examples + tests/ + +Shows a terminal report:: + + -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- + Name Stmts Miss Cover + ---------------------------------------- + myproj/__init__ 2 0 100% + myproj/myproj 257 13 94% + myproj/feature4286 94 7 92% + ---------------------------------------- + TOTAL 353 20 94% + + +Distributed Testing: Each +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Distributed testing with dist mode set to each will report on the combined coverage of all slaves. +Since each slave is running all tests this allows generating a combined coverage report for multiple +environments. 
+ +Running distributed testing with dist mode set to each:: + + py.test --cov myproj --dist each + --tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python + --tx ssh=memedough at host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python + --rsyncdir myproj --rsyncdir tests --rsync examples + tests/ + +Shows a terminal report:: + + ---------------------------------------- coverage ---------------------------------------- + platform linux2, python 2.6.5-final-0 + platform linux2, python 2.7.0-final-0 + Name Stmts Miss Cover + ---------------------------------------- + myproj/__init__ 2 0 100% + myproj/myproj 257 13 94% + myproj/feature4286 94 7 92% + ---------------------------------------- + TOTAL 353 20 94% + + +Reporting +--------- + +It is possible to generate any combination of the reports for a single test run. + +The available reports are terminal (with or without missing line numbers shown), HTML, XML and +annotated source code. + +The terminal report without line numbers (default):: + + py.test --cov-report term --cov myproj tests/ + + -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- + Name Stmts Miss Cover + ---------------------------------------- + myproj/__init__ 2 0 100% + myproj/myproj 257 13 94% + myproj/feature4286 94 7 92% + ---------------------------------------- + TOTAL 353 20 94% + + +The terminal report with line numbers:: + + py.test --cov-report term-missing --cov myproj tests/ + + -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- + Name Stmts Miss Cover Missing + -------------------------------------------------- + myproj/__init__ 2 0 100% + myproj/myproj 257 13 94% 24-26, 99, 149, 233-236, 297-298, 369-370 + myproj/feature4286 94 7 92% 183-188, 197 + -------------------------------------------------- + TOTAL 353 20 94% + + +The remaining three reports output to files without showing anything on the terminal (useful for +when the output is going 
to a continuous integration server):: + + py.test --cov-report html + --cov-report xml + --cov-report annotate + --cov myproj tests/ + + +Coverage Data File +------------------ + +The data file is erased at the beginning of testing to ensure clean data for each test run. + +The data file is left at the end of testing so that it is possible to use normal coverage tools to +examine it. + + +Coverage Config File +-------------------- + +This plugin provides a clean minimal set of command line options that are added to pytest. For +further control of coverage use a coverage config file. + +For example if tests are contained within the directory tree being measured the tests may be +excluded if desired by using a .coveragerc file with the omit option set:: + + py.test --cov-config .coveragerc + --cov myproj + myproj/tests/ + +Where the .coveragerc file contains file globs:: + + [run] + omit = tests/* + +For full details refer to the `coverage config file`_ documentation. + +.. _`coverage config file`: http://nedbatchelder.com/code/coverage/config.html + +Note that this plugin controls some options and setting the option in the config file will have no +effect. These include specifying source to be measured (source option) and all data file handling +(data_file and parallel options). + + +Limitations +----------- + +For distributed testing the slaves must have the pytest-cov package installed. This is needed since +the plugin must be registered through setuptools / distribute for pytest to start the plugin on the +slave. + +For subprocess measurement environment variables must make it from the main process to the +subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must +do normal site initialisation so that the environment variables can be detected and coverage +started. 
+ + +Acknowledgements +---------------- + +Whilst this plugin has been built fresh from the ground up it has been influenced by the work done +on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and nose-cover (Jason Pellerin) which are +other coverage plugins. + +Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs. + +Holger Krekel for pytest with its distributed testing support. + +Jason Pellerin for nose. + +Michael Foord for unittest2. + +No doubt others have contributed to these tools as well. +""" + + +def pytest_addoption(parser): + """Add options to control coverage.""" + + group = parser.getgroup('coverage reporting with distributed testing support') + group.addoption('--cov', action='append', default=[], metavar='path', + dest='cov_source', + help='measure coverage for filesystem path (multi-allowed)') + group.addoption('--cov-report', action='append', default=[], metavar='type', + choices=['term', 'term-missing', 'annotate', 'html', 'xml'], + dest='cov_report', + help='type of report to generate: term, term-missing, annotate, html, xml (multi-allowed)') + group.addoption('--cov-config', action='store', default='.coveragerc', metavar='path', + dest='cov_config', + help='config file for coverage, default: .coveragerc') + + +def pytest_configure(config): + """Activate coverage plugin if appropriate.""" + + if config.getvalue('cov_source'): + config.pluginmanager.register(CovPlugin(), '_cov') + + +class CovPlugin(object): + """Use coverage package to produce code coverage reports. + + Delegates all work to a particular implementation based on whether + this test process is centralised, a distributed master or a + distributed slave. + """ + + def __init__(self): + """Creates a coverage pytest plugin. + + We read the rc file that coverage uses to get the data file + name. This is needed since we give coverage through it's API + the data file name. + """ + + # Our implementation is unknown at this time. 
+ self.cov_controller = None + + def pytest_sessionstart(self, session): + """At session start determine our implementation and delegate to it.""" + + import cov_core + + cov_source = session.config.getvalue('cov_source') + cov_report = session.config.getvalue('cov_report') or ['term'] + cov_config = session.config.getvalue('cov_config') + + session_name = session.__class__.__name__ + is_master = (session.config.pluginmanager.hasplugin('dsession') or + session_name == 'DSession') + is_slave = (hasattr(session.config, 'slaveinput') or + session_name == 'SlaveSession') + nodeid = None + + if is_master: + controller_cls = cov_core.DistMaster + elif is_slave: + controller_cls = cov_core.DistSlave + nodeid = session.config.slaveinput.get('slaveid', getattr(session, 'nodeid')) + else: + controller_cls = cov_core.Central + + self.cov_controller = controller_cls(cov_source, + cov_report, + cov_config, + session.config, + nodeid) + + self.cov_controller.start() + + def pytest_configure_node(self, node): + """Delegate to our implementation.""" + + self.cov_controller.configure_node(node) + pytest_configure_node.optionalhook = True + + def pytest_testnodedown(self, node, error): + """Delegate to our implementation.""" + + self.cov_controller.testnodedown(node, error) + pytest_testnodedown.optionalhook = True + + def pytest_sessionfinish(self, session, exitstatus): + """Delegate to our implementation.""" + + self.cov_controller.finish() + + def pytest_terminal_summary(self, terminalreporter): + """Delegate to our implementation.""" + + self.cov_controller.summary(terminalreporter._tw) + + +def pytest_funcarg__cov(request): + """A pytest funcarg that provides access to the underlying coverage object.""" + + # Check with hasplugin to avoid getplugin exception in older pytest. 
+ if request.config.pluginmanager.hasplugin('_cov'): + plugin = request.config.pluginmanager.getplugin('_cov') + if plugin.cov_controller: + return plugin.cov_controller.cov + return None From noreply at buildbot.pypy.org Sun Apr 13 20:32:51 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 13 Apr 2014 20:32:51 +0200 (CEST) Subject: [pypy-commit] pypy pytest-25: hg merge default Message-ID: <20140413183251.C06C71D2527@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: pytest-25 Changeset: r70620:7d24a35ccfef Date: 2014-04-13 19:32 +0100 http://bitbucket.org/pypy/pypy/changeset/7d24a35ccfef/ Log: hg merge default diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,5 +1,6 @@ import py + at py.test.mark.tryfirst def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: py.test.skip("genreflex is not installed") diff --git a/rpython/jit/backend/conftest.py b/rpython/jit/backend/conftest.py --- a/rpython/jit/backend/conftest.py +++ b/rpython/jit/backend/conftest.py @@ -6,7 +6,7 @@ def pytest_addoption(parser): group = parser.getgroup('random test options') - group.addoption('--random-seed', action="store", type="int", + group.addoption('--random-seed', action="store", type=int, default=random.randrange(0, 10000), dest="randomseed", help="choose a fixed random seed") @@ -15,19 +15,19 @@ choices=['llgraph', 'cpu'], dest="backend", help="select the backend to run the functions with") - group.addoption('--block-length', action="store", type="int", + group.addoption('--block-length', action="store", type=int, default=30, dest="block_length", help="insert up to this many operations in each test") - group.addoption('--n-vars', action="store", type="int", + group.addoption('--n-vars', action="store", type=int, default=10, dest="n_vars", help="supply this many randomly-valued arguments to " "the function") - 
group.addoption('--repeat', action="store", type="int", + group.addoption('--repeat', action="store", type=int, default=15, dest="repeat", help="run the test this many times"), - group.addoption('--output', '-O', action="store", type="str", + group.addoption('--output', '-O', action="store", type=str, default="", dest="output", help="dump output to a file") From noreply at buildbot.pypy.org Sun Apr 13 23:33:39 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 13 Apr 2014 23:33:39 +0200 (CEST) Subject: [pypy-commit] pypy py3k: help Popen find executable on win32 Message-ID: <20140413213339.487191C3504@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: py3k Changeset: r70621:590ba2c6714c Date: 2014-04-14 00:24 +0300 http://bitbucket.org/pypy/pypy/changeset/590ba2c6714c/ Log: help Popen find executable on win32 diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -16,6 +16,8 @@ shell_default = True def _run(executable, args, env, cwd): # unless overridden below + if sys.platform == 'win32': + executable = executable.replace('/','\\') if isinstance(args, str): args = str(executable) + ' ' + args shell = True From noreply at buildbot.pypy.org Sun Apr 13 23:33:40 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 13 Apr 2014 23:33:40 +0200 (CEST) Subject: [pypy-commit] pypy py3k: avoid running in an exception handler, since raising will chain exceptions Message-ID: <20140413213340.894591C3504@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: py3k Changeset: r70622:1ddaf3502eda Date: 2014-04-14 00:32 +0300 http://bitbucket.org/pypy/pypy/changeset/1ddaf3502eda/ Log: avoid running in an exception handler, since raising will chain exceptions diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -25,6 +25,7 @@ except ImportError: pass # + run_interactive = 
run_simple_interactive_console try: if not os.isatty(sys.stdin.fileno()): # Bail out if stdin is not tty-like, as pyrepl wouldn't be happy @@ -35,13 +36,12 @@ if not check(): raise ImportError from pyrepl.simple_interact import run_multiline_interactive_console + run_imteractive = run_multiline_interactive_console except ImportError: - run_simple_interactive_console(mainmodule) + pass except SyntaxError: print("Warning: 'import pyrepl' failed with SyntaxError") - run_simple_interactive_console(mainmodule) - else: - run_multiline_interactive_console(mainmodule) + run_interactive(mainmodule) def run_simple_interactive_console(mainmodule): import code From noreply at buildbot.pypy.org Sun Apr 13 23:38:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 13 Apr 2014 23:38:56 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: prefer oefmt Message-ID: <20140413213856.20B011C3504@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r70623:14f65b3bbd6a Date: 2014-04-13 14:36 -0700 http://bitbucket.org/pypy/pypy/changeset/14f65b3bbd6a/ Log: prefer oefmt diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -175,8 +175,7 @@ else: timeout = space.float_w(w_timeout) if timeout < 0.0: - raise OperationError(space.w_ValueError, - space.wrap("timeout must be non-negative")) + raise oefmt(space.w_ValueError, "timeout must be non-negative") ll_inl = lltype.nullptr(_c.fd_set.TO) ll_outl = lltype.nullptr(_c.fd_set.TO) From noreply at buildbot.pypy.org Sun Apr 13 23:38:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 13 Apr 2014 23:38:57 +0200 (CEST) Subject: [pypy-commit] pypy py3k: typo Message-ID: <20140413213857.63F331C3504@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70624:c671eb068b1e Date: 2014-04-13 14:38 -0700 http://bitbucket.org/pypy/pypy/changeset/c671eb068b1e/ Log: typo diff 
--git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -36,7 +36,7 @@ if not check(): raise ImportError from pyrepl.simple_interact import run_multiline_interactive_console - run_imteractive = run_multiline_interactive_console + run_interactive = run_multiline_interactive_console except ImportError: pass except SyntaxError: From noreply at buildbot.pypy.org Mon Apr 14 09:58:48 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 14 Apr 2014 09:58:48 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: correctly recursively generate guards for arrays Message-ID: <20140414075848.856091C0433@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70625:4cbb8585a69f Date: 2014-04-13 12:03 +0200 http://bitbucket.org/pypy/pypy/changeset/4cbb8585a69f/ Log: correctly recursively generate guards for arrays diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -9,7 +9,7 @@ from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ equaloplists from rpython.jit.metainterp.optimizeopt.intutils import IntBound -from rpython.jit.metainterp.optimizeopt.virtualize import VirtualValue +from rpython.jit.metainterp.optimizeopt.virtualize import VirtualValue, VArrayValue from rpython.jit.metainterp.history import TreeLoop, JitCellToken from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import ResOperation, rop @@ -452,7 +452,34 @@ self.cpu) - def test_generate_guards_on_virtual_fields_matches(self): + def test_generate_guards_on_virtual_fields_matches_array(self): + innervalue1 = OptValue(self.nodebox) + 
constclassbox = self.cpu.ts.cls_of_box(self.nodebox) + innervalue1.make_constant_class(constclassbox, -1) + innerinfo1 = NotVirtualStateInfo(innervalue1) + innerinfo1.position = 1 + innerinfo2 = NotVirtualStateInfo(OptValue(self.nodebox)) + innerinfo2.position = 1 + + descr = object() + + info1 = VArrayStateInfo(descr) + info1.fieldstate = [innerinfo1] + + info2 = VArrayStateInfo(descr) + info2.fieldstate = [innerinfo2] + + value1 = VArrayValue(descr, None, 1, self.nodebox) + value1._items[0] = OptValue(self.nodebox) + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, value1, expected, [self.nodebox]) + + def test_generate_guards_on_virtual_fields_matches_struct(self): innervalue1 = OptValue(self.nodebox) constclassbox = self.cpu.ts.cls_of_box(self.nodebox) innervalue1.make_constant_class(constclassbox, -1) @@ -477,6 +504,7 @@ """ self.guards(info1, info2, value1, expected, [self.nodebox]) + # _________________________________________________________________________ # the below tests don't really have anything to do with guard generation diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -187,10 +187,13 @@ raise VirtualStatesCantMatch("other is a different kind of array") if len(self.fieldstate) != len(other.fieldstate): raise VirtualStatesCantMatch("other has a different length") + v = None for i in range(len(self.fieldstate)): - # XXX value + if value is not None: + assert isinstance(value, virtualize.VArrayValue) + v = value._items[i] self.fieldstate[i].generate_guards(other.fieldstate[i], - None, state) + v, state) def enum_forced_boxes(self, boxes, value, optimizer): if not isinstance(value, virtualize.VArrayValue): From noreply at buildbot.pypy.org Mon Apr 14 09:58:49 2014 From: noreply at 
buildbot.pypy.org (cfbolz) Date: Mon, 14 Apr 2014 09:58:49 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: fix translation Message-ID: <20140414075849.A5E3B1C0433@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70626:def993be0700 Date: 2014-04-13 13:12 +0200 http://bitbucket.org/pypy/pypy/changeset/def993be0700/ Log: fix translation diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -1,5 +1,6 @@ from rpython.jit.metainterp import resume -from rpython.jit.metainterp.history import BoxInt, ConstInt, BoxPtr, Const, ConstPtr +from rpython.jit.metainterp.history import (BoxInt, ConstInt, BoxPtr, Const, + ConstPtr, ConstFloat) from rpython.jit.metainterp.optimizeopt import virtualize from rpython.jit.metainterp.optimizeopt.intutils import IntUnbounded from rpython.jit.metainterp.optimizeopt.optimizer import (LEVEL_CONSTANT, @@ -435,7 +436,7 @@ l = "ConstPtr(null)" else: assert isinstance(const, ConstFloat) - l = "ConstFloat(%s)" % cons.getfloat() + l = "ConstFloat(%s)" % const.getfloat() lb = '' if self.lenbound: From noreply at buildbot.pypy.org Mon Apr 14 09:58:50 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 14 Apr 2014 09:58:50 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: actually use the debug message Message-ID: <20140414075850.DBDB41C0433@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70627:997dc027f625 Date: 2014-04-13 13:16 +0200 http://bitbucket.org/pypy/pypy/changeset/997dc027f625/ Log: actually use the debug message diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ 
b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -533,7 +533,6 @@ continue extra_guards = [] - debugmsg = 'Did not match ' try: cpu = self.optimizer.cpu state = target.virtual_state.generate_guards(virtual_state, @@ -546,6 +545,7 @@ else: debugmsg = 'Matched ' except VirtualStatesCantMatch, e: + debugmsg = 'Did not match:\n%s\n' % (e.msg, ) target.virtual_state.debug_print(debugmsg, e.state.bad, metainterp_sd=self.optimizer.metainterp_sd) continue From noreply at buildbot.pypy.org Mon Apr 14 09:58:52 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 14 Apr 2014 09:58:52 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: remove superfluous list Message-ID: <20140414075852.00D641C0433@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70628:d3bf324cae70 Date: 2014-04-13 13:32 +0200 http://bitbucket.org/pypy/pypy/changeset/d3bf324cae70/ Log: remove superfluous list diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -406,8 +406,8 @@ def _enum(self, virtual_state): if self.level == LEVEL_CONSTANT: return - self.position_in_notvirtuals = len(virtual_state.notvirtuals) - virtual_state.notvirtuals.append(self) + self.position_in_notvirtuals = virtual_state.numnotvirtuals + virtual_state.numnotvirtuals += 1 def debug_print(self, indent, seen, bad, metainterp_sd=None): mark = '' @@ -450,7 +450,7 @@ def __init__(self, state): self.state = state self.info_counter = -1 - self.notvirtuals = [] # FIXME: We dont need this list, only it's length + self.numnotvirtuals = 0 for s in state: s.enum(self) @@ -476,7 +476,7 @@ if optimizer.optearlyforce: optimizer = optimizer.optearlyforce assert len(values) == len(self.state) - inputargs = [None] * len(self.notvirtuals) + inputargs = [None] * self.numnotvirtuals # We try twice. 
The first time around we allow boxes to be forced # which might change the virtual state if the box appear in more From noreply at buildbot.pypy.org Mon Apr 14 14:58:22 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 14 Apr 2014 14:58:22 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: improve a test, enable buffered test Message-ID: <20140414125822.A57DA1C01F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70629:ba9de3154a7b Date: 2014-04-14 14:59 +0300 http://bitbucket.org/pypy/pypy/changeset/ba9de3154a7b/ Log: improve a test, enable buffered test diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -464,7 +464,7 @@ class NonWritableSlice(SliceArray): def descr_setitem(self, space, orig_array, w_index, w_value): raise OperationError(space.w_ValueError, space.wrap( - "Assignment destination is read-only")) + "assignment destination is read-only")) class VoidBoxStorage(BaseConcreteArray): def __init__(self, size, dtype): diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -52,13 +52,9 @@ n += 1 assert n == 12 assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() - e = None - try: - r[0][0] = 0 - except ValueError, ex: - e = ex - assert e - + e = raises(ValueError, 'r[0][0] = 0') + assert str(e.value) == 'assignment destination is read-only' + def test_index(self): from numpy import arange, nditer a = arange(6).reshape(2,3) @@ -118,12 +114,8 @@ def test_buffered(self): from numpy import arange, nditer, array - import sys a = arange(6).reshape(2,3) r = [] - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, "nditer(a, 
flags=['external_loop', 'buffered'], order='F')") - skip('nditer buffered flag not implmented') for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): r.append(x) array_r = array(r) From noreply at buildbot.pypy.org Mon Apr 14 14:58:23 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 14 Apr 2014 14:58:23 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: cleanup, add failing test that proves refactoring is needed Message-ID: <20140414125823.E43E81C01F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70630:f0bb0496eeef Date: 2014-04-14 15:56 +0300 http://bitbucket.org/pypy/pypy/changeset/f0bb0496eeef/ Log: cleanup, add failing test that proves refactoring is needed diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -189,28 +189,6 @@ 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' 'multi-index is being tracked')) -def get_iter(space, order, arr, shape): - imp = arr.implementation - if order == 'K' or (order == 'C' and imp.order == 'C'): - backward = False - elif order =='F' and imp.order == 'C': - backward = True - else: - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - if (imp.strides[0] < imp.strides[-1] and not backward) or \ - (imp.strides[0] > imp.strides[-1] and backward): - # flip the strides. Is this always true for multidimension? 
- strides = [imp.strides[i] for i in range(len(imp.strides) - 1, -1, -1)] - backstrides = [imp.backstrides[i] for i in range(len(imp.backstrides) - 1, -1, -1)] - shape = [imp.shape[i] for i in range(len(imp.shape) - 1, -1, -1)] - else: - strides = imp.strides - backstrides = imp.backstrides - r = calculate_broadcast_strides(strides, backstrides, imp.shape, - shape, backward) - return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) - def is_backward(imp, order): if order == 'K' or (order == 'C' and imp.order == 'C'): return False @@ -219,11 +197,28 @@ else: raise NotImplementedError('not implemented yet') +def get_iter(space, order, arr, shape): + imp = arr.implementation + backward = is_backward(imp, order) + if (imp.strides[0] < imp.strides[-1] and not backward) or \ + (imp.strides[0] > imp.strides[-1] and backward): + # flip the strides. Is this always true for multidimension? + strides = imp.strides[:] + backstrides = imp.backstrides[:] + shape = imp.shape[:] + strides.reverse() + backstrides.reverse() + shape.reverse() + else: + strides = imp.strides + backstrides = imp.backstrides + r = calculate_broadcast_strides(strides, backstrides, imp.shape, + shape, backward) + return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) + def get_external_loop_iter(space, order, arr, shape): imp = arr.implementation - backward = is_backward(imp, order) - return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) def convert_to_array_or_none(space, w_elem): diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -54,7 +54,14 @@ assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() e = raises(ValueError, 'r[0][0] = 0') assert str(e.value) == 'assignment destination is read-only' - + r = [] + for x in 
nditer(a.T, flags=['external_loop'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1,24) + assert (array(r) == arange(24)).all() + def test_index(self): from numpy import arange, nditer a = arange(6).reshape(2,3) From noreply at buildbot.pypy.org Mon Apr 14 17:57:59 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 14 Apr 2014 17:57:59 +0200 (CEST) Subject: [pypy-commit] pypy pytest-25: Fix creation and use of 'interplevel' and 'applevel' markers. Message-ID: <20140414155759.3BDBB1C0433@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: pytest-25 Changeset: r70631:50fea73fc1fe Date: 2014-04-14 16:56 +0100 http://bitbucket.org/pypy/pypy/changeset/50fea73fc1fe/ Log: Fix creation and use of 'interplevel' and 'applevel' markers. Filtering on markers needs '-m' option, not '-k'. diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py --- a/pypy/tool/pytest/apptest.py +++ b/pypy/tool/pytest/apptest.py @@ -21,11 +21,12 @@ def __init__(self, excinfo): self.excinfo = excinfo +marker = py.test.mark.applevel class AppTestFunction(py.test.collect.Function): def __init__(self, *args, **kwargs): super(AppTestFunction, self).__init__(*args, **kwargs) - self.keywords['applevel'] = True + self._request.applymarker(marker) def _prunetraceback(self, traceback): return traceback @@ -116,13 +117,6 @@ class AppClassCollector(PyPyClassCollector): Instance = AppClassInstance - def _haskeyword(self, keyword): - return keyword == 'applevel' or \ - super(AppClassCollector, self)._haskeyword(keyword) - - def _keywords(self): - return super(AppClassCollector, self)._keywords() + ['applevel'] - def setup(self): super(AppClassCollector, self).setup() cls = self.obj diff --git a/pypy/tool/pytest/inttest.py b/pypy/tool/pytest/inttest.py --- a/pypy/tool/pytest/inttest.py +++ b/pypy/tool/pytest/inttest.py @@ -19,10 +19,13 @@ pass +marker = py.test.mark.interplevel + + class 
IntTestFunction(py.test.collect.Function): def __init__(self, *args, **kwargs): super(IntTestFunction, self).__init__(*args, **kwargs) - self.keywords['interplevel'] = True + self._request.applymarker(marker) def runtest(self): try: @@ -47,11 +50,3 @@ class IntClassCollector(PyPyClassCollector): Instance = IntInstanceCollector - - def _haskeyword(self, keyword): - return (keyword == 'interplevel' or - super(IntClassCollector, self)._haskeyword(keyword)) - - def _keywords(self): - return super(IntClassCollector, self)._keywords() + ['interplevel'] - diff --git a/pypy/tool/pytest/test/test_conftest1.py b/pypy/tool/pytest/test/test_conftest1.py --- a/pypy/tool/pytest/test/test_conftest1.py +++ b/pypy/tool/pytest/test/test_conftest1.py @@ -5,27 +5,26 @@ pytest_plugins = "pytester" class TestPyPyTests: - def test_selection_by_keyword_interp(self, testdir): - sorter = testdir.inline_run("-k", "interplevel", innertest, ) + def test_selection_by_keyword_interp(self, testdir): + sorter = testdir.inline_run("-m", "interplevel", innertest, ) passed, skipped, failed = sorter.listoutcomes() assert len(passed) == 2, len(passed) - assert not skipped and not failed + assert not skipped and not failed assert "test_something" in passed[0].nodeid assert "test_method" in passed[1].nodeid - def test_selection_by_keyword_app(self, testdir): - sorter = testdir.inline_run("-k", "applevel", innertest) + def test_selection_by_keyword_app(self, testdir): + sorter = testdir.inline_run("-m", "applevel", innertest) passed, skipped, failed = sorter.listoutcomes() assert len(passed) == 2 - assert not skipped and not failed + assert not skipped and not failed assert "app_test_something" in passed[0].nodeid assert "test_method_app" in passed[1].nodeid def test_appdirect(self, testdir): - sorter = testdir.inline_run(innertest, '-k', 'applevel', '--runappdirect') + sorter = testdir.inline_run(innertest, '-m', 'applevel', '--runappdirect') passed, skipped, failed = sorter.listoutcomes() assert 
len(passed) == 2 print passed assert "app_test_something" in passed[0].nodeid assert "test_method_app" in passed[1].nodeid - From noreply at buildbot.pypy.org Mon Apr 14 23:20:54 2014 From: noreply at buildbot.pypy.org (kgriffs) Date: Mon, 14 Apr 2014 23:20:54 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Implement list.clear method Message-ID: <20140414212054.83BCF1D2BAF@cobra.cs.uni-duesseldorf.de> Author: Kurt Griffiths Branch: py3.3 Changeset: r70634:69693d4fab30 Date: 2014-04-14 17:08 -0400 http://bitbucket.org/pypy/pypy/changeset/69693d4fab30/ Log: Implement list.clear method This patch simply adds descr_clear to W_ListObject and has it call the internal clear() method. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -588,6 +588,10 @@ raise OperationError(space.w_IndexError, space.wrap("pop index out of range")) + def descr_clear(self, space): + '''L.clear() -- remove all items''' + self.clear(space) + def descr_remove(self, space, w_value): 'L.remove(value) -- remove first occurrence of value' # needs to be safe against eq_w() mutating the w_list behind our back @@ -1846,6 +1850,7 @@ __reversed__ = interp2app(W_ListObject.descr_reversed), count = interp2app(W_ListObject.descr_count), pop = interp2app(W_ListObject.descr_pop), + clear = interp2app(W_ListObject.descr_clear), extend = interp2app(W_ListObject.extend), insert = interp2app(W_ListObject.descr_insert), remove = interp2app(W_ListObject.descr_remove), diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -814,7 +814,25 @@ l = [1.1, 2.2] del l[:] assert l == [] - + + def test_clear(self): + l = l0 = [1,2,3] + l.clear() + assert l is l0 + assert l == [] + + l = ['a', 'b'] + l.clear() + assert l == [] + + l = [1.1, 2.2] + l.clear() + assert l == [] + + l 
= [] + l.clear() + assert l == [] + def test_iadd(self): l = l0 = [1,2,3] l += [4,5] From noreply at buildbot.pypy.org Tue Apr 15 00:28:42 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Tue, 15 Apr 2014 00:28:42 +0200 (CEST) Subject: [pypy-commit] pyrepl default: tox ini add flake8 Message-ID: <20140414222842.C51E61C0433@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: Changeset: r252:81049c73dc2e Date: 2014-04-15 00:28 +0200 http://bitbucket.org/pypy/pyrepl/changeset/81049c73dc2e/ Log: tox ini add flake8 diff --git a/tox.ini b/tox.ini --- a/tox.ini +++ b/tox.ini @@ -1,8 +1,7 @@ [tox] -envlist = py26, py27, pypy, py33 +envlist = py27, py33, flake8, pypy -[pytest] -codechecks = pep8 pyflakes + [testenv] deps= @@ -10,3 +9,9 @@ pexpect commands= py.test --junitxml={envdir}/junit.xml [] + +[testenv:flake8] +deps = + flake8 + mccabe +commands = flake8 --max-complexity=10 setup.py pyrepl testing pythoni pythoni1 From noreply at buildbot.pypy.org Tue Apr 15 00:58:43 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Tue, 15 Apr 2014 00:58:43 +0200 (CEST) Subject: [pypy-commit] pyrepl codecheck-clean: close unmergable branch Message-ID: <20140414225843.DDEB01C030C@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: codecheck-clean Changeset: r253:4583ebb6344a Date: 2014-04-15 00:58 +0200 http://bitbucket.org/pypy/pyrepl/changeset/4583ebb6344a/ Log: close unmergable branch From noreply at buildbot.pypy.org Tue Apr 15 10:28:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 15 Apr 2014 10:28:29 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: translation fixes. one abort() for not implemented code Message-ID: <20140415082829.89D111C1DB6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1158:69b0eac27cb6 Date: 2014-04-14 15:34 +0200 http://bitbucket.org/pypy/stmgc/changeset/69b0eac27cb6/ Log: translation fixes. 
one abort() for not implemented code diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -279,6 +279,7 @@ static void copy_object_to_shared(object_t *obj, int source_segment_num) { + abort(); /* Only used by major GC. XXX There is a lot of code duplication with synchronize_object_now() but I don't completely see how to improve... @@ -335,7 +336,7 @@ static inline void _synchronize_fragment(stm_char *frag, ssize_t frag_size) { /* First copy the object into the shared page, if needed */ - uintptr_t page = ((uintptr_t)obj) / 4096UL; + uintptr_t page = ((uintptr_t)frag) / 4096UL; char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, frag); char *dst = REAL_ADDRESS(stm_object_pages, frag); @@ -361,7 +362,8 @@ */ assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)dst); + ssize_t obj_size = stmcb_size_rounded_up( + (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); OPT_ASSERT(obj_size >= 16); if (LIKELY(is_small_uniform(obj))) { @@ -389,7 +391,7 @@ _synchronize_fragment((stm_char *)start, copy_size); start = copy_up_to; - + } while (start != end); } @@ -434,7 +436,7 @@ else EVENTUALLY(memcmp(dst, src, frag_size) == 0); /* same page */ } - } + } while (j > 0); } static void push_overflow_objects_from_privatized_pages(void) @@ -443,7 +445,8 @@ return; LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, - synchronize_object_now(item)); + synchronize_object_enqueue(item)); + synchronize_objects_flush(); } static void push_modified_to_other_segments(void) @@ -465,9 +468,10 @@ /* copy the object to the shared page, and to the other private pages as needed */ - synchronize_object_now(item); + synchronize_object_enqueue(item); })); + synchronize_objects_flush(); list_clear(STM_PSEGMENT->modified_old_objects); } diff --git a/c7/stm/smallmalloc.c b/c7/stm/smallmalloc.c --- a/c7/stm/smallmalloc.c +++ b/c7/stm/smallmalloc.c @@ -42,13 +42,15 @@ 
memset(full_pages_object_size, 0, sizeof(full_pages_object_size)); } +static int gmfp_lock = 0; + static void grab_more_free_pages_for_small_allocations(void) { /* Grab GCPAGE_NUM_PAGES pages out of the top addresses. Use the lock of pages.c to prevent any remapping from occurring under our feet. */ - mutex_pages_lock(); + spinlock_acquire(gmfp_lock); if (free_uniform_pages == NULL) { @@ -74,7 +76,7 @@ } } - mutex_pages_unlock(); + spinlock_release(gmfp_lock); return; out_of_memory: From noreply at buildbot.pypy.org Tue Apr 15 11:47:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 15 Apr 2014 11:47:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix test Message-ID: <20140415094757.49C1B1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70635:f0dfe097735a Date: 2014-04-15 11:47 +0200 http://bitbucket.org/pypy/pypy/changeset/f0dfe097735a/ Log: Fix test diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -12,9 +12,9 @@ assert conf.objspace.usemodules.gc conf.objspace.std.withmapdict = True - assert conf.objspace.std.withmethodcache + assert conf.objspace.std.withtypeversion conf = get_pypy_config() - conf.objspace.std.withmethodcache = False + conf.objspace.std.withtypeversion = False py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") def test_conflicting_gcrootfinder(): From noreply at buildbot.pypy.org Tue Apr 15 15:09:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 15 Apr 2014 15:09:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix: submodules of built-in modules had their classes Message-ID: <20140415130924.570511C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70636:6dac6407412f Date: 2014-04-15 15:08 +0200 http://bitbucket.org/pypy/pypy/changeset/6dac6407412f/ Log: Test and fix: submodules of built-in modules had their classes 
incorrectly reported as belonging to the base module. diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -14,6 +14,7 @@ # after startup(). w_initialdict = None lazy = False + submodule_name = None def __init__(self, space, w_name): """ NOT_RPYTHON """ @@ -31,6 +32,8 @@ space = self.space name = space.unwrap(self.w_name) for sub_name, module_cls in self.submodules.iteritems(): + if module_cls.submodule_name is None: + module_cls.submodule_name = sub_name module_name = space.wrap("%s.%s" % (name, sub_name)) m = module_cls(space, module_name) m.install() @@ -134,6 +137,8 @@ cls.loaders = loaders = {} pkgroot = cls.__module__ appname = cls.get_applevel_name() + if cls.submodule_name is not None: + appname += '.%s' % (cls.submodule_name,) for name, spec in cls.interpleveldefs.items(): loaders[name] = getinterpevalloader(pkgroot, spec) for name, spec in cls.appleveldefs.items(): diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -8,6 +8,7 @@ def test_signal(self): from __pypy__ import thread + assert type(thread.signals_enabled).__module__ == '__pypy__.thread' with thread.signals_enabled: pass # assert did not crash From noreply at buildbot.pypy.org Tue Apr 15 16:00:08 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Tue, 15 Apr 2014 16:00:08 +0200 (CEST) Subject: [pypy-commit] pyrepl default: Merged in msabramo/pyrepl/msabramo/improve_test_readline (pull request #8) Message-ID: <20140415140008.92C901C0433@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: Changeset: r256:6ccf74d8d7c1 Date: 2014-04-15 16:00 +0200 http://bitbucket.org/pypy/pyrepl/changeset/6ccf74d8d7c1/ Log: Merged in msabramo/pyrepl/msabramo/improve_test_readline (pull request #8) Make testing/test_readline.py more comprehensive 
diff --git a/testing/test_readline.py b/testing/test_readline.py --- a/testing/test_readline.py +++ b/testing/test_readline.py @@ -1,6 +1,34 @@ from pyrepl.readline import _ReadlineWrapper import os import pty +import sys + +if sys.version_info < (3, ): + bytes_type = str + unicode_type = unicode +else: + bytes_type = bytes + unicode_type = str + + +def test_readline(): + master, slave = pty.openpty() + readline_wrapper = _ReadlineWrapper(slave, slave) + os.write(master, b'input\n') + + result = readline_wrapper.get_reader().readline() + assert result == b'input' + assert isinstance(result, bytes_type) + + +def test_readline_returns_unicode(): + master, slave = pty.openpty() + readline_wrapper = _ReadlineWrapper(slave, slave) + os.write(master, b'input\n') + + result = readline_wrapper.get_reader().readline(returns_unicode=True) + assert result == 'input' + assert isinstance(result, unicode_type) def test_raw_input(): @@ -8,8 +36,6 @@ readline_wrapper = _ReadlineWrapper(slave, slave) os.write(master, b'input\n') - result = readline_wrapper.get_reader().readline() - #result = readline_wrapper.raw_input('prompt:') - assert result == 'input' - # A bytes string on python2, a unicode string on python3. 
- assert isinstance(result, str) + result = readline_wrapper.raw_input('prompt:') + assert result == b'input' + assert isinstance(result, bytes_type) From noreply at buildbot.pypy.org Tue Apr 15 16:00:11 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Tue, 15 Apr 2014 16:00:11 +0200 (CEST) Subject: [pypy-commit] pyrepl msabramo/improve_test_readline: Make testing/test_readline.py more comprehensive Message-ID: <20140415140011.1ED6E1C0433@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: msabramo/improve_test_readline Changeset: r254:5c157dca7fd8 Date: 2014-02-28 22:56 +0000 http://bitbucket.org/pypy/pyrepl/changeset/5c157dca7fd8/ Log: Make testing/test_readline.py more comprehensive diff --git a/testing/test_readline.py b/testing/test_readline.py --- a/testing/test_readline.py +++ b/testing/test_readline.py @@ -1,6 +1,34 @@ from pyrepl.readline import _ReadlineWrapper import os import pty +import sys + +if sys.version_info < (3, ): + bytes_type = str + unicode_type = unicode +else: + bytes_type = bytes + unicode_type = str + + +def test_readline(): + master, slave = pty.openpty() + readline_wrapper = _ReadlineWrapper(slave, slave) + os.write(master, b'input\n') + + result = readline_wrapper.get_reader().readline() + assert result == b'input' + assert isinstance(result, bytes_type) + + +def test_readline_returns_unicode(): + master, slave = pty.openpty() + readline_wrapper = _ReadlineWrapper(slave, slave) + os.write(master, b'input\n') + + result = readline_wrapper.get_reader().readline(returns_unicode=True) + assert result == 'input' + assert isinstance(result, unicode_type) def test_raw_input(): @@ -8,8 +36,6 @@ readline_wrapper = _ReadlineWrapper(slave, slave) os.write(master, b'input\n') - result = readline_wrapper.get_reader().readline() - #result = readline_wrapper.raw_input('prompt:') - assert result == 'input' - # A bytes string on python2, a unicode string on python3. 
- assert isinstance(result, str) + result = readline_wrapper.raw_input('prompt:') + assert result == b'input' + assert isinstance(result, bytes_type) From noreply at buildbot.pypy.org Tue Apr 15 16:00:12 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Tue, 15 Apr 2014 16:00:12 +0200 (CEST) Subject: [pypy-commit] pyrepl msabramo/improve_test_readline: Merge from default to msabramo/improve_test_readline Message-ID: <20140415140012.26DBF1C0433@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: msabramo/improve_test_readline Changeset: r255:2573268feedc Date: 2014-03-09 16:26 +0000 http://bitbucket.org/pypy/pyrepl/changeset/2573268feedc/ Log: Merge from default to msabramo/improve_test_readline diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -3,3 +3,5 @@ .cache/ \.tox/ .*\.egg-info +\.pyc +\.swp diff --git a/pyrepl/unix_eventqueue.py b/pyrepl/unix_eventqueue.py --- a/pyrepl/unix_eventqueue.py +++ b/pyrepl/unix_eventqueue.py @@ -100,7 +100,8 @@ self.events.append(event) def push(self, char): - self.buf.append(ord(char)) + ord_char = char if isinstance(char, int) else ord(char) + self.buf.append(ord_char) if char in self.k: if self.k is self.ck: #sanity check, buffer is empty when a special key comes diff --git a/testing/test_functional.py b/testing/test_functional.py --- a/testing/test_functional.py +++ b/testing/test_functional.py @@ -13,7 +13,10 @@ except SyntaxError: pytest.skip('pexpect wont work on py3k') child = pexpect.spawn(sys.executable, ['-S'], timeout=10) - child.logfile = sys.stdout + if sys.version_info >= (3, ): + child.logfile = sys.stdout.buffer + else: + child.logfile = sys.stdout child.sendline('from pyrepl.python_reader import main') child.sendline('main()') return child diff --git a/tox.ini b/tox.ini --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist= py27, py32 +envlist = py26, py27, pypy, py33 [pytest] codechecks = pep8 pyflakes From noreply at buildbot.pypy.org Tue Apr 15 16:00:14 2014 
From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Tue, 15 Apr 2014 16:00:14 +0200 (CEST) Subject: [pypy-commit] pyrepl msabramo/improve_test_readline: Close branch msabramo/improve_test_readline Message-ID: <20140415140014.C81CE1C0433@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: msabramo/improve_test_readline Changeset: r257:5f3ef19efb47 Date: 2014-04-15 16:00 +0200 http://bitbucket.org/pypy/pyrepl/changeset/5f3ef19efb47/ Log: Close branch msabramo/improve_test_readline From noreply at buildbot.pypy.org Tue Apr 15 18:18:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 15 Apr 2014 18:18:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Write down the early name of (part of) this report Message-ID: <20140415161855.307651D2B42@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70637:715d9bf8ebbd Date: 2014-04-15 18:18 +0200 http://bitbucket.org/pypy/pypy/changeset/715d9bf8ebbd/ Log: Write down the early name of (part of) this report diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -92,7 +92,9 @@ `D07.1 Massive Parallelism and Translation Aspects`_ is a report about PyPy's optimization efforts, garbage collectors and massive parallelism (stackless) features. This report refers to the paper `PyPy's approach -to virtual machine construction`_. *(2007-02-28)* +to virtual machine construction`_. Extends the content previously +available in the document "Memory management and threading models as +translation aspects -- solutions and challenges". 
*(2007-02-28)* From noreply at buildbot.pypy.org Tue Apr 15 18:47:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 15 Apr 2014 18:47:35 +0200 (CEST) Subject: [pypy-commit] stmgc default: Use shadowstack markers to avoid re-scanning the complete stack at every Message-ID: <20140415164735.4F9FC1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1159:918b1901b1f9 Date: 2014-04-15 18:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/918b1901b1f9/ Log: Use shadowstack markers to avoid re-scanning the complete stack at every minor collection. Needs to be supported explicitly by pypy. diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -194,6 +194,7 @@ { int status; stm_register_thread_local(&stm_thread_local); + char *org = (char *)stm_thread_local.shadowstack; STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ @@ -202,7 +203,7 @@ } STM_POP_ROOT(stm_thread_local, global_chained_list); - assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); + assert(org == (char *)stm_thread_local.shadowstack); unregister_thread_local(); status = sem_post(&done); assert(status == 0); diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c --- a/c7/demo/demo_simple.c +++ b/c7/demo/demo_simple.c @@ -50,6 +50,7 @@ { int status; stm_register_thread_local(&stm_thread_local); + char *org = (char *)stm_thread_local.shadowstack; tl_counter = 0; object_t *tmp; @@ -65,7 +66,7 @@ i++; } - assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); + assert(org == (char *)stm_thread_local.shadowstack); stm_unregister_thread_local(&stm_thread_local); status = sem_post(&done); assert(status == 0); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -379,8 +379,8 @@ struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - 
assert(current->ss != (object_t *)-1); - mark_visit_object(current->ss, segment_base); + if (((uintptr_t)current->ss) > STM_STACK_MARKER_OLD) + mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -156,10 +156,32 @@ stm_thread_local_t *tl = STM_SEGMENT->running_thread; struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; - while (current-- != base) { - assert(current->ss != (object_t *)-1); - minor_trace_if_young(¤t->ss); + while (1) { + --current; + OPT_ASSERT(current >= base); + + switch ((uintptr_t)current->ss) { + + case 0: /* NULL */ + continue; + + case STM_STACK_MARKER_NEW: + /* the marker was not already seen: mark it as seen, + but continue looking more deeply in the shadowstack */ + current->ss = (object_t *)STM_STACK_MARKER_OLD; + continue; + + case STM_STACK_MARKER_OLD: + /* the marker was already seen: we can stop the + root stack tracing at this point */ + goto interrupt; + + default: + /* the stack entry is a regular pointer */ + minor_trace_if_young(¤t->ss); + } } + interrupt: minor_trace_if_young(&tl->thread_local_obj); } diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -153,11 +153,13 @@ struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start; tl->shadowstack = s; tl->shadowstack_base = s; + STM_PUSH_ROOT(*tl, STM_STACK_MARKER_OLD); } static void _done_shadow_stack(stm_thread_local_t *tl) { - assert(tl->shadowstack >= tl->shadowstack_base); + assert(tl->shadowstack > tl->shadowstack_base); + assert(tl->shadowstack_base->ss == (object_t *)STM_STACK_MARKER_OLD); char *start = (char *)tl->shadowstack_base; _shadowstack_trap_page(start, PROT_READ | PROT_WRITE); diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -70,7 +70,7 @@ s_mutex_lock(); fprintf(stderr, 
"thread %p:\n", tl); for (i = 0; i < _STM_TIME_N; i++) { - fprintf(stderr, " %-24s %9u %.3f s\n", + fprintf(stderr, " %-24s %9u %8.3f s\n", timer_names[i], tl->events[i], (double)tl->timing[i]); } s_mutex_unlock(); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -264,6 +264,8 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) +#define STM_STACK_MARKER_NEW 1 +#define STM_STACK_MARKER_OLD 2 /* Every thread needs to have a corresponding stm_thread_local_t diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -12,6 +12,8 @@ #define STM_NB_SEGMENTS ... #define _STM_FAST_ALLOC ... #define _STM_GCFLAG_WRITE_BARRIER ... +#define STM_STACK_MARKER_NEW ... +#define STM_STACK_MARKER_OLD ... struct stm_shadowentry_s { object_t *ss; @@ -504,7 +506,8 @@ def pop_root(self): tl = self.tls[self.current_thread] curlength = tl.shadowstack - tl.shadowstack_base - if curlength == 0: + assert curlength >= 1 + if curlength == 1: raise EmptyStack assert 0 < curlength <= SHADOWSTACK_LENGTH tl.shadowstack -= 1 diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -228,3 +228,22 @@ self.start_transaction() assert stm_get_char(self.get_thread_local_obj()) == 'L' + + def test_marker_1(self): + self.start_transaction() + p1 = stm_allocate(600) + stm_set_char(p1, 'o') + self.push_root(p1) + self.push_root(ffi.cast("object_t *", lib.STM_STACK_MARKER_NEW)) + p2 = stm_allocate(600) + stm_set_char(p2, 't') + self.push_root(p2) + stm_major_collect() + assert lib._stm_total_allocated() == 2 * 616 + # + p2 = self.pop_root() + m = self.pop_root() + assert m == ffi.cast("object_t *", lib.STM_STACK_MARKER_OLD) + p1 = self.pop_root() + assert stm_get_char(p1) == 'o' + assert stm_get_char(p2) == 't' diff --git 
a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -197,3 +197,42 @@ self.start_transaction() assert lib.stm_can_move(old) == 0 + + def test_marker_1(self): + self.start_transaction() + p1 = stm_allocate(600) + stm_set_char(p1, 'o') + self.push_root(p1) + self.push_root(ffi.cast("object_t *", lib.STM_STACK_MARKER_NEW)) + p2 = stm_allocate(600) + stm_set_char(p2, 't') + self.push_root(p2) + stm_minor_collect() + assert lib._stm_total_allocated() == 2 * 616 + # + p2 = self.pop_root() + m = self.pop_root() + assert m == ffi.cast("object_t *", lib.STM_STACK_MARKER_OLD) + p1 = self.pop_root() + assert stm_get_char(p1) == 'o' + assert stm_get_char(p2) == 't' + + def test_marker_2(self): + self.start_transaction() + p1 = stm_allocate(600) + stm_set_char(p1, 'o') + self.push_root(p1) + self.push_root(ffi.cast("object_t *", lib.STM_STACK_MARKER_OLD)) + p2 = stm_allocate(600) + stm_set_char(p2, 't') + self.push_root(p2) + stm_minor_collect() + assert lib._stm_total_allocated() == 1 * 616 + # + p2 = self.pop_root() + m = self.pop_root() + assert m == ffi.cast("object_t *", lib.STM_STACK_MARKER_OLD) + assert stm_get_char(p2) == 't' + # the 'p1' reference is invalid now, don't try to read it. + # we check that it's invalid because _stm_total_allocated() + # only records one of the two objects. 
From noreply at buildbot.pypy.org Tue Apr 15 20:23:22 2014 From: noreply at buildbot.pypy.org (Matthew Miller) Date: Tue, 15 Apr 2014 20:23:22 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add missing list copy function to fix test_list for Python 3 Message-ID: <20140415182322.CE00B1C02C7@cobra.cs.uni-duesseldorf.de> Author: Matthew Miller Branch: py3.3 Changeset: r70638:ced27c96f4c8 Date: 2014-04-14 16:58 -0400 http://bitbucket.org/pypy/pypy/changeset/ced27c96f4c8/ Log: Add missing list copy function to fix test_list for Python 3 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1841,6 +1841,7 @@ sort = interp2app(W_ListObject.descr_sort), index = interp2app(W_ListObject.descr_index), + copy = interp2app(W_ListObject.clone), append = interp2app(W_ListObject.append), reverse = interp2app(W_ListObject.descr_reverse), __reversed__ = interp2app(W_ListObject.descr_reversed), diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1059,6 +1059,32 @@ l.append(l) assert repr(l) == '[[...]]' + def test_copy(self): + # test that empty list copies the empty list + l = [] + c = l.copy() + assert c == [] + + # test that the items of a list are the same + l = list(range(3)) + c = l.copy() + assert l == c + + # test that it's indeed a copy and not a reference + l = ['a', 'b'] + c = l.copy() + c.append('i') + assert l == ['a', 'b'] + assert c == l + ['i'] + + # test that it's a shallow, not a deep copy + l = [1, 2, [3, 4], 5] + c = l.copy() + assert l == c + assert c[3] == l[3] + + raises(TypeError, l.copy, None) + def test_append(self): l = [] l.append('X') From noreply at buildbot.pypy.org Tue Apr 15 20:23:24 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 15 Apr 2014 20:23:24 +0200 (CEST) Subject: [pypy-commit] 
pypy py3.3: Merged in matthewmiller/pypy/py3.3 (pull request #228) Message-ID: <20140415182324.1C36C1C02C7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r70639:ced5abcc03ef Date: 2014-04-15 11:22 -0700 http://bitbucket.org/pypy/pypy/changeset/ced5abcc03ef/ Log: Merged in matthewmiller/pypy/py3.3 (pull request #228) Add missing list copy function to fix test_list for Python 3 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1845,6 +1845,7 @@ sort = interp2app(W_ListObject.descr_sort), index = interp2app(W_ListObject.descr_index), + copy = interp2app(W_ListObject.clone), append = interp2app(W_ListObject.append), reverse = interp2app(W_ListObject.descr_reverse), __reversed__ = interp2app(W_ListObject.descr_reversed), diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1077,6 +1077,32 @@ l.append(l) assert repr(l) == '[[...]]' + def test_copy(self): + # test that empty list copies the empty list + l = [] + c = l.copy() + assert c == [] + + # test that the items of a list are the same + l = list(range(3)) + c = l.copy() + assert l == c + + # test that it's indeed a copy and not a reference + l = ['a', 'b'] + c = l.copy() + c.append('i') + assert l == ['a', 'b'] + assert c == l + ['i'] + + # test that it's a shallow, not a deep copy + l = [1, 2, [3, 4], 5] + c = l.copy() + assert l == c + assert c[3] == l[3] + + raises(TypeError, l.copy, None) + def test_append(self): l = [] l.append('X') From noreply at buildbot.pypy.org Tue Apr 15 20:31:58 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 15 Apr 2014 20:31:58 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: merge py3k Message-ID: <20140415183158.0DFA01C02C7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey 
Branch: py3k-fix-strategies Changeset: r70640:ebfddb606dbd Date: 2014-04-14 15:18 -0700 http://bitbucket.org/pypy/pypy/changeset/ebfddb606dbd/ Log: merge py3k diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -25,6 +25,7 @@ except ImportError: pass # + run_interactive = run_simple_interactive_console try: if not os.isatty(sys.stdin.fileno()): # Bail out if stdin is not tty-like, as pyrepl wouldn't be happy @@ -35,13 +36,12 @@ if not check(): raise ImportError from pyrepl.simple_interact import run_multiline_interactive_console + run_interactive = run_multiline_interactive_console except ImportError: - run_simple_interactive_console(mainmodule) + pass except SyntaxError: print("Warning: 'import pyrepl' failed with SyntaxError") - run_simple_interactive_console(mainmodule) - else: - run_multiline_interactive_console(mainmodule) + run_interactive(mainmodule) def run_simple_interactive_console(mainmodule): import code diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -53,10 +53,10 @@ if sys.platform == 'win32': # XXX pyconfig.h uses a pragma to link to the import library, # which is currently python3.lib - library = os.path.join(thisdir, '..', 'include', 'python3') + library = os.path.join(thisdir, '..', 'include', 'python32') if not os.path.exists(library + '.lib'): # For a local translation or nightly build - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python3') + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python32') assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -115,12 +115,22 @@ continue print "Picking %s" 
% p binaries.append((p, p.basename)) - importlib_name = 'python27.lib' + importlib_name = 'libpypy-c.lib' if pypy_c.dirpath().join(importlib_name).check(): - shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), - str(pypydir.join('include/python27.lib'))) - print "Picking %s as %s" % (pypy_c.dirpath().join(importlib_name), - pypydir.join('include/python27.lib')) + try: + ver = subprocess.check_output([r'pypy\goal\pypy-c','-c', + "import sys;print(sys.version)"]) + importlib_target = 'python%s%s.lib' % (ver[0], ver[2]) + shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), + str(pypydir.join(importlib_target))) + # XXX fix this, either an additional build step or rename + # both DLL and LIB to versioned names, like cpython + shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), + str(pypy_c.dirpath().join(importlib_target))) + print "Picking %s as %s" % (pypy_c.dirpath().join(importlib_name), + pypydir.join('include', importlib_target)) + except: + pass else: pass # XXX users will complain that they cannot compile cpyext diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -16,6 +16,8 @@ shell_default = True def _run(executable, args, env, cwd): # unless overridden below + if sys.platform == 'win32': + executable = executable.replace('/','\\') if isinstance(args, str): args = str(executable) + ' ' + args shell = True diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -476,11 +476,11 @@ shutil_copy(str(soname), str(newsoname)) self.log.info("copied: %s" % (newsoname,)) if sys.platform == 'win32': - # the import library is named python27.lib, according - # to the pragma in pyconfig.h - libname = str(newsoname.dirpath().join('python27.lib')) - shutil.copyfile(str(soname.new(ext='lib')), libname) - self.log.info("copied: %s" % (libname,)) + # copy the import 
library as well + libname = soname.new(ext='lib') + newlibname = newexename.new(basename=soname.basename) + shutil.copyfile(str(libname), str(newlibname.new(ext='lib'))) + self.log.info("copied: %s" % (newlibname,)) self.c_entryp = newexename self.log.info('usession directory: %s' % (udir,)) self.log.info("created: %s" % (self.c_entryp,)) diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -72,7 +72,7 @@ td.create_exe() assert dst_name.read() == 'exe' assert dst_name.new(ext='dll').read() == 'dll' - assert dst_name.new(purebasename='python27',ext='lib').read() == 'lib' + assert dst_name.new(ext='lib').read() == 'lib' def test_shutil_copy(): a = udir.join('file_a') From noreply at buildbot.pypy.org Tue Apr 15 20:31:59 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 15 Apr 2014 20:31:59 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fill in copy's doc string Message-ID: <20140415183159.548B61C02C7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r70641:d71832240c1a Date: 2014-04-15 11:31 -0700 http://bitbucket.org/pypy/pypy/changeset/d71832240c1a/ Log: fill in copy's doc string diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -592,6 +592,10 @@ '''L.clear() -- remove all items''' self.clear(space) + def descr_copy(self, space): + '''L.copy() -> list -- a shallow copy of L''' + return self.clone() + def descr_remove(self, space, w_value): 'L.remove(value) -- remove first occurrence of value' # needs to be safe against eq_w() mutating the w_list behind our back @@ -1845,7 +1849,7 @@ sort = interp2app(W_ListObject.descr_sort), index = interp2app(W_ListObject.descr_index), - copy = interp2app(W_ListObject.clone), + copy = interp2app(W_ListObject.descr_copy), append = 
interp2app(W_ListObject.append), reverse = interp2app(W_ListObject.descr_reverse), __reversed__ = interp2app(W_ListObject.descr_reversed), From noreply at buildbot.pypy.org Wed Apr 16 01:11:18 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 16 Apr 2014 01:11:18 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt http://hg.python.org/cpython/rev/f4271cc2dfb5 from cpython-3.3: we Message-ID: <20140415231118.21B4C1C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70642:672f831a77d7 Date: 2014-04-15 16:10 -0700 http://bitbucket.org/pypy/pypy/changeset/672f831a77d7/ Log: adapt http://hg.python.org/cpython/rev/f4271cc2dfb5 from cpython-3.3: we already match 3.3's _sre.MAXREPEAT value for 32 bit, so we also need this associated fix diff --git a/lib-python/3/sre_compile.py b/lib-python/3/sre_compile.py --- a/lib-python/3/sre_compile.py +++ b/lib-python/3/sre_compile.py @@ -13,7 +13,6 @@ import _sre, sys import sre_parse from sre_constants import * -from _sre import MAXREPEAT assert _sre.MAGIC == MAGIC, "SRE module mismatch" @@ -356,8 +355,6 @@ def _simple(av): # check if av is a "simple" operator lo, hi = av[2].getwidth() - if lo == 0 and hi == MAXREPEAT: - raise error("nothing to repeat") return lo == hi == 1 and av[2][0][0] != SUBPATTERN def _compile_info(code, pattern, flags): From noreply at buildbot.pypy.org Wed Apr 16 08:24:10 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 16 Apr 2014 08:24:10 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: a test for structs (that simply works) Message-ID: <20140416062410.C848E1D2491@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70643:bf2cb37cad83 Date: 2014-04-15 15:12 +0200 http://bitbucket.org/pypy/pypy/changeset/bf2cb37cad83/ Log: a test for structs (that simply works) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py 
b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -9,7 +9,8 @@ from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ equaloplists from rpython.jit.metainterp.optimizeopt.intutils import IntBound -from rpython.jit.metainterp.optimizeopt.virtualize import VirtualValue, VArrayValue +from rpython.jit.metainterp.optimizeopt.virtualize import (VirtualValue, + VArrayValue, VStructValue) from rpython.jit.metainterp.history import TreeLoop, JitCellToken from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import ResOperation, rop @@ -479,7 +480,7 @@ """ self.guards(info1, info2, value1, expected, [self.nodebox]) - def test_generate_guards_on_virtual_fields_matches_struct(self): + def test_generate_guards_on_virtual_fields_matches_instance(self): innervalue1 = OptValue(self.nodebox) constclassbox = self.cpu.ts.cls_of_box(self.nodebox) innervalue1.make_constant_class(constclassbox, -1) @@ -504,6 +505,32 @@ """ self.guards(info1, info2, value1, expected, [self.nodebox]) + def test_generate_guards_on_virtual_fields_matches_struct(self): + innervalue1 = OptValue(self.nodebox) + constclassbox = self.cpu.ts.cls_of_box(self.nodebox) + innervalue1.make_constant_class(constclassbox, -1) + innerinfo1 = NotVirtualStateInfo(innervalue1) + innerinfo1.position = 1 + innerinfo2 = NotVirtualStateInfo(OptValue(self.nodebox)) + innerinfo2.position = 1 + + structdescr = object() + + info1 = VStructStateInfo(structdescr, [1]) + info1.fieldstate = [innerinfo1] + + info2 = VStructStateInfo(structdescr, [1]) + info2.fieldstate = [innerinfo2] + + value1 = VStructValue(self.cpu, structdescr, self.nodebox) + value1._fields = {1: OptValue(self.nodebox)} + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, 
ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, value1, expected, [self.nodebox]) # _________________________________________________________________________ # the below tests don't really have anything to do with guard generation From noreply at buildbot.pypy.org Wed Apr 16 08:24:12 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 16 Apr 2014 08:24:12 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: make nicer Message-ID: <20140416062412.0A3441D2491@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70644:55a9dbfac218 Date: 2014-04-15 15:24 +0200 http://bitbucket.org/pypy/pypy/changeset/55a9dbfac218/ Log: make nicer diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -154,16 +154,8 @@ start_target = start_label.getdescr() assert isinstance(stop_target, TargetToken) assert isinstance(start_target, TargetToken) - if stop_target.targeting_jitcell_token is not start_target.targeting_jitcell_token: - return False + return stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token - return True - - #args = stop_label.getarglist() - #modifier = VirtualStateAdder(self.optimizer) - #virtual_state = modifier.get_virtual_state(args) - #if self.initial_virtual_state.generalization_of(virtual_state): - # return True def export_state(self, targetop): original_jump_args = targetop.getarglist() From noreply at buildbot.pypy.org Wed Apr 16 08:24:13 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 16 Apr 2014 08:24:13 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: extract virtual state construction into a method Message-ID: <20140416062413.322AB1D2491@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70645:68bcb1950761 Date: 
2014-04-15 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/68bcb1950761/ Log: extract virtual state construction into a method diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -54,6 +54,10 @@ self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) self.boxes_created_this_iteration = None + def get_virtual_state(self, args): + modifier = VirtualStateAdder(self.optimizer) + return modifier.get_virtual_state(args) + def fix_snapshot(self, jump_args, snapshot): if snapshot is None: return None @@ -161,8 +165,7 @@ original_jump_args = targetop.getarglist() jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(jump_args) + virtual_state = self.get_virtual_state(jump_args) values = [self.getvalue(arg) for arg in jump_args] inputargs = virtual_state.make_inputargs(values, self.optimizer) @@ -215,8 +218,7 @@ if not exported_state: # No state exported, construct one without virtuals self.short = None - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(self.inputargs) + virtual_state = self.get_virtual_state(self.inputargs) self.initial_virtual_state = virtual_state return @@ -342,8 +344,7 @@ # Verify that the virtual state at the end of the loop is one # that is compatible with the virtual state at the start of the loop - modifier = VirtualStateAdder(self.optimizer) - final_virtual_state = modifier.get_virtual_state(original_jumpargs) + final_virtual_state = self.get_virtual_state(original_jumpargs) #debug_start('jit-log-virtualstate') #virtual_state.debug_print('Closed loop with ') bad = {} @@ -513,8 +514,7 @@ return True args = jumpop.getarglist() - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(args) + 
virtual_state = self.get_virtual_state(args) values = [self.getvalue(arg) for arg in jumpop.getarglist()] debug_start('jit-log-virtualstate') From noreply at buildbot.pypy.org Wed Apr 16 08:24:14 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 16 Apr 2014 08:24:14 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: disentangle resume and virtualstate which have *nothing* at all to do with each Message-ID: <20140416062414.700F61D2491@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70646:4b34877aa5e4 Date: 2014-04-15 16:54 +0200 http://bitbucket.org/pypy/pypy/changeset/4b34877aa5e4/ Log: disentangle resume and virtualstate which have *nothing* at all to do with each other conceptually introduce a slightly weird virtual walking api to do that diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -135,13 +135,21 @@ def force_at_end_of_preamble(self, already_forced, optforce): return self - def get_args_for_fail(self, modifier): + # visitor API + + def visitor_walk_recursive(self, visitor): pass - def make_virtual_info(self, modifier, fieldnums): - #raise NotImplementedError # should not be called on this level - assert fieldnums is None - return modifier.make_not_virtual(self) + @specialize.argtype(1) + def visitor_dispatch_virtual_type(self, visitor): + if self.is_virtual(): + return self._visitor_dispatch_virtual_type(visitor) + else: + return visitor.visit_not_virtual(self) + + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + assert 0, "unreachable" def is_constant(self): return self.level == LEVEL_CONSTANT diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- 
a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -61,24 +61,6 @@ lst6 = virt1._get_field_descr_list() assert lst6 is lst3 -def test_reuse_vinfo(): - class FakeVInfo(object): - def set_content(self, fieldnums): - self.fieldnums = fieldnums - def equals(self, fieldnums): - return self.fieldnums == fieldnums - class FakeVirtualValue(virtualize.AbstractVirtualValue): - def _make_virtual(self, *args): - return FakeVInfo() - v1 = FakeVirtualValue(None, None) - vinfo1 = v1.make_virtual_info(None, [1, 2, 4]) - vinfo2 = v1.make_virtual_info(None, [1, 2, 4]) - assert vinfo1 is vinfo2 - vinfo3 = v1.make_virtual_info(None, [1, 2, 6]) - assert vinfo3 is not vinfo2 - vinfo4 = v1.make_virtual_info(None, [1, 2, 6]) - assert vinfo3 is vinfo4 - def test_descrlist_dict(): from rpython.jit.metainterp.optimizeopt import util as optimizeutil h1 = optimizeutil.descrlist_hash([]) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -6,7 +6,7 @@ from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer, Optimization -from rpython.jit.metainterp.optimizeopt.virtualstate import (VirtualStateAdder, +from rpython.jit.metainterp.optimizeopt.virtualstate import (VirtualStateConstructor, ShortBoxes, BadVirtualState, VirtualStatesCantMatch) from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.resume import Snapshot @@ -55,7 +55,7 @@ self.boxes_created_this_iteration = None def get_virtual_state(self, args): - modifier = VirtualStateAdder(self.optimizer) + modifier = VirtualStateConstructor(self.optimizer) return modifier.get_virtual_state(args) def fix_snapshot(self, jump_args, 
snapshot): diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.optimizeopt.rawbuffer import RawBuffer, InvalidRawOperation from rpython.jit.metainterp.resoperation import rop, ResOperation -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, specialize class AbstractVirtualValue(optimizer.OptValue): @@ -45,27 +45,17 @@ return value return OptValue(self.force_box(optforce)) - def get_args_for_fail(self, modifier): + def visitor_walk_recursive(self, visitor): # checks for recursion: it is False unless # we have already seen the very same keybox - if self.box is None and not modifier.already_seen_virtual(self.keybox): - self._get_args_for_fail(modifier) + if self.box is None and not visitor.already_seen_virtual(self.keybox): + self._visitor_walk_recursive(visitor) - def _get_args_for_fail(self, modifier): + def _visitor_walk_recursive(self, visitor): raise NotImplementedError("abstract base") - def make_virtual_info(self, modifier, fieldnums): - if fieldnums is None: - return self._make_virtual(modifier) - vinfo = self._cached_vinfo - if vinfo is not None and vinfo.equals(fieldnums): - return vinfo - vinfo = self._make_virtual(modifier) - vinfo.set_content(fieldnums) - self._cached_vinfo = vinfo - return vinfo - - def _make_virtual(self, modifier): + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): raise NotImplementedError("abstract base") def _really_force(self, optforce): @@ -202,13 +192,13 @@ self._cached_sorted_fields = lst return lst - def _get_args_for_fail(self, modifier): + def _visitor_walk_recursive(self, visitor): lst = self._get_field_descr_list() fieldboxes = [self._fields[ofs].get_key_box() for ofs in lst] - modifier.register_virtual_fields(self.keybox, 
fieldboxes) + visitor.register_virtual_fields(self.keybox, fieldboxes) for ofs in lst: fieldvalue = self._fields[ofs] - fieldvalue.get_args_for_fail(modifier) + fieldvalue.visitor_walk_recursive(visitor) class VirtualValue(AbstractVirtualStructValue): level = optimizer.LEVEL_KNOWNCLASS @@ -218,9 +208,10 @@ assert isinstance(known_class, Const) self.known_class = known_class - def _make_virtual(self, modifier): + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): fielddescrs = self._get_field_descr_list() - return modifier.make_virtual(self.known_class, fielddescrs) + return visitor.visit_virtual(self.known_class, fielddescrs) def _get_descr(self): return vtable2descr(self.cpu, self.known_class.getint()) @@ -238,9 +229,10 @@ AbstractVirtualStructValue.__init__(self, cpu, keybox, source_op) self.structdescr = structdescr - def _make_virtual(self, modifier): + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): fielddescrs = self._get_field_descr_list() - return modifier.make_vstruct(self.structdescr, fielddescrs) + return visitor.visit_vstruct(self.structdescr, fielddescrs) def _get_descr(self): return self.structdescr @@ -260,15 +252,15 @@ def set_item_value(self, i, newval): raise NotImplementedError - def _get_args_for_fail(self, modifier): + def _visitor_walk_recursive(self, visitor): itemboxes = [] for i in range(self.getlength()): itemvalue = self.get_item_value(i) itemboxes.append(itemvalue.get_key_box()) - modifier.register_virtual_fields(self.keybox, itemboxes) + visitor.register_virtual_fields(self.keybox, itemboxes) for i in range(self.getlength()): itemvalue = self.get_item_value(i) - itemvalue.get_args_for_fail(modifier) + itemvalue.visitor_walk_recursive(visitor) class VArrayValue(AbstractVArrayValue): @@ -326,8 +318,9 @@ descr=self.arraydescr) optforce.emit_operation(op) - def _make_virtual(self, modifier): - return modifier.make_varray(self.arraydescr) + @specialize.argtype(1) + def 
_visitor_dispatch_virtual_type(self, visitor): + return visitor.visit_varray(self.arraydescr) class VArrayStructValue(AbstractVirtualValue): @@ -373,16 +366,16 @@ descrs.append(item_descrs) return descrs - def _get_args_for_fail(self, modifier): + def _visitor_walk_recursive(self, visitor): itemdescrs = self._get_list_of_descrs() itemboxes = [] for i in range(len(self._items)): for descr in itemdescrs[i]: itemboxes.append(self._items[i][descr].get_key_box()) - modifier.register_virtual_fields(self.keybox, itemboxes) + visitor.register_virtual_fields(self.keybox, itemboxes) for i in range(len(self._items)): for descr in itemdescrs[i]: - self._items[i][descr].get_args_for_fail(modifier) + self._items[i][descr].visitor_walk_recursive(visitor) def force_at_end_of_preamble(self, already_forced, optforce): if self in already_forced: @@ -393,8 +386,9 @@ self._items[index][descr] = self._items[index][descr].force_at_end_of_preamble(already_forced, optforce) return self - def _make_virtual(self, modifier): - return modifier.make_varraystruct(self.arraydescr, self._get_list_of_descrs()) + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + return visitor.visit_varraystruct(self.arraydescr, self._get_list_of_descrs()) class VRawBufferValue(AbstractVArrayValue): @@ -442,11 +436,12 @@ descr=descr) optforce.emit_operation(op) - def _make_virtual(self, modifier): + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): # I *think* we need to make a copy of offsets and descrs because we # want a snapshot of the virtual state right now: if we grow more # elements later, we don't want them to go in this virtual state - return modifier.make_vrawbuffer(self.size, + return visitor.visit_vrawbuffer(self.size, self.buffer.offsets[:], self.buffer.descrs[:]) @@ -474,13 +469,14 @@ def getitem_raw(self, offset, length, descr): return self.rawbuffer_value.getitem_raw(self.offset+offset, length, descr) - def _get_args_for_fail(self, modifier): + 
def _visitor_walk_recursive(self, visitor): box = self.rawbuffer_value.get_key_box() - modifier.register_virtual_fields(self.keybox, [box]) - self.rawbuffer_value.get_args_for_fail(modifier) + visitor.register_virtual_fields(self.keybox, [box]) + self.rawbuffer_value.visitor_walk_recursive(visitor) - def _make_virtual(self, modifier): - return modifier.make_vrawslice(self.offset) + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + return visitor.visit_vrawslice(self.offset) class OptVirtualize(optimizer.Optimization): diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -1,4 +1,4 @@ -from rpython.jit.metainterp import resume +from rpython.jit.metainterp.walkvirtual import VirtualVisitor from rpython.jit.metainterp.history import (BoxInt, ConstInt, BoxPtr, Const, ConstPtr, ConstFloat) from rpython.jit.metainterp.optimizeopt import virtualize @@ -7,7 +7,7 @@ LEVEL_KNOWNCLASS, LEVEL_NONNULL, LEVEL_UNKNOWN, OptValue) from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.debug import debug_start, debug_stop, debug_print -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, import_from_mixin class BadVirtualState(Exception): @@ -31,7 +31,7 @@ bad = {} self.bad = bad -class AbstractVirtualStateInfo(resume.AbstractVirtualInfo): +class AbstractVirtualStateInfo(object): position = -1 def generate_guards(self, other, value, state): @@ -506,7 +506,9 @@ s.debug_print(" ", seen, bad, metainterp_sd) -class VirtualStateAdder(resume.ResumeDataVirtualAdder): +class VirtualStateConstructor(object): + import_from_mixin(VirtualVisitor) + def __init__(self, optimizer): self.fieldboxes = {} self.optimizer = optimizer @@ -527,12 +529,10 @@ try: info = self.info[box] except KeyError: + 
self.info[box] = info = value.visitor_dispatch_virtual_type(self) if value.is_virtual(): - self.info[box] = info = value.make_virtual_info(self, None) flds = self.fieldboxes[box] info.fieldstate = [self.state(b) for b in flds] - else: - self.info[box] = info = self.make_not_virtual(value) return info def get_virtual_state(self, jump_args): @@ -547,31 +547,25 @@ for box in jump_args] for value in values: - if value.is_virtual(): - value.get_args_for_fail(self) - else: - self.make_not_virtual(value) + value.visitor_walk_recursive(self) return VirtualState([self.state(box) for box in jump_args]) - def make_not_virtual(self, value): + def visit_not_virtual(self, value): is_opaque = value in self.optimizer.opaque_pointers return NotVirtualStateInfo(value, is_opaque) - def make_virtual(self, known_class, fielddescrs): + def visit_virtual(self, known_class, fielddescrs): return VirtualStateInfo(known_class, fielddescrs) - def make_vstruct(self, typedescr, fielddescrs): + def visit_vstruct(self, typedescr, fielddescrs): return VStructStateInfo(typedescr, fielddescrs) - def make_varray(self, arraydescr): + def visit_varray(self, arraydescr): return VArrayStateInfo(arraydescr) - def make_varraystruct(self, arraydescr, fielddescrs): + def visit_varraystruct(self, arraydescr, fielddescrs): return VArrayStructStateInfo(arraydescr, fielddescrs) - def make_vrawbuffer(self, size, offsets, descrs): - raise NotImplementedError - class BoxNotProducable(Exception): pass diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -199,22 +199,22 @@ offsetbox = _int_add(string_optimizer, offsetbox, CONST_1) return offsetbox - def get_args_for_fail(self, modifier): - if self.box is None and not modifier.already_seen_virtual(self.keybox): - charboxes = [] - for value in self._chars: - if value is not None: - box = value.get_key_box() - 
else: - box = None - charboxes.append(box) - modifier.register_virtual_fields(self.keybox, charboxes) - for value in self._chars: - if value is not None: - value.get_args_for_fail(modifier) + def _visitor_walk_recursive(self, visitor): + charboxes = [] + for value in self._chars: + if value is not None: + box = value.get_key_box() + else: + box = None + charboxes.append(box) + visitor.register_virtual_fields(self.keybox, charboxes) + for value in self._chars: + if value is not None: + value.visitor_walk_recursive(visitor) - def _make_virtual(self, modifier): - return modifier.make_vstrplain(self.mode is mode_unicode) + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + return visitor.visit_vstrplain(self.mode is mode_unicode) class VStringConcatValue(VAbstractStringValue): @@ -256,18 +256,18 @@ offsetbox, mode) return offsetbox - def get_args_for_fail(self, modifier): - if self.box is None and not modifier.already_seen_virtual(self.keybox): - # we don't store the lengthvalue in guards, because the - # guard-failed code starts with a regular STR_CONCAT again - leftbox = self.left.get_key_box() - rightbox = self.right.get_key_box() - modifier.register_virtual_fields(self.keybox, [leftbox, rightbox]) - self.left.get_args_for_fail(modifier) - self.right.get_args_for_fail(modifier) + def _visitor_walk_recursive(self, visitor): + # we don't store the lengthvalue in guards, because the + # guard-failed code starts with a regular STR_CONCAT again + leftbox = self.left.get_key_box() + rightbox = self.right.get_key_box() + visitor.register_virtual_fields(self.keybox, [leftbox, rightbox]) + self.left.visitor_walk_recursive(visitor) + self.right.visitor_walk_recursive(visitor) - def _make_virtual(self, modifier): - return modifier.make_vstrconcat(self.mode is mode_unicode) + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + return visitor.visit_vstrconcat(self.mode is mode_unicode) class 
VStringSliceValue(VAbstractStringValue): @@ -302,18 +302,18 @@ self.vstart.force_box(string_optimizer), offsetbox, lengthbox, mode) - def get_args_for_fail(self, modifier): - if self.box is None and not modifier.already_seen_virtual(self.keybox): - boxes = [self.vstr.get_key_box(), - self.vstart.get_key_box(), - self.vlength.get_key_box()] - modifier.register_virtual_fields(self.keybox, boxes) - self.vstr.get_args_for_fail(modifier) - self.vstart.get_args_for_fail(modifier) - self.vlength.get_args_for_fail(modifier) + def _visitor_walk_recursive(self, visitor): + boxes = [self.vstr.get_key_box(), + self.vstart.get_key_box(), + self.vlength.get_key_box()] + visitor.register_virtual_fields(self.keybox, boxes) + self.vstr.visitor_walk_recursive(visitor) + self.vstart.visitor_walk_recursive(visitor) + self.vlength.visitor_walk_recursive(visitor) - def _make_virtual(self, modifier): - return modifier.make_vstrslice(self.mode is mode_unicode) + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + return visitor.visit_vstrslice(self.mode is mode_unicode) def copy_str_content(string_optimizer, srcbox, targetbox, diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -4,12 +4,14 @@ BoxInt, BoxPtr, BoxFloat, INT, REF, FLOAT, AbstractDescr) from rpython.jit.metainterp.resoperation import rop from rpython.rlib import rarithmetic, rstack -from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id +from rpython.rlib.objectmodel import (we_are_translated, specialize, + compute_unique_id, import_from_mixin) from rpython.rlib.debug import (have_debug_prints, ll_assert, debug_start, debug_stop, debug_print) from rpython.rtyper import annlowlevel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper.lltypesystem.rclass import OBJECTPTR +from rpython.jit.metainterp.walkvirtual import 
VirtualVisitor # Logic to encode the chain of frames and the state of the boxes at a @@ -267,42 +269,60 @@ class ResumeDataVirtualAdder(object): + import_from_mixin(VirtualVisitor) + def __init__(self, storage, memo): self.storage = storage self.memo = memo - def make_virtual(self, known_class, fielddescrs): + def make_virtual_info(self, value, fieldnums): + assert fieldnums is not None + vinfo = value._cached_vinfo + if vinfo is not None and vinfo.equals(fieldnums): + return vinfo + vinfo = value.visitor_dispatch_virtual_type(self) + vinfo.set_content(fieldnums) + value._cached_vinfo = vinfo + return vinfo + + def visit_not_virtual(self, value): + assert 0, "unreachable" + + def visit_virtual(self, known_class, fielddescrs): return VirtualInfo(known_class, fielddescrs) - def make_vstruct(self, typedescr, fielddescrs): + def visit_vstruct(self, typedescr, fielddescrs): return VStructInfo(typedescr, fielddescrs) - def make_varray(self, arraydescr): + def visit_varray(self, arraydescr): return VArrayInfo(arraydescr) - def make_varraystruct(self, arraydescr, fielddescrs): + def visit_varraystruct(self, arraydescr, fielddescrs): return VArrayStructInfo(arraydescr, fielddescrs) - def make_vrawbuffer(self, size, offsets, descrs): + def visit_vrawbuffer(self, size, offsets, descrs): return VRawBufferInfo(size, offsets, descrs) - def make_vrawslice(self, offset): + def visit_vrawslice(self, offset): return VRawSliceInfo(offset) - def make_vstrplain(self, is_unicode=False): + def visit_vstrplain(self, is_unicode=False): if is_unicode: return VUniPlainInfo() - return VStrPlainInfo() + else: + return VStrPlainInfo() - def make_vstrconcat(self, is_unicode=False): + def visit_vstrconcat(self, is_unicode=False): if is_unicode: return VUniConcatInfo() - return VStrConcatInfo() + else: + return VStrConcatInfo() - def make_vstrslice(self, is_unicode=False): + def visit_vstrslice(self, is_unicode=False): if is_unicode: return VUniSliceInfo() - return VStrSliceInfo() + else: + return 
VStrSliceInfo() def register_virtual_fields(self, virtualbox, fieldboxes): tagged = self.liveboxes_from_env.get(virtualbox, UNASSIGNEDVIRTUAL) @@ -352,13 +372,13 @@ else: assert tagbits == TAGVIRTUAL value = optimizer.getvalue(box) - value.get_args_for_fail(self) + value.visitor_walk_recursive(self) for _, box, fieldbox, _ in pending_setfields: self.register_box(box) self.register_box(fieldbox) value = optimizer.getvalue(fieldbox) - value.get_args_for_fail(self) + value.visitor_walk_recursive(self) self._number_virtuals(liveboxes, optimizer, v) self._add_pending_fields(pending_setfields) @@ -410,7 +430,7 @@ value = optimizer.getvalue(virtualbox) fieldnums = [self._gettagged(box) for box in fieldboxes] - vinfo = value.make_virtual_info(self, fieldnums) + vinfo = self.make_virtual_info(value, fieldnums) # if a new vinfo instance is made, we get the fieldnums list we # pass in as an attribute. hackish. if vinfo.fieldnums is not fieldnums: diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -4,7 +4,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.jit.metainterp.optimizeopt.optimizer import OptValue from rpython.jit.metainterp.optimizeopt.virtualize import VirtualValue, VArrayValue -from rpython.jit.metainterp.optimizeopt.virtualize import VStructValue +from rpython.jit.metainterp.optimizeopt.virtualize import VStructValue, AbstractVirtualValue from rpython.jit.metainterp.resume import * from rpython.jit.metainterp.history import BoxInt, BoxPtr, ConstInt from rpython.jit.metainterp.history import ConstPtr, ConstFloat @@ -66,6 +66,26 @@ assert v1.equals([1, 2, 4]) assert not v1.equals([1, 2, 6]) +def test_reuse_vinfo(): + class FakeVInfo(object): + def set_content(self, fieldnums): + self.fieldnums = fieldnums + def equals(self, fieldnums): + return self.fieldnums == fieldnums + class 
FakeVirtualValue(AbstractVirtualValue): + def visitor_dispatch_virtual_type(self, *args): + return FakeVInfo() + modifier = ResumeDataVirtualAdder(None, None) + v1 = FakeVirtualValue(None, None) + vinfo1 = modifier.make_virtual_info(v1, [1, 2, 4]) + vinfo2 = modifier.make_virtual_info(v1, [1, 2, 4]) + assert vinfo1 is vinfo2 + vinfo3 = modifier.make_virtual_info(v1, [1, 2, 6]) + assert vinfo3 is not vinfo2 + vinfo4 = modifier.make_virtual_info(v1, [1, 2, 6]) + assert vinfo3 is vinfo4 + + class MyMetaInterp: _already_allocated_resume_virtuals = None callinfocollection = None diff --git a/rpython/jit/metainterp/walkvirtual.py b/rpython/jit/metainterp/walkvirtual.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/walkvirtual.py @@ -0,0 +1,39 @@ +# this is some common infrastructure for code that needs to walk all virtuals +# at a specific instruction. It is used by resume and unroll. + +class VirtualVisitor(object): + def visit_not_virtual(self, value): + raise NotImplementedError("abstract base class") + + def visit_virtual(self, known_class, fielddescrs): + raise NotImplementedError("abstract base class") + + def visit_vstruct(self, typedescr, fielddescrs): + raise NotImplementedError("abstract base class") + + def visit_varray(self, arraydescr): + raise NotImplementedError("abstract base class") + + def visit_varraystruct(self, arraydescr, fielddescrs): + raise NotImplementedError("abstract base class") + + def visit_vrawbuffer(self, size, offsets, descrs): + raise NotImplementedError("abstract base class") + + def visit_vrawslice(self, offset): + raise NotImplementedError("abstract base class") + + def visit_vstrplain(self, is_unicode=False): + raise NotImplementedError("abstract base class") + + def visit_vstrconcat(self, is_unicode=False): + raise NotImplementedError("abstract base class") + + def visit_vstrslice(self, is_unicode=False): + raise NotImplementedError("abstract base class") + + def register_virtual_fields(self, virtualbox, 
fieldboxes): + raise NotImplementedError("abstract base class") + + def already_seen_virtual(self, virtualbox): + raise NotImplementedError("abstract base class") From noreply at buildbot.pypy.org Wed Apr 16 09:58:10 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 16 Apr 2014 09:58:10 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: fix translation - turns out a mixin doesn't work Message-ID: <20140416075810.5971C1C12E2@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70647:f96b890d7ea5 Date: 2014-04-16 09:57 +0200 http://bitbucket.org/pypy/pypy/changeset/f96b890d7ea5/ Log: fix translation - turns out a mixin doesn't work diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -506,8 +506,7 @@ s.debug_print(" ", seen, bad, metainterp_sd) -class VirtualStateConstructor(object): - import_from_mixin(VirtualVisitor) +class VirtualStateConstructor(VirtualVisitor): def __init__(self, optimizer): self.fieldboxes = {} diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -268,14 +268,15 @@ _frame_info_placeholder = (None, 0, 0) -class ResumeDataVirtualAdder(object): - import_from_mixin(VirtualVisitor) +class ResumeDataVirtualAdder(VirtualVisitor): def __init__(self, storage, memo): self.storage = storage self.memo = memo def make_virtual_info(self, value, fieldnums): + from rpython.jit.metainterp.optimizeopt.virtualize import AbstractVirtualValue + assert isinstance(value, AbstractVirtualValue) assert fieldnums is not None vinfo = value._cached_vinfo if vinfo is not None and vinfo.equals(fieldnums): From noreply at buildbot.pypy.org Wed Apr 16 10:33:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: 
Wed, 16 Apr 2014 10:33:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for 6dac6407412f. Very obscure. Message-ID: <20140416083316.392331D241C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70648:a82950d1732e Date: 2014-04-16 10:32 +0200 http://bitbucket.org/pypy/pypy/changeset/a82950d1732e/ Log: Fix for 6dac6407412f. Very obscure. diff --git a/pypy/module/__pypy__/app_signal.py b/pypy/module/__pypy__/app_signal.py --- a/pypy/module/__pypy__/app_signal.py +++ b/pypy/module/__pypy__/app_signal.py @@ -1,4 +1,9 @@ -import __pypy__.thread +import thread +# ^^ relative import of __pypy__.thread. Note that some tests depend on +# this (test_enable_signals in test_signal.py) to work properly, +# otherwise they get caught in some deadlock waiting for the import +# lock... + class SignalsEnabled(object): '''A context manager to use in non-main threads: @@ -8,7 +13,7 @@ that is within a "with signals_enabled:". This other thread should be ready to handle unexpected exceptions that the signal handler might raise --- notably KeyboardInterrupt.''' - __enter__ = __pypy__.thread._signals_enter - __exit__ = __pypy__.thread._signals_exit + __enter__ = thread._signals_enter + __exit__ = thread._signals_exit signals_enabled = SignalsEnabled() From noreply at buildbot.pypy.org Wed Apr 16 11:23:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 11:23:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix link (thanks Ryan on pull request #229) Message-ID: <20140416092324.AF8C11C12E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70649:6fbffc03e832 Date: 2014-04-16 11:22 +0200 http://bitbucket.org/pypy/pypy/changeset/6fbffc03e832/ Log: Fix link (thanks Ryan on pull request #229) diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -318,7 +318,7 @@ To read more about the RPython limitations read the `RPython description`_. -.. 
_`RPython description`: coding-guide.html#restricted-python +.. _`RPython description`: coding-guide.html#rpython-definition --------------------------------------------------------------- Does RPython have anything to do with Zope's Restricted Python? From noreply at buildbot.pypy.org Wed Apr 16 13:11:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 13:11:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Yet another attempt to unravel the mess that 6dac6407412f exposed. Message-ID: <20140416111108.3C0071D2BAA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70650:867cc494a9fa Date: 2014-04-16 11:10 +0000 http://bitbucket.org/pypy/pypy/changeset/867cc494a9fa/ Log: Yet another attempt to unravel the mess that 6dac6407412f exposed. This makes space.allocate_lock() raise CannotHaveLock if we're translating. Translation passes again. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -337,6 +337,9 @@ return 'internal subclass of %s' % (Class.__name__,) wrappable_class_name._annspecialcase_ = 'specialize:memo' +class CannotHaveLock(Exception): + """Raised by space.allocate_lock() if we're translating.""" + # ____________________________________________________________ class ObjSpace(object): @@ -663,6 +666,11 @@ def __allocate_lock(self): from rpython.rlib.rthread import allocate_lock, error + # hack: we can't have prebuilt locks if we're translating. + # In this special situation we should just not lock at all + # (translation is not multithreaded anyway). 
+ if not we_are_translated() and self.config.translating: + raise CannotHaveLock() try: return allocate_lock() except error: diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -4,7 +4,7 @@ from rpython.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import ObjSpace, W_Root +from pypy.interpreter.baseobjspace import ObjSpace, W_Root, CannotHaveLock from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror @@ -33,19 +33,24 @@ def _try_acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock - if self.slock is None: - self.slock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.slockowner is me: return False # already acquired by the current thread - self.slock.acquire(True) + try: + if self.slock is None: + self.slock = self.space.allocate_lock() + except CannotHaveLock: + pass + else: + self.slock.acquire(True) assert self.slockowner is None self.slockowner = me return True def _release_lock(self): self.slockowner = None - self.slock.release() + if self.slock is not None: + self.slock.release() def lock(self): if not self._try_acquire_lock(): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -8,7 +8,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, generic_new_descr from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.baseobjspace import W_Root, CannotHaveLock from pypy.interpreter.eval import Code from 
pypy.interpreter.pycode import PyCode from rpython.rlib import streamio, jit @@ -753,26 +753,14 @@ me = self.space.getexecutioncontext() # used as thread ident return self.lockowner is me - def _can_have_lock(self): - # hack: we can't have self.lock != None during translation, - # because prebuilt lock objects are not allowed. In this - # special situation we just don't lock at all (translation is - # not multithreaded anyway). - if we_are_translated(): - return True # we need a lock at run-time - elif self.space.config.translating: - assert self.lock is None - return False - else: - return True # in py.py - def acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock if self.lock is None: - if not self._can_have_lock(): + try: + self.lock = self.space.allocate_lock() + except CannotHaveLock: return - self.lock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is me: pass # already acquired by the current thread @@ -790,7 +778,7 @@ # Too bad. This situation can occur if a fork() occurred # with the import lock held, and we're the child. 
return - if not self._can_have_lock(): + if self.lock is None: # CannotHaveLock occurred return space = self.space raise OperationError(space.w_RuntimeError, diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -153,6 +153,9 @@ def __exit__(self, *args): self.release() + def _cleanup_(self): + raise Exception("seeing a prebuilt rpython.rlib.rthread.Lock instance") + # ____________________________________________________________ # # Stack size From noreply at buildbot.pypy.org Wed Apr 16 14:45:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 14:45:31 +0200 (CEST) Subject: [pypy-commit] pypy issue1514: A branch to run the tests in Message-ID: <20140416124531.79F0B1C355D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: issue1514 Changeset: r70651:7b0312827183 Date: 2014-04-16 14:39 +0200 http://bitbucket.org/pypy/pypy/changeset/7b0312827183/ Log: A branch to run the tests in From noreply at buildbot.pypy.org Wed Apr 16 14:45:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 14:45:32 +0200 (CEST) Subject: [pypy-commit] pypy issue1514: Apply pypy-import.diff3 from issue1514. Change the logic a bit Message-ID: <20140416124532.C0CF21C355D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: issue1514 Changeset: r70652:f96656bbecc9 Date: 2014-04-16 14:44 +0200 http://bitbucket.org/pypy/pypy/changeset/f96656bbecc9/ Log: Apply pypy-import.diff3 from issue1514. Change the logic a bit to be closer to the original (in particular, add the object to sys.modules even if it's not a W_Module). 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -440,10 +440,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + assert reuse try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -459,9 +460,20 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: - # Initialize the module + # Add the module to sys.modules and initialize the module + # The order is important to avoid recursions. from pypy.interpreter.module import Module if isinstance(w_mod, Module): + if not reuse and w_mod.startup_called: + # create a copy of the module. (see issue1514) + # eventlet patcher relies on this behaviour. + w_mod2 = self.wrap(Module(self, w_name)) + self.setitem(w_modules, w_name, w_mod2) + w_mod.getdict(self) # unlazy w_initialdict + self.call_method(w_mod2.getdict(self), 'update', + w_mod.w_initialdict) + return w_mod2 + # w_mod.init(self) # Add the module to sys.modules diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -579,7 +579,8 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,7 +203,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = 
marshal.loads @@ -223,7 +222,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,7 +578,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -586,7 +585,6 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): - skip("fix me") import sys, time oldpath = sys.path time.tzset = "" From noreply at buildbot.pypy.org Wed Apr 16 14:49:22 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 16 Apr 2014 14:49:22 +0200 (CEST) Subject: [pypy-commit] pypy default: someone forgot those Message-ID: <20140416124922.3977E1C0026@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70653:6c60fdb521ad Date: 2014-04-16 14:48 +0200 http://bitbucket.org/pypy/pypy/changeset/6c60fdb521ad/ Log: someone forgot those diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -97,6 +97,7 @@ self.indices = [0] * len(self.shape_m1) self.offset = self.array.start + @jit.unroll_safe def next(self): self.index += 1 for i in xrange(self.ndim_m1, -1, -1): @@ -108,6 +109,7 @@ self.indices[i] = 0 self.offset -= self.backstrides[i] + @jit.unroll_safe def next_skip_x(self, step): assert step >= 0 if step == 0: From noreply at buildbot.pypy.org Wed Apr 16 15:14:27 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 16 Apr 2014 15:14:27 +0200 (CEST) Subject: [pypy-commit] pypy default: a more questionable unrolling here 
Message-ID: <20140416131427.57E771C355D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70654:054e2cc2266a Date: 2014-04-16 15:13 +0200 http://bitbucket.org/pypy/pypy/changeset/054e2cc2266a/ Log: a more questionable unrolling here diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -442,6 +442,7 @@ return v1 % v2 @simple_binary_op + @jit.unroll_iff(lambda self, v1, v2: jit.isconstant(v2)) def pow(self, v1, v2): if v2 < 0: return 0 From noreply at buildbot.pypy.org Wed Apr 16 15:27:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 15:27:59 +0200 (CEST) Subject: [pypy-commit] pypy issue1514: hg merge default Message-ID: <20140416132759.D107D1D241C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: issue1514 Changeset: r70655:8edf0636e375 Date: 2014-04-16 15:27 +0200 http://bitbucket.org/pypy/pypy/changeset/8edf0636e375/ Log: hg merge default diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -337,6 +337,9 @@ return 'internal subclass of %s' % (Class.__name__,) wrappable_class_name._annspecialcase_ = 'specialize:memo' +class CannotHaveLock(Exception): + """Raised by space.allocate_lock() if we're translating.""" + # ____________________________________________________________ class ObjSpace(object): @@ -675,6 +678,11 @@ def __allocate_lock(self): from rpython.rlib.rthread import allocate_lock, error + # hack: we can't have prebuilt locks if we're translating. + # In this special situation we should just not lock at all + # (translation is not multithreaded anyway). 
+ if not we_are_translated() and self.config.translating: + raise CannotHaveLock() try: return allocate_lock() except error: diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -4,7 +4,7 @@ from rpython.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import ObjSpace, W_Root +from pypy.interpreter.baseobjspace import ObjSpace, W_Root, CannotHaveLock from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror @@ -33,19 +33,24 @@ def _try_acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock - if self.slock is None: - self.slock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.slockowner is me: return False # already acquired by the current thread - self.slock.acquire(True) + try: + if self.slock is None: + self.slock = self.space.allocate_lock() + except CannotHaveLock: + pass + else: + self.slock.acquire(True) assert self.slockowner is None self.slockowner = me return True def _release_lock(self): self.slockowner = None - self.slock.release() + if self.slock is not None: + self.slock.release() def lock(self): if not self._try_acquire_lock(): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -8,7 +8,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, generic_new_descr from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.baseobjspace import W_Root, CannotHaveLock from pypy.interpreter.eval import Code from 
pypy.interpreter.pycode import PyCode from rpython.rlib import streamio, jit @@ -754,26 +754,14 @@ me = self.space.getexecutioncontext() # used as thread ident return self.lockowner is me - def _can_have_lock(self): - # hack: we can't have self.lock != None during translation, - # because prebuilt lock objects are not allowed. In this - # special situation we just don't lock at all (translation is - # not multithreaded anyway). - if we_are_translated(): - return True # we need a lock at run-time - elif self.space.config.translating: - assert self.lock is None - return False - else: - return True # in py.py - def acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock if self.lock is None: - if not self._can_have_lock(): + try: + self.lock = self.space.allocate_lock() + except CannotHaveLock: return - self.lock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is me: pass # already acquired by the current thread @@ -791,7 +779,7 @@ # Too bad. This situation can occur if a fork() occurred # with the import lock held, and we're the child. 
return - if not self._can_have_lock(): + if self.lock is None: # CannotHaveLock occurred return space = self.space raise OperationError(space.w_RuntimeError, diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -97,6 +97,7 @@ self.indices = [0] * len(self.shape_m1) self.offset = self.array.start + @jit.unroll_safe def next(self): self.index += 1 for i in xrange(self.ndim_m1, -1, -1): @@ -108,6 +109,7 @@ self.indices[i] = 0 self.offset -= self.backstrides[i] + @jit.unroll_safe def next_skip_x(self, step): assert step >= 0 if step == 0: diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -442,6 +442,7 @@ return v1 % v2 @simple_binary_op + @jit.unroll_iff(lambda self, v1, v2: jit.isconstant(v2)) def pow(self, v1, v2): if v2 < 0: return 0 diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -153,6 +153,9 @@ def __exit__(self, *args): self.release() + def _cleanup_(self): + raise Exception("seeing a prebuilt rpython.rlib.rthread.Lock instance") + # ____________________________________________________________ # # Stack size From noreply at buildbot.pypy.org Wed Apr 16 15:54:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 15:54:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Unsure of this fix, but should help fix translation Message-ID: <20140416135423.E195F1C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70656:28f12116ef3d Date: 2014-04-16 15:53 +0200 http://bitbucket.org/pypy/pypy/changeset/28f12116ef3d/ Log: Unsure of this fix, but should help fix translation diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ 
-442,7 +442,7 @@ return v1 % v2 @simple_binary_op - @jit.unroll_iff(lambda self, v1, v2: jit.isconstant(v2)) + @jit.look_inside_iff(lambda self, v1, v2: jit.isconstant(v2)) def pow(self, v1, v2): if v2 < 0: return 0 From noreply at buildbot.pypy.org Wed Apr 16 15:54:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 15:54:27 +0200 (CEST) Subject: [pypy-commit] pypy issue1514: hg merge default Message-ID: <20140416135427.B017C1C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: issue1514 Changeset: r70657:faf6b124f8f7 Date: 2014-04-16 15:53 +0200 http://bitbucket.org/pypy/pypy/changeset/faf6b124f8f7/ Log: hg merge default diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -442,7 +442,7 @@ return v1 % v2 @simple_binary_op - @jit.unroll_iff(lambda self, v1, v2: jit.isconstant(v2)) + @jit.look_inside_iff(lambda self, v1, v2: jit.isconstant(v2)) def pow(self, v1, v2): if v2 < 0: return 0 From noreply at buildbot.pypy.org Wed Apr 16 16:03:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 16:03:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the skipping condition (it was "skip always" by mistake) Message-ID: <20140416140329.F2A6B1C12E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70658:3a448de2c18d Date: 2014-04-16 16:02 +0200 http://bitbucket.org/pypy/pypy/changeset/3a448de2c18d/ Log: Fix the skipping condition (it was "skip always" by mistake) diff --git a/rpython/rlib/test/test_rlocale.py b/rpython/rlib/test/test_rlocale.py --- a/rpython/rlib/test/test_rlocale.py +++ b/rpython/rlib/test/test_rlocale.py @@ -37,7 +37,7 @@ assert isinstance(grouping, str) def test_libintl(): - if sys.platform != "darwin" or not sys.platform.startswith("linux"): + if sys.platform != "darwin" and not sys.platform.startswith("linux"): py.test.skip("there is (maybe) no libintl here") _gettext = 
external('gettext', [rffi.CCHARP], rffi.CCHARP) res = _gettext("1234") From noreply at buildbot.pypy.org Wed Apr 16 16:21:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 16:21:15 +0200 (CEST) Subject: [pypy-commit] pypy default: jit.isconstant(float) Message-ID: <20140416142115.DB7051C0476@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70659:31fa36a1d761 Date: 2014-04-16 16:20 +0200 http://bitbucket.org/pypy/pypy/changeset/31fa36a1d761/ Log: jit.isconstant(float) diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -887,6 +887,10 @@ def bhimpl_int_isconstant(x): return False + @arguments("f", returns="i") + def bhimpl_float_isconstant(x): + return False + @arguments("r", returns="i") def bhimpl_ref_isconstant(x): return False diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1169,7 +1169,9 @@ def _opimpl_isconstant(self, box): return ConstInt(isinstance(box, Const)) - opimpl_int_isconstant = opimpl_ref_isconstant = _opimpl_isconstant + opimpl_int_isconstant = _opimpl_isconstant + opimpl_ref_isconstant = _opimpl_isconstant + opimpl_float_isconstant = _opimpl_isconstant @arguments("box") def _opimpl_isvirtual(self, box): diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3329,6 +3329,25 @@ assert res == main(1, 10, 2) self.check_resops(call=0) + def test_look_inside_iff_const_float(self): + @look_inside_iff(lambda arg: isconstant(arg)) + def f(arg): + return arg + 0.5 + + driver = JitDriver(greens = [], reds = ['n', 'total']) + + def main(n): + total = 0.0 + while n > 0: + driver.jit_merge_point(n=n, total=total) + total = f(total) + n -= 1 + 
return total + + res = self.meta_interp(main, [10], enable_opts='') + assert res == 5.0 + self.check_resops(call=1) + def test_look_inside_iff_virtual(self): # There's no good reason for this to be look_inside_iff, but it's a test! @look_inside_iff(lambda arg, n: isvirtual(arg)) From noreply at buildbot.pypy.org Wed Apr 16 16:21:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 16:21:42 +0200 (CEST) Subject: [pypy-commit] pypy issue1514: hg merge default Message-ID: <20140416142142.5F7241C0476@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: issue1514 Changeset: r70660:aed75d5736d6 Date: 2014-04-16 16:20 +0200 http://bitbucket.org/pypy/pypy/changeset/aed75d5736d6/ Log: hg merge default diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -887,6 +887,10 @@ def bhimpl_int_isconstant(x): return False + @arguments("f", returns="i") + def bhimpl_float_isconstant(x): + return False + @arguments("r", returns="i") def bhimpl_ref_isconstant(x): return False diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1169,7 +1169,9 @@ def _opimpl_isconstant(self, box): return ConstInt(isinstance(box, Const)) - opimpl_int_isconstant = opimpl_ref_isconstant = _opimpl_isconstant + opimpl_int_isconstant = _opimpl_isconstant + opimpl_ref_isconstant = _opimpl_isconstant + opimpl_float_isconstant = _opimpl_isconstant @arguments("box") def _opimpl_isvirtual(self, box): diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3329,6 +3329,25 @@ assert res == main(1, 10, 2) self.check_resops(call=0) + def test_look_inside_iff_const_float(self): + @look_inside_iff(lambda arg: isconstant(arg)) + 
def f(arg): + return arg + 0.5 + + driver = JitDriver(greens = [], reds = ['n', 'total']) + + def main(n): + total = 0.0 + while n > 0: + driver.jit_merge_point(n=n, total=total) + total = f(total) + n -= 1 + return total + + res = self.meta_interp(main, [10], enable_opts='') + assert res == 5.0 + self.check_resops(call=1) + def test_look_inside_iff_virtual(self): # There's no good reason for this to be look_inside_iff, but it's a test! @look_inside_iff(lambda arg, n: isvirtual(arg)) diff --git a/rpython/rlib/test/test_rlocale.py b/rpython/rlib/test/test_rlocale.py --- a/rpython/rlib/test/test_rlocale.py +++ b/rpython/rlib/test/test_rlocale.py @@ -37,7 +37,7 @@ assert isinstance(grouping, str) def test_libintl(): - if sys.platform != "darwin" or not sys.platform.startswith("linux"): + if sys.platform != "darwin" and not sys.platform.startswith("linux"): py.test.skip("there is (maybe) no libintl here") _gettext = external('gettext', [rffi.CCHARP], rffi.CCHARP) res = _gettext("1234") From noreply at buildbot.pypy.org Wed Apr 16 17:40:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 17:40:08 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Use stm stack markers Message-ID: <20140416154008.E8B651C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70661:7ea6e9a3e2b8 Date: 2014-04-16 17:30 +0200 http://bitbucket.org/pypy/pypy/changeset/7ea6e9a3e2b8/ Log: Use stm stack markers diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -859,11 +859,18 @@ # (ebp is a writeable object and does not need a write-barrier # again (ensured by the code calling the loop)) self.mc.MOV(ebx, self.heap_shadowstack_top()) - self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), ebp.value) - # MOV [ebx], ebp if self.cpu.gc_ll_descr.stm: + self.mc.MOV_mi((self.SEGMENT_NO, ebx.value, 0), + rstm.STM_STACK_MARKER_NEW) # MOV 
[ebx], MARKER_NEW + self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, WORD), + ebp.value) # MOV [ebx+WORD], ebp self.mc.MOV_sr(STM_OLD_SHADOWSTACK, ebx.value) - self.mc.ADD_ri(ebx.value, WORD) + # MOV [esp+xx], ebx + self.mc.ADD_ri(ebx.value, 2 * WORD) + else: + self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), + ebp.value) # MOV [ebx], ebp + self.mc.ADD_ri(ebx.value, WORD) self.mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx def _call_footer_shadowstack(self): diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -12,6 +12,8 @@ TID = rffi.UINT tid_offset = CFlexSymbolic('offsetof(struct rpyobj_s, tid)') stm_nb_segments = CFlexSymbolic('STM_NB_SEGMENTS') +stm_stack_marker_new = CFlexSymbolic('STM_STACK_MARKER_NEW') +stm_stack_marker_old = CFlexSymbolic('STM_STACK_MARKER_OLD') adr_nursery_free = CFlexSymbolic('((long)&STM_SEGMENT->nursery_current)') adr_nursery_top = CFlexSymbolic('((long)&STM_SEGMENT->nursery_end)') adr_pypy_stm_nursery_low_fill_mark = ( From noreply at buildbot.pypy.org Wed Apr 16 17:40:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 17:40:10 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/918b1901b1f9 Message-ID: <20140416154010.1C4431C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70662:4ca36098b3de Date: 2014-04-16 17:31 +0200 http://bitbucket.org/pypy/pypy/changeset/4ca36098b3de/ Log: import stmgc/918b1901b1f9 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -a158a889e78b +918b1901b1f9 diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -380,8 +380,8 @@ struct stm_shadowentry_s *current = 
tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - assert(current->ss != (object_t *)-1); - mark_visit_object(current->ss, segment_base); + if (((uintptr_t)current->ss) > STM_STACK_MARKER_OLD) + mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -157,10 +157,32 @@ stm_thread_local_t *tl = STM_SEGMENT->running_thread; struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; - while (current-- != base) { - assert(current->ss != (object_t *)-1); - minor_trace_if_young(¤t->ss); + while (1) { + --current; + OPT_ASSERT(current >= base); + + switch ((uintptr_t)current->ss) { + + case 0: /* NULL */ + continue; + + case STM_STACK_MARKER_NEW: + /* the marker was not already seen: mark it as seen, + but continue looking more deeply in the shadowstack */ + current->ss = (object_t *)STM_STACK_MARKER_OLD; + continue; + + case STM_STACK_MARKER_OLD: + /* the marker was already seen: we can stop the + root stack tracing at this point */ + goto interrupt; + + default: + /* the stack entry is a regular pointer */ + minor_trace_if_young(¤t->ss); + } } + interrupt: minor_trace_if_young(&tl->thread_local_obj); } diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -154,11 +154,13 @@ struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start; tl->shadowstack = s; tl->shadowstack_base = s; + STM_PUSH_ROOT(*tl, STM_STACK_MARKER_OLD); } static void _done_shadow_stack(stm_thread_local_t *tl) { - assert(tl->shadowstack >= tl->shadowstack_base); + assert(tl->shadowstack > 
tl->shadowstack_base); + assert(tl->shadowstack_base->ss == (object_t *)STM_STACK_MARKER_OLD); char *start = (char *)tl->shadowstack_base; _shadowstack_trap_page(start, PROT_READ | PROT_WRITE); diff --git a/rpython/translator/stm/src_stm/stm/timing.c b/rpython/translator/stm/src_stm/stm/timing.c --- a/rpython/translator/stm/src_stm/stm/timing.c +++ b/rpython/translator/stm/src_stm/stm/timing.c @@ -71,7 +71,7 @@ s_mutex_lock(); fprintf(stderr, "thread %p:\n", tl); for (i = 0; i < _STM_TIME_N; i++) { - fprintf(stderr, " %-24s %9u %.3f s\n", + fprintf(stderr, " %-24s %9u %8.3f s\n", timer_names[i], tl->events[i], (double)tl->timing[i]); } s_mutex_unlock(); diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -265,6 +265,8 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) +#define STM_STACK_MARKER_NEW 1 +#define STM_STACK_MARKER_OLD 2 /* Every thread needs to have a corresponding stm_thread_local_t From noreply at buildbot.pypy.org Wed Apr 16 17:40:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 17:40:11 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Kill what remains of the previous approach to abort_info Message-ID: <20140416154011.4B0E81C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70663:de599561d2a5 Date: 2014-04-16 17:32 +0200 http://bitbucket.org/pypy/pypy/changeset/de599561d2a5/ Log: Kill what remains of the previous approach to abort_info diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -56,10 +56,6 @@ return frame def enter(self, frame): - if self.space.config.translation.stm: - if not 
self.space.config.translation.jit: # XXX - from pypy.module.thread.stm import enter_frame - enter_frame(self, frame) frame.f_backref = self.topframeref self.topframeref = jit.virtual_ref(frame) @@ -81,11 +77,6 @@ frame_vref() jit.virtual_ref_finish(frame_vref, frame) - if self.space.config.translation.stm: - if not self.space.config.translation.jit: # XXX - from pypy.module.thread.stm import leave_frame - leave_frame(self, frame) - # ________________________________________________________________ def c_call_trace(self, frame, w_func, args=None): diff --git a/pypy/module/thread/stm.py b/pypy/module/thread/stm.py --- a/pypy/module/thread/stm.py +++ b/pypy/module/thread/stm.py @@ -41,21 +41,6 @@ if not we_are_translated() and not hasattr(ec, '_thread_local_dicts'): initialize_execution_context(ec) - at jit.dont_look_inside # XXX: handle abort_info_push in JIT -def enter_frame(ec, frame): - """Called from ExecutionContext.enter().""" - if frame.hide(): - return - rstm.abort_info_push(frame.pycode, ('[', 'co_filename', 'co_name', - 'co_firstlineno', 'co_lnotab')) - rstm.abort_info_push(frame, ('last_instr', ']')) - -def leave_frame(ec, frame): - """Called from ExecutionContext.leave().""" - if frame.hide(): - return - rstm.abort_info_pop(2) - class STMThreadLocals(BaseThreadLocals): diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -80,18 +80,6 @@ def is_atomic(): return llop.stm_get_atomic(lltype.Signed) -def abort_info_push(instance, fieldnames): - "Special-cased below." 
- - at dont_look_inside -def abort_info_pop(count): - if we_are_translated(): - llop.stm_abort_info_pop(lltype.Void, count) - - at dont_look_inside -def charp_inspect_abort_info(): - return llop.stm_inspect_abort_info(rffi.CCHARP) - @dont_look_inside def abort_and_retry(): llop.stm_abort_and_retry(lltype.Void) @@ -161,58 +149,6 @@ # ____________________________________________________________ -class AbortInfoPush(ExtRegistryEntry): - _about_ = abort_info_push - - def compute_result_annotation(self, s_instance, s_fieldnames): - from rpython.annotator.model import SomeInstance - assert isinstance(s_instance, SomeInstance) - assert s_fieldnames.is_constant() - assert isinstance(s_fieldnames.const, tuple) # tuple of names - - def specialize_call(self, hop): - fieldnames = hop.args_s[1].const - lst = [] - v_instance = hop.inputarg(hop.args_r[0], arg=0) - for fieldname in fieldnames: - if fieldname == '[': - lst.append(-2) # start of sublist - continue - if fieldname == ']': - lst.append(-1) # end of sublist - continue - fieldname = 'inst_' + fieldname - extraofs = None - STRUCT = v_instance.concretetype.TO - while not hasattr(STRUCT, fieldname): - STRUCT = STRUCT.super - TYPE = getattr(STRUCT, fieldname) - if TYPE == lltype.Signed: - kind = 1 - elif TYPE == lltype.Unsigned: - kind = 2 - elif TYPE == lltype.Ptr(rstr.STR): - kind = 3 - extraofs = llmemory.offsetof(rstr.STR, 'chars') - else: - raise NotImplementedError( - "abort_info_push(%s, %r): field of type %r" - % (STRUCT.__name__, fieldname, TYPE)) - lst.append(kind) - lst.append(llmemory.offsetof(STRUCT, fieldname)) - if extraofs is not None: - lst.append(extraofs) - lst.append(0) - ARRAY = rffi.CArray(lltype.Signed) - array = lltype.malloc(ARRAY, len(lst), flavor='raw', immortal=True) - for i in range(len(lst)): - array[i] = lst[i] - c_array = hop.inputconst(lltype.Ptr(ARRAY), array) - hop.exception_cannot_occur() - hop.genop('stm_abort_info_push', [v_instance, c_array]) - -# 
____________________________________________________________ - class ThreadLocalReference(object): _COUNT = 1 From noreply at buildbot.pypy.org Wed Apr 16 17:40:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 17:40:12 +0200 (CEST) Subject: [pypy-commit] pypy issue1514: ready for merge Message-ID: <20140416154012.6FD791C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: issue1514 Changeset: r70664:3dff2f025058 Date: 2014-04-16 17:36 +0200 http://bitbucket.org/pypy/pypy/changeset/3dff2f025058/ Log: ready for merge From noreply at buildbot.pypy.org Wed Apr 16 17:40:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Apr 2014 17:40:13 +0200 (CEST) Subject: [pypy-commit] pypy default: hg merge issue1514 Message-ID: <20140416154013.B25141C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70665:c2c709110379 Date: 2014-04-16 17:38 +0200 http://bitbucket.org/pypy/pypy/changeset/c2c709110379/ Log: hg merge issue1514 Support strange manipulations of sys.modules (thanks yamt) involving deleting built-in modules from sys.modules and reimporting them. Required by eventlet. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -443,10 +443,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + assert reuse try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -462,9 +463,20 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: - # Initialize the module + # Add the module to sys.modules and initialize the module + # The order is important to avoid recursions. from pypy.interpreter.module import Module if isinstance(w_mod, Module): + if not reuse and w_mod.startup_called: + # create a copy of the module. 
(see issue1514) + # eventlet patcher relies on this behaviour. + w_mod2 = self.wrap(Module(self, w_name)) + self.setitem(w_modules, w_name, w_mod2) + w_mod.getdict(self) # unlazy w_initialdict + self.call_method(w_mod2.getdict(self), 'update', + w_mod.w_initialdict) + return w_mod2 + # w_mod.init(self) # Add the module to sys.modules diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -579,7 +579,8 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,7 +203,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = marshal.loads @@ -223,7 +222,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,7 +578,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -586,7 +585,6 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): - skip("fix me") import sys, time oldpath = sys.path time.tzset = "" From noreply at buildbot.pypy.org 
Wed Apr 16 18:40:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 16 Apr 2014 18:40:39 +0200 (CEST) Subject: [pypy-commit] pypy default: silence import RuntimeWarning Message-ID: <20140416164039.274E31C02FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70666:e370d5d04364 Date: 2014-04-16 12:18 -0400 http://bitbucket.org/pypy/pypy/changeset/e370d5d04364/ Log: silence import RuntimeWarning diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import math import _numpypy From noreply at buildbot.pypy.org Wed Apr 16 18:44:05 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 16 Apr 2014 18:44:05 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140416164405.E43E81D236E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70667:db4a8be80c49 Date: 2014-04-16 12:43 -0400 http://bitbucket.org/pypy/pypy/changeset/db4a8be80c49/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -133,4 +133,7 @@ .. branch: ast-issue1673 fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field \ No newline at end of file +there is missing field + +.. 
branch: issue1514 +Fix issues with reimporting builtin modules From noreply at buildbot.pypy.org Wed Apr 16 19:27:59 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 16 Apr 2014 19:27:59 +0200 (CEST) Subject: [pypy-commit] pypy default: update micronumpy test_zjit Message-ID: <20140416172759.3AB0D1C0476@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70668:5191bc7c4184 Date: 2014-04-16 13:26 -0400 http://bitbucket.org/pypy/pypy/changeset/5191bc7c4184/ Log: update micronumpy test_zjit diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -482,16 +482,19 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ - 'call': 2, - 'getfield_gc': 2, - 'guard_no_exception': 2, + 'getarrayitem_gc': 4, + 'getfield_gc': 4, 'guard_not_invalidated': 1, - 'guard_true': 1, + 'guard_true': 3, + 'int_add': 6, 'int_gt': 1, + 'int_lt': 2, 'int_sub': 1, 'jump': 1, 'raw_load': 1, 'raw_store': 1, + 'setarrayitem_gc': 2, + 'setfield_gc': 4, }) def define_dot(): @@ -506,36 +509,43 @@ result = self.run("dot") assert result == 184 self.check_trace_count(3) - self.check_simple_loop({'float_add': 1, - 'float_mul': 1, - 'guard_not_invalidated': 1, - 'guard_true': 1, - 'int_add': 3, - 'int_lt': 1, - 'jump': 1, - 'raw_load': 2}) - self.check_resops({'arraylen_gc': 1, - 'call': 3, - 'float_add': 2, - 'float_mul': 2, - 'getfield_gc': 26, - 'getfield_gc_pure': 24, - 'guard_class': 4, - 'guard_false': 2, - 'guard_no_exception': 3, - 'guard_nonnull': 12, - 'guard_nonnull_class': 4, - 'guard_not_invalidated': 2, - 'guard_true': 9, - 'guard_value': 4, - 'int_add': 6, - 'int_ge': 3, - 'int_lt': 4, - 'jump': 3, - 'new_array': 1, - 'raw_load': 6, - 'raw_store': 1, - 'setfield_gc': 3}) + self.check_simple_loop({ + 'float_add': 1, + 'float_mul': 1, + 'guard_not_invalidated': 1, + 'guard_true': 1, + 'int_add': 3, + 
'int_lt': 1, + 'jump': 1, + 'raw_load': 2, + }) + self.check_resops({ + 'arraylen_gc': 1, + 'float_add': 2, + 'float_mul': 2, + 'getarrayitem_gc': 11, + 'getarrayitem_gc_pure': 15, + 'getfield_gc': 35, + 'getfield_gc_pure': 39, + 'guard_class': 4, + 'guard_false': 14, + 'guard_nonnull': 12, + 'guard_nonnull_class': 4, + 'guard_not_invalidated': 2, + 'guard_true': 13, + 'guard_value': 4, + 'int_add': 25, + 'int_ge': 4, + 'int_le': 8, + 'int_lt': 11, + 'int_sub': 4, + 'jump': 3, + 'new_array': 1, + 'raw_load': 6, + 'raw_store': 1, + 'setarrayitem_gc': 8, + 'setfield_gc': 15, + }) def define_argsort(): return """ From noreply at buildbot.pypy.org Thu Apr 17 00:28:12 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 17 Apr 2014 00:28:12 +0200 (CEST) Subject: [pypy-commit] pypy default: eliminate duplicate getarrayitem_gc in numpy iterator next Message-ID: <20140416222812.E43F41C02FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70669:4b3a31de60d3 Date: 2014-04-16 18:22 -0400 http://bitbucket.org/pypy/pypy/changeset/4b3a31de60d3/ Log: eliminate duplicate getarrayitem_gc in numpy iterator next diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -101,8 +101,9 @@ def next(self): self.index += 1 for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < self.shape_m1[i]: - self.indices[i] += 1 + idx = self.indices[i] + if idx < self.shape_m1[i]: + self.indices[i] = idx + 1 self.offset += self.strides[i] break else: @@ -116,8 +117,9 @@ return self.index += step for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < (self.shape_m1[i] + 1) - step: - self.indices[i] += step + idx = self.indices[i] + if idx < (self.shape_m1[i] + 1) - step: + self.indices[i] = idx + step self.offset += self.strides[i] * step break else: diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py 
--- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -482,7 +482,7 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ - 'getarrayitem_gc': 4, + 'getarrayitem_gc': 2, 'getfield_gc': 4, 'guard_not_invalidated': 1, 'guard_true': 3, @@ -523,7 +523,7 @@ 'arraylen_gc': 1, 'float_add': 2, 'float_mul': 2, - 'getarrayitem_gc': 11, + 'getarrayitem_gc': 7, 'getarrayitem_gc_pure': 15, 'getfield_gc': 35, 'getfield_gc_pure': 39, From noreply at buildbot.pypy.org Thu Apr 17 01:34:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 01:34:33 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: disable 'external_loop' and 'buffering' flags pending future refactor, fix translation Message-ID: <20140416233433.32B411C0476@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70670:873c97659fd1 Date: 2014-04-16 05:57 +0300 http://bitbucket.org/pypy/pypy/changeset/873c97659fd1/ Log: disable 'external_loop' and 'buffering' flags pending future refactor, fix translation diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -155,8 +155,12 @@ 'expected string or Unicode object, %s found' % typename)) item = space.str_w(w_item) if item == 'external_loop': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer external_loop not implemented yet')) nditer.external_loop = True elif item == 'buffered': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer buffered not implemented yet')) # For numpy compatability nditer.buffered = True elif item == 'c_index': @@ -307,7 +311,7 @@ get_readwrite_slice) self.op_flags[i].allocate = True continue - if self.op_flags[i] == 'w': + if self.op_flags[i].rw == 'w': continue out_dtype = ufuncs.find_binop_result_dtype(space, self.seq[i].get_dtype(), out_dtype) @@ -348,7 +352,7 @@ l = axis_len 
elif axis_len != l: raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) - self.op_axes.append([space.int_w(x) if not space.is_none(x) else space.w_None for x in space.listview(w_axis)]) + self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) if l == -1: raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) raise Exception('xxx TODO') @@ -441,7 +445,7 @@ l_w = [] for op in self.seq: l_w.append(op.descr_view(space)) - return space.newlist(l_w) + return space.newlist(l_w) def descr_get_dtypes(self, space): res = [None] * len(self.seq) diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -38,6 +38,10 @@ def test_external_loop(self): from numpy import arange, nditer, array a = arange(24).reshape(2, 3, 4) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') r = [] n = 0 for x in nditer(a, flags=['external_loop']): @@ -115,13 +119,17 @@ it[0] = it.multi_index[1] - it.multi_index[0] it.iternext() assert (a == [[0, 1, 2], [-1, 0, 1]]).all() - b = zeros((2, 3)) - exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) - assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") + # b = zeros((2, 3)) + # exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) + # assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") def test_buffered(self): from numpy import arange, nditer, array a = arange(6).reshape(2,3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered']) + skip('nditer buffered not implmented') r = [] for x in 
nditer(a, flags=['external_loop', 'buffered'], order='F'): r.append(x) @@ -189,6 +197,10 @@ def test_outarg(self): from numpy import nditer, zeros, arange + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [1, 2], flags=['external_loop']) + skip('nditer external_loop not implmented') def square1(a): it = nditer([a, None]) @@ -215,6 +227,10 @@ def test_outer_product(self): from numpy import nditer, arange a = arange(3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') b = arange(8).reshape(2,4) it = nditer([a, b, None], flags=['external_loop'], op_axes=[[0, -1, -1], [-1, 0, 1], None]) From noreply at buildbot.pypy.org Thu Apr 17 01:34:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 01:34:34 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: implement op_dtypes Message-ID: <20140416233434.68F321C0476@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70671:0dca5996f880 Date: 2014-04-16 23:10 +0300 http://bitbucket.org/pypy/pypy/changeset/0dca5996f880/ Log: implement op_dtypes diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -7,6 +7,7 @@ shape_agreement, shape_agreement_multiple) from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator from pypy.module.micronumpy.concrete import SliceArray +from pypy.module.micronumpy.descriptor import decode_w_dtype from pypy.module.micronumpy import ufuncs @@ -201,8 +202,8 @@ else: raise NotImplementedError('not implemented yet') -def get_iter(space, order, arr, shape): - imp = arr.implementation +def get_iter(space, order, arr, shape, dtype): + imp = arr.implementation.astype(space, dtype) backward = is_backward(imp, order) if (imp.strides[0] < imp.strides[-1] and not backward) or 
\ (imp.strides[0] > imp.strides[-1] and backward): @@ -291,8 +292,13 @@ if not space.is_none(w_op_axes): self.set_op_axes(space, w_op_axes) if not space.is_none(w_op_dtypes): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'nditer op_dtypes kwarg not implemented yet')) + w_seq_as_list = space.listview(w_op_dtypes) + self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] + if len(self.dtypes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap( + "op_dtypes must be a tuple/list matching the number of ops")) + else: + self.dtypes = [] self.iters=[] outargs = [i for i in range(len(self.seq)) \ if self.seq[i] is None or self.op_flags[i].rw == 'w'] @@ -304,7 +310,7 @@ shape=out_shape) if len(outargs) > 0: # Make None operands writeonly and flagged for allocation - out_dtype = None + out_dtype = self.dtypes[0] if len(self.dtypes) > 0 else None for i in range(len(self.seq)): if self.seq[i] is None: self.op_flags[i].get_it_item = (get_readwrite_item, @@ -331,6 +337,19 @@ else: backward = self.order != self.tracked_index self.index_iter = IndexIterator(iter_shape, backward=backward) + if len(self.dtypes) > 0: + # Make sure dtypes make sense + for i in range(len(self.seq)): + selfd = self.dtypes[i] + seq_d = self.seq[i].get_dtype() + if not selfd: + self.dtypes[i] = seq_d + elif selfd != seq_d and not 'r' in self.op_flags[i].tmp_copy: + raise OperationError(space.w_TypeError, space.wrap( + "Iterator operand required copying or buffering")) + else: + #copy them from seq + self.dtypes = [s.get_dtype() for s in self.seq] if self.external_loop: for i in range(len(self.seq)): self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, @@ -338,7 +357,8 @@ else: for i in range(len(self.seq)): self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i], iter_shape), self.op_flags[i])) + self.seq[i], iter_shape, self.dtypes[i]), + self.op_flags[i])) def set_op_axes(self, space, w_op_axes): if 
space.len_w(w_op_axes) != len(self.seq): diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -140,11 +140,7 @@ def test_op_dtype(self): from numpy import arange, nditer, sqrt, array - import sys a = arange(6).reshape(2,3) - 3 - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, nditer, a, op_dtypes=['complex']) - skip('nditer op_dtypes kwarg not implemented yet') exc = raises(TypeError, nditer, a, op_dtypes=['complex']) assert str(exc.value).startswith("Iterator operand required copying or buffering") r = [] @@ -154,7 +150,7 @@ assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, 1+0j, 1.41421356237+0j]).sum()) < 1e-5 r = [] - for x in nditer(a, flags=['buffered'], + for x in nditer(a, op_flags=['copy'], op_dtypes=['complex128']): r.append(sqrt(x)) assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, From noreply at buildbot.pypy.org Thu Apr 17 01:34:35 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 01:34:35 +0200 (CEST) Subject: [pypy-commit] pypy default: os.path.isdir calls nt.isdir on win32 which is not rpython after PyWin is installed to host python Message-ID: <20140416233435.A677C1C0476@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70672:5494b1aac76f Date: 2014-04-16 06:07 +0300 http://bitbucket.org/pypy/pypy/changeset/5494b1aac76f/ Log: os.path.isdir calls nt.isdir on win32 which is not rpython after PyWin is installed to host python diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -2,7 +2,7 @@ Implementation of the interpreter-level default import logic. 
""" -import sys, os, stat +import sys, os, stat, genericpath from pypy.interpreter.module import Module from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -522,7 +522,8 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) - if os.path.isdir(filepart) and case_ok(filepart): + # os.path.isdir on win32 is not rpython when pywin32 installed + if genericpath.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) if modtype in (PY_SOURCE, PY_COMPILED): From noreply at buildbot.pypy.org Thu Apr 17 01:34:37 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 01:34:37 +0200 (CEST) Subject: [pypy-commit] pypy default: merge default into branch Message-ID: <20140416233437.0C3911C0476@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70673:bace5e5bd016 Date: 2014-04-16 23:10 +0300 http://bitbucket.org/pypy/pypy/changeset/bace5e5bd016/ Log: merge default into branch diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,6 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', + 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -458,6 +458,13 @@ return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) + def readonly(self): + return NonWritableSlice(self.start, self.strides, self.backstrides, self.shape, self.parent, self.orig_arr, self.dtype) + +class NonWritableSlice(SliceArray): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise 
OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) class VoidBoxStorage(BaseConcreteArray): def __init__(self, size, dtype): diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,6 +42,7 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support +from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -142,6 +143,37 @@ def setitem(self, elem): self.array.setitem(self.offset, elem) +class SliceIterator(ArrayIter): + def __init__(self, arr, strides, backstrides, shape, order="C", + backward=False, dtype=None): + if dtype is None: + dtype = arr.implementation.dtype + self.dtype = dtype + self.arr = arr + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.elsize] + strides = strides[1:] + backstrides = backstrides[1:] + shape = shape[1:] + strides.reverse() + backstrides.reverse() + shape.reverse() + size = support.product(shape) + else: + shape = [support.product(shape)] + strides, backstrides = calc_strides(shape, dtype, order) + size = 1 + self.slicesize = support.product(shape) + self.gap = strides + + ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) + + def getslice(self): + from pypy.module.micronumpy.concrete import SliceArray + retVal = SliceArray(self.offset, self.gap, self.backstrides, + [self.slicesize], self.arr.implementation, self.arr, self.dtype) + return retVal def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/nditer.py @@ -0,0 +1,577 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import 
interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.error import OperationError +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.strides import (calculate_broadcast_strides, + shape_agreement, shape_agreement_multiple) +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.concrete import SliceArray +from pypy.module.micronumpy.descriptor import decode_w_dtype +from pypy.module.micronumpy import ufuncs + + +class AbstractIterator(object): + def done(self): + raise NotImplementedError("Abstract Class") + + def next(self): + raise NotImplementedError("Abstract Class") + + def getitem(self, space, array): + raise NotImplementedError("Abstract Class") + +class IteratorMixin(object): + _mixin_ = True + def __init__(self, it, op_flags): + self.it = it + self.op_flags = op_flags + + def done(self): + return self.it.done() + + def next(self): + self.it.next() + + def getitem(self, space, array): + return self.op_flags.get_it_item[self.index](space, array, self.it) + + def setitem(self, space, array, val): + xxx + +class BoxIterator(IteratorMixin, AbstractIterator): + index = 0 + +class ExternalLoopIterator(IteratorMixin, AbstractIterator): + index = 1 + +def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): + ret = [] + if space.is_w(w_op_flags, space.w_None): + for i in range(n): + ret.append(OpFlag()) + elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + else: + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + for item in w_lst: + 
ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) + for i in range(n): + ret.append(op_flag) + return ret + +class OpFlag(object): + def __init__(self): + self.rw = 'r' + self.broadcast = True + self.force_contig = False + self.force_align = False + self.native_byte_order = False + self.tmp_copy = '' + self.allocate = False + self.get_it_item = (get_readonly_item, get_readonly_slice) + +def get_readonly_item(space, array, it): + return space.wrap(it.getitem()) + +def get_readwrite_item(space, array, it): + #create a single-value view (since scalars are not views) + res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) + #it.dtype.setitem(res, 0, it.getitem()) + return W_NDimArray(res) + +def get_readonly_slice(space, array, it): + return W_NDimArray(it.getslice().readonly()) + +def get_readwrite_slice(space, array, it): + return W_NDimArray(it.getslice()) + +def parse_op_flag(space, lst): + op_flag = OpFlag() + for w_item in lst: + item = space.str_w(w_item) + if item == 'readonly': + op_flag.rw = 'r' + elif item == 'readwrite': + op_flag.rw = 'rw' + elif item == 'writeonly': + op_flag.rw = 'w' + elif item == 'no_broadcast': + op_flag.broadcast = False + elif item == 'contig': + op_flag.force_contig = True + elif item == 'aligned': + op_flag.force_align = True + elif item == 'nbo': + op_flag.native_byte_order = True + elif item == 'copy': + op_flag.tmp_copy = 'r' + elif item == 'updateifcopy': + op_flag.tmp_copy = 'rw' + elif item == 'allocate': + op_flag.allocate = True + elif item == 'no_subtype': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"no_subtype" op_flag not implemented yet')) + elif item == 'arraymask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"arraymask" op_flag not implemented yet')) + elif item == 'writemask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"writemask" op_flag not implemented yet')) + else: + 
raise OperationError(space.w_ValueError, space.wrap( + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == 'r': + op_flag.get_it_item = (get_readonly_item, get_readonly_slice) + elif op_flag.rw == 'rw': + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + elif op_flag.rw == 'w': + # XXX Extra logic needed to make sure writeonly + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + return op_flag + +def parse_func_flags(space, nditer, w_flags): + if space.is_w(w_flags, space.w_None): + return + elif not space.isinstance_w(w_flags, space.w_tuple) and not \ + space.isinstance_w(w_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + 'Iter global flags must be a list or tuple of strings')) + lst = space.listview(w_flags) + for w_item in lst: + if not space.isinstance_w(w_item, space.w_str) and not \ + space.isinstance_w(w_item, space.w_unicode): + typename = space.type(w_item).getname(space) + raise OperationError(space.w_TypeError, space.wrap( + 'expected string or Unicode object, %s found' % typename)) + item = space.str_w(w_item) + if item == 'external_loop': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer external_loop not implemented yet')) + nditer.external_loop = True + elif item == 'buffered': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer buffered not implemented yet')) + # For numpy compatability + nditer.buffered = True + elif item == 'c_index': + nditer.tracked_index = 'C' + elif item == 'f_index': + nditer.tracked_index = 'F' + elif item == 'multi_index': + nditer.tracked_index = 'multi' + elif item == 'common_dtype': + nditer.common_dtype = True + elif item == 'delay_bufalloc': + nditer.delay_bufalloc = True + elif item == 'grow_inner': + nditer.grow_inner = True + elif item == 'ranged': + nditer.ranged = True + elif item == 'refs_ok': + nditer.refs_ok = True + elif item == 'reduce_ok': + raise 
OperationError(space.w_NotImplementedError, space.wrap( + 'nditer reduce_ok not implemented yet')) + nditer.reduce_ok = True + elif item == 'zerosize_ok': + nditer.zerosize_ok = True + else: + raise OperationError(space.w_ValueError, space.wrap( + 'Unexpected iterator global flag "%s"' % item)) + if nditer.tracked_index and nditer.external_loop: + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + +def is_backward(imp, order): + if order == 'K' or (order == 'C' and imp.order == 'C'): + return False + elif order =='F' and imp.order == 'C': + return True + else: + raise NotImplementedError('not implemented yet') + +def get_iter(space, order, arr, shape, dtype): + imp = arr.implementation.astype(space, dtype) + backward = is_backward(imp, order) + if (imp.strides[0] < imp.strides[-1] and not backward) or \ + (imp.strides[0] > imp.strides[-1] and backward): + # flip the strides. Is this always true for multidimension? 
+ strides = imp.strides[:] + backstrides = imp.backstrides[:] + shape = imp.shape[:] + strides.reverse() + backstrides.reverse() + shape.reverse() + else: + strides = imp.strides + backstrides = imp.backstrides + r = calculate_broadcast_strides(strides, backstrides, imp.shape, + shape, backward) + return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) + +def get_external_loop_iter(space, order, arr, shape): + imp = arr.implementation + backward = is_backward(imp, order) + return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + +def convert_to_array_or_none(space, w_elem): + ''' + None will be passed through, all others will be converted + ''' + if space.is_none(w_elem): + return None + return convert_to_array(space, w_elem) + + +class IndexIterator(object): + def __init__(self, shape, backward=False): + self.shape = shape + self.index = [0] * len(shape) + self.backward = backward + + def next(self): + # TODO It's probably possible to refactor all the "next" method from each iterator + for i in range(len(self.shape) - 1, -1, -1): + if self.index[i] < self.shape[i] - 1: + self.index[i] += 1 + break + else: + self.index[i] = 0 + + def getvalue(self): + if not self.backward: + ret = self.index[-1] + for i in range(len(self.shape) - 2, -1, -1): + ret += self.index[i] * self.shape[i - 1] + else: + ret = self.index[0] + for i in range(1, len(self.shape)): + ret += self.index[i] * self.shape[i - 1] + return ret + +class W_NDIter(W_Root): + + def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, + w_op_axes, w_itershape, w_buffersize, order): + self.order = order + self.external_loop = False + self.buffered = False + self.tracked_index = '' + self.common_dtype = False + self.delay_bufalloc = False + self.grow_inner = False + self.ranged = False + self.refs_ok = False + self.reduce_ok = False + self.zerosize_ok = False + self.index_iter = None + self.done = False + self.first_next = True + self.op_axes = [] + 
if space.isinstance_w(w_seq, space.w_tuple) or \ + space.isinstance_w(w_seq, space.w_list): + w_seq_as_list = space.listview(w_seq) + self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + else: + self.seq =[convert_to_array(space, w_seq)] + parse_func_flags(space, self, w_flags) + self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, + len(self.seq), parse_op_flag) + if not space.is_none(w_op_axes): + self.set_op_axes(space, w_op_axes) + if not space.is_none(w_op_dtypes): + w_seq_as_list = space.listview(w_op_dtypes) + self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] + if len(self.dtypes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap( + "op_dtypes must be a tuple/list matching the number of ops")) + else: + self.dtypes = [] + self.iters=[] + outargs = [i for i in range(len(self.seq)) \ + if self.seq[i] is None or self.op_flags[i].rw == 'w'] + if len(outargs) > 0: + out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) + else: + out_shape = None + self.shape = iter_shape = shape_agreement_multiple(space, self.seq, + shape=out_shape) + if len(outargs) > 0: + # Make None operands writeonly and flagged for allocation + out_dtype = self.dtypes[0] if len(self.dtypes) > 0 else None + for i in range(len(self.seq)): + if self.seq[i] is None: + self.op_flags[i].get_it_item = (get_readwrite_item, + get_readwrite_slice) + self.op_flags[i].allocate = True + continue + if self.op_flags[i].rw == 'w': + continue + out_dtype = ufuncs.find_binop_result_dtype(space, + self.seq[i].get_dtype(), out_dtype) + for i in outargs: + if self.seq[i] is None: + # XXX can we postpone allocation to later? 
+ self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) + else: + if not self.op_flags[i].broadcast: + # Raises if ooutput cannot be broadcast + shape_agreement(space, iter_shape, self.seq[i], False) + if self.tracked_index != "": + if self.order == "K": + self.order = self.seq[0].implementation.order + if self.tracked_index == "multi": + backward = False + else: + backward = self.order != self.tracked_index + self.index_iter = IndexIterator(iter_shape, backward=backward) + if len(self.dtypes) > 0: + # Make sure dtypes make sense + for i in range(len(self.seq)): + selfd = self.dtypes[i] + seq_d = self.seq[i].get_dtype() + if not selfd: + self.dtypes[i] = seq_d + elif selfd != seq_d and not 'r' in self.op_flags[i].tmp_copy: + raise OperationError(space.w_TypeError, space.wrap( + "Iterator operand required copying or buffering")) + else: + #copy them from seq + self.dtypes = [s.get_dtype() for s in self.seq] + if self.external_loop: + for i in range(len(self.seq)): + self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) + else: + for i in range(len(self.seq)): + self.iters.append(BoxIterator(get_iter(space, self.order, + self.seq[i], iter_shape, self.dtypes[i]), + self.op_flags[i])) + + def set_op_axes(self, space, w_op_axes): + if space.len_w(w_op_axes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + op_axes = space.listview(w_op_axes) + l = -1 + for w_axis in op_axes: + if not space.is_none(w_axis): + axis_len = space.len_w(w_axis) + if l == -1: + l = axis_len + elif axis_len != l: + raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) + self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + if l == -1: + raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list 
of axes must be contained within it")) + raise Exception('xxx TODO') + # Check that values make sense: + # - in bounds for each operand + # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions + # - no repeat axis + # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0 + + def descr_iter(self, space): + return space.wrap(self) + + def descr_getitem(self, space, w_idx): + idx = space.int_w(w_idx) + try: + ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + except IndexError: + raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) + return ret + + def descr_setitem(self, space, w_idx, w_value): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_len(self, space): + space.wrap(len(self.iters)) + + def descr_next(self, space): + for it in self.iters: + if not it.done(): + break + else: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + res = [] + if self.index_iter: + if not self.first_next: + self.index_iter.next() + else: + self.first_next = False + for i in range(len(self.iters)): + res.append(self.iters[i].getitem(space, self.seq[i])) + self.iters[i].next() + if len(res) <2: + return res[0] + return space.newtuple(res) + + def iternext(self): + if self.index_iter: + self.index_iter.next() + for i in range(len(self.iters)): + self.iters[i].next() + for it in self.iters: + if not it.done(): + break + else: + self.done = True + return self.done + return self.done + + def descr_iternext(self, space): + return space.wrap(self.iternext()) + + def descr_copy(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_debug_print(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_enable_external_loop(self, 
space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + @unwrap_spec(axis=int) + def descr_remove_axis(self, space, axis): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_remove_multi_index(self, space, w_multi_index): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_reset(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_operands(self, space): + l_w = [] + for op in self.seq: + l_w.append(op.descr_view(space)) + return space.newlist(l_w) + + def descr_get_dtypes(self, space): + res = [None] * len(self.seq) + for i in range(len(self.seq)): + res[i] = self.seq[i].descr_get_dtype(space) + return space.newtuple(res) + + def descr_get_finished(self, space): + return space.wrap(self.done) + + def descr_get_has_delayed_bufalloc(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_has_index(self, space): + return space.wrap(self.tracked_index in ["C", "F"]) + + def descr_get_index(self, space): + if not self.tracked_index in ["C", "F"]: + raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.wrap(self.index_iter.getvalue()) + + def descr_get_has_multi_index(self, space): + return space.wrap(self.tracked_index == "multi") + + def descr_get_multi_index(self, space): + if not self.tracked_index == "multi": + raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a multi-index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.newtuple([space.wrap(x) for x in self.index_iter.index]) + + def descr_get_iterationneedsapi(self, space): + raise 
OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_iterindex(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_itersize(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_itviews(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_ndim(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_nop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_shape(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_value(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + + at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes = WrappedDefault(None), order=str, + w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), + w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) +def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order='K'): + return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order) + +W_NDIter.typedef = TypeDef( + 'nditer', + __iter__ = interp2app(W_NDIter.descr_iter), + __getitem__ = interp2app(W_NDIter.descr_getitem), + __setitem__ = interp2app(W_NDIter.descr_setitem), + __len__ = interp2app(W_NDIter.descr_len), + + next = interp2app(W_NDIter.descr_next), + iternext = interp2app(W_NDIter.descr_iternext), + copy = interp2app(W_NDIter.descr_copy), + debug_print = interp2app(W_NDIter.descr_debug_print), + enable_external_loop = 
interp2app(W_NDIter.descr_enable_external_loop), + remove_axis = interp2app(W_NDIter.descr_remove_axis), + remove_multi_index = interp2app(W_NDIter.descr_remove_multi_index), + reset = interp2app(W_NDIter.descr_reset), + + operands = GetSetProperty(W_NDIter.descr_get_operands), + dtypes = GetSetProperty(W_NDIter.descr_get_dtypes), + finished = GetSetProperty(W_NDIter.descr_get_finished), + has_delayed_bufalloc = GetSetProperty(W_NDIter.descr_get_has_delayed_bufalloc), + has_index = GetSetProperty(W_NDIter.descr_get_has_index), + index = GetSetProperty(W_NDIter.descr_get_index), + has_multi_index = GetSetProperty(W_NDIter.descr_get_has_multi_index), + multi_index = GetSetProperty(W_NDIter.descr_get_multi_index), + iterationneedsapi = GetSetProperty(W_NDIter.descr_get_iterationneedsapi), + iterindex = GetSetProperty(W_NDIter.descr_get_iterindex), + itersize = GetSetProperty(W_NDIter.descr_get_itersize), + itviews = GetSetProperty(W_NDIter.descr_get_itviews), + ndim = GetSetProperty(W_NDIter.descr_get_ndim), + nop = GetSetProperty(W_NDIter.descr_get_nop), + shape = GetSetProperty(W_NDIter.descr_get_shape), + value = GetSetProperty(W_NDIter.descr_get_value), +) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,14 +282,16 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list): +def shape_agreement_multiple(space, array_list, shape=None): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - shape = array_list[0].get_shape() - for arr in array_list[1:]: + for arr in array_list: if not space.is_none(arr): - shape = shape_agreement(space, shape, arr) + if shape is None: + shape = arr.get_shape() + else: + shape = shape_agreement(space, shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py new file mode 100644 --- /dev/null 
+++ b/pypy/module/micronumpy/test/test_nditer.py @@ -0,0 +1,294 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestNDIter(BaseNumpyAppTest): + def test_basic(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + + for x in nditer(a.T): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + + def test_order(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a, order='C'): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + for x in nditer(a, order='F'): + r.append(x) + assert r == [0, 3, 1, 4, 2, 5] + + def test_readwrite(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + for x in nditer(a, op_flags=['readwrite']): + x[...] = 2 * x + assert (a == [[0, 2, 4], [6, 8, 10]]).all() + + def test_external_loop(self): + from numpy import arange, nditer, array + a = arange(24).reshape(2, 3, 4) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + r = [] + n = 0 + for x in nditer(a, flags=['external_loop']): + r.append(x) + n += 1 + assert n == 1 + assert (array(r) == range(24)).all() + r = [] + n = 0 + for x in nditer(a, flags=['external_loop'], order='F'): + r.append(x) + n += 1 + assert n == 12 + assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() + e = raises(ValueError, 'r[0][0] = 0') + assert str(e.value) == 'assignment destination is read-only' + r = [] + for x in nditer(a.T, flags=['external_loop'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1,24) + assert (array(r) == arange(24)).all() + + def test_index(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) 
+ + r = [] + it = nditer(a, flags=['c_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] + exc = None + try: + it.index + except ValueError, e: + exc = e + assert exc + + r = [] + it = nditer(a, flags=['f_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + + @py.test.mark.xfail(reason="Fortran order not implemented") + def test_iters_with_different_order(self): + from numpy import nditer, array + + a = array([[1, 2], [3, 4]], order="C") + b = array([[1, 2], [3, 4]], order="F") + + it = nditer([a, b]) + + assert list(it) == zip(range(1, 5), range(1, 5)) + + def test_interface(self): + from numpy import arange, nditer, zeros + import sys + a = arange(6).reshape(2,3) + r = [] + it = nditer(a, flags=['f_index']) + while not it.finished: + r.append((it[0], it.index)) + it.iternext() + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, 'it[0] = 3') + skip('nditer.__setitem__ not implmented') + while not it.finished: + it[0] = it.multi_index[1] - it.multi_index[0] + it.iternext() + assert (a == [[0, 1, 2], [-1, 0, 1]]).all() + # b = zeros((2, 3)) + # exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) + # assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") + + def test_buffered(self): + from numpy import arange, nditer, array + a = arange(6).reshape(2,3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered']) + skip('nditer buffered not implmented') + r = [] + for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1, 6) + assert (array_r 
== [0, 3, 1, 4, 2, 5]).all() + + def test_op_dtype(self): + from numpy import arange, nditer, sqrt, array + a = arange(6).reshape(2,3) - 3 + exc = raises(TypeError, nditer, a, op_dtypes=['complex']) + assert str(exc.value).startswith("Iterator operand required copying or buffering") + r = [] + for x in nditer(a, op_flags=['readonly','copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + r = [] + for x in nditer(a, op_flags=['copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + + def test_casting(self): + from numpy import arange, nditer + import sys + a = arange(6.) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + skip('nditer casting not implemented yet') + exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], + casting='same_kind'): + r.append(x) + assert r == [0., 1., 2., 3., 4., 5.] 
+ exc = raises(TypeError, nditer, a, flags=['buffered'], + op_dtypes=['int32'], casting='same_kind') + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + b = arange(6) + exc = raises(TypeError, nditer, b, flags=['buffered'], op_dtypes=['float64'], + op_flags=['readwrite'], casting='same_kind') + assert str(exc.value).startswith("Iterator requested dtype could not be cast") + + def test_broadcast(self): + from numpy import arange, nditer + a = arange(3) + b = arange(6).reshape(2,3) + r = [] + for x,y in nditer([a, b]): + r.append((x, y)) + assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] + a = arange(2) + exc = raises(ValueError, nditer, [a, b]) + assert str(exc.value).find('shapes (2) (2,3)') > 0 + + def test_outarg(self): + from numpy import nditer, zeros, arange + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [1, 2], flags=['external_loop']) + skip('nditer external_loop not implmented') + + def square1(a): + it = nditer([a, None]) + for x,y in it: + y[...] = x*x + return it.operands[1] + assert (square1([1, 2, 3]) == [1, 4, 9]).all() + + def square2(a, out=None): + it = nditer([a, out], flags=['external_loop', 'buffered'], + op_flags=[['readonly'], + ['writeonly', 'allocate', 'no_broadcast']]) + for x,y in it: + y[...] 
= x*x + return it.operands[1] + assert (square2([1, 2, 3]) == [1, 4, 9]).all() + b = zeros((3, )) + c = square2([1, 2, 3], out=b) + assert (c == [1., 4., 9.]).all() + assert (b == c).all() + exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) + assert str(exc.value).find('cannot be broadcasted') > 0 + + def test_outer_product(self): + from numpy import nditer, arange + a = arange(3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + b = arange(8).reshape(2,4) + it = nditer([a, b, None], flags=['external_loop'], + op_axes=[[0, -1, -1], [-1, 0, 1], None]) + for x, y, z in it: + z[...] = x*y + assert it.operands[2].shape == (3, 2, 4) + for i in range(a.size): + assert (it.operands[2][i] == a[i]*b).all() + + def test_reduction(self): + from numpy import nditer, arange, array + import sys + a = arange(24).reshape(2, 3, 4) + b = array(0) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) + skip('nditer reduce_ok not implemented yet') + #reduction operands must be readwrite + for x, y in nditer([a, b], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite']]): + y[...] += x + assert b == 276 + assert b == a.sum() + + # reduction and allocation requires op_axes and initialization + it = nditer([a, None], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + for x, y in it: + y[...] += x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + # previous example with buffering, requires more flags and reset + it = nditer([a, None], flags=['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] 
= 0 + it.reset() + for x, y in it: + y[...] += x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + def test_get_dtypes(self): + from numpy import array, nditer + x = array([1, 2]) + y = array([1.0, 2.0]) + assert nditer([x, y]).dtypes == (x.dtype, y.dtype) + + def test_multi_index(self): + import numpy as np + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + res = [] + while not it.finished: + res.append((it[0], it.multi_index)) + it.iternext() + assert res == [(0, (0, 0)), (1, (0, 1)), + (2, (0, 2)), (3, (1, 0)), + (4, (1, 1)), (5, (1, 2))] From noreply at buildbot.pypy.org Thu Apr 17 01:35:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 17 Apr 2014 01:35:07 +0200 (CEST) Subject: [pypy-commit] pypy default: revert part of 310dcc241b1f: go back to having the module in sys.modules before Message-ID: <20140416233507.76D961C0476@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70674:a7023a962605 Date: 2014-04-16 16:17 -0700 http://bitbucket.org/pypy/pypy/changeset/a7023a962605/ Log: revert part of 310dcc241b1f: go back to having the module in sys.modules before init, otherwise py3k runs into trouble while bootstrapping sys with the new reloading fix diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -462,26 +462,23 @@ raise oefmt(self.w_SystemError, "getbuiltinmodule() called with non-builtin module %s", name) - else: - # Add the module to sys.modules and initialize the module - # The order is important to avoid recursions. - from pypy.interpreter.module import Module - if isinstance(w_mod, Module): - if not reuse and w_mod.startup_called: - # create a copy of the module. (see issue1514) - # eventlet patcher relies on this behaviour. 
- w_mod2 = self.wrap(Module(self, w_name)) - self.setitem(w_modules, w_name, w_mod2) - w_mod.getdict(self) # unlazy w_initialdict - self.call_method(w_mod2.getdict(self), 'update', - w_mod.w_initialdict) - return w_mod2 - # - w_mod.init(self) - # Add the module to sys.modules + # Add the module to sys.modules and initialize the module. The + # order is important to avoid recursions. + from pypy.interpreter.module import Module + if isinstance(w_mod, Module): + if not reuse and w_mod.startup_called: + # create a copy of the module. (see issue1514) eventlet + # patcher relies on this behaviour. + w_mod2 = self.wrap(Module(self, w_name)) + self.setitem(w_modules, w_name, w_mod2) + w_mod.getdict(self) # unlazy w_initialdict + self.call_method(w_mod2.getdict(self), 'update', + w_mod.w_initialdict) + return w_mod2 self.setitem(w_modules, w_name, w_mod) - return w_mod + w_mod.init(self) + return w_mod def get_builtinmodule_to_install(self): """NOT_RPYTHON""" From noreply at buildbot.pypy.org Thu Apr 17 01:35:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 17 Apr 2014 01:35:09 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140416233509.D5DB91C0476@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70675:8c349c4f0aa4 Date: 2014-04-16 16:22 -0700 http://bitbucket.org/pypy/pypy/changeset/8c349c4f0aa4/ Log: merge default diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -12,9 +12,9 @@ assert conf.objspace.usemodules.gc conf.objspace.std.withmapdict = True - assert conf.objspace.std.withmethodcache + assert conf.objspace.std.withtypeversion conf = get_pypy_config() - conf.objspace.std.withmethodcache = False + conf.objspace.std.withtypeversion = False py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") def test_conflicting_gcrootfinder(): diff --git a/pypy/doc/faq.rst 
b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -318,7 +318,7 @@ To read more about the RPython limitations read the `RPython description`_. -.. _`RPython description`: coding-guide.html#restricted-python +.. _`RPython description`: coding-guide.html#rpython-definition --------------------------------------------------------------- Does RPython have anything to do with Zope's Restricted Python? diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -92,7 +92,9 @@ `D07.1 Massive Parallelism and Translation Aspects`_ is a report about PyPy's optimization efforts, garbage collectors and massive parallelism (stackless) features. This report refers to the paper `PyPy's approach -to virtual machine construction`_. *(2007-02-28)* +to virtual machine construction`_. Extends the content previously +available in the document "Memory management and threading models as +translation aspects -- solutions and challenges". *(2007-02-28)* diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -133,4 +133,7 @@ .. branch: ast-issue1673 fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field \ No newline at end of file +there is missing field + +.. 
branch: issue1514 +Fix issues with reimporting builtin modules diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -339,6 +339,9 @@ return 'internal subclass of %s' % (Class.__name__,) wrappable_class_name._annspecialcase_ = 'specialize:memo' +class CannotHaveLock(Exception): + """Raised by space.allocate_lock() if we're translating.""" + # ____________________________________________________________ class ObjSpace(object): @@ -442,10 +445,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + assert reuse try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -460,15 +464,23 @@ raise oefmt(self.w_SystemError, "getbuiltinmodule() called with non-builtin module %s", name) - else: - # Initialize the module - from pypy.interpreter.module import Module - if isinstance(w_mod, Module): - w_mod.init(self) - # Add the module to sys.modules + # Add the module to sys.modules and initialize the module. The + # order is important to avoid recursions. + from pypy.interpreter.module import Module + if isinstance(w_mod, Module): + if not reuse and w_mod.startup_called: + # create a copy of the module. (see issue1514) eventlet + # patcher relies on this behaviour. + w_mod2 = self.wrap(Module(self, w_name)) + self.setitem(w_modules, w_name, w_mod2) + w_mod.getdict(self) # unlazy w_initialdict + self.call_method(w_mod2.getdict(self), 'update', + w_mod.w_initialdict) + return w_mod2 self.setitem(w_modules, w_name, w_mod) - return w_mod + w_mod.init(self) + return w_mod def get_builtinmodule_to_install(self): """NOT_RPYTHON""" @@ -676,6 +688,11 @@ def __allocate_lock(self): from rpython.rlib.rthread import allocate_lock, error + # hack: we can't have prebuilt locks if we're translating. 
+ # In this special situation we should just not lock at all + # (translation is not multithreaded anyway). + if not we_are_translated() and self.config.translating: + raise CannotHaveLock() try: return allocate_lock() except error: diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -14,6 +14,7 @@ # after startup(). w_initialdict = None lazy = False + submodule_name = None def __init__(self, space, w_name): """ NOT_RPYTHON """ @@ -31,6 +32,8 @@ space = self.space name = space.unwrap(self.w_name) for sub_name, module_cls in self.submodules.iteritems(): + if module_cls.submodule_name is None: + module_cls.submodule_name = sub_name module_name = space.wrap("%s.%s" % (name, sub_name)) m = module_cls(space, module_name) m.install() @@ -134,6 +137,8 @@ cls.loaders = loaders = {} pkgroot = cls.__module__ appname = cls.get_applevel_name() + if cls.submodule_name is not None: + appname += '.%s' % (cls.submodule_name,) for name, spec in cls.interpleveldefs.items(): loaders[name] = getinterpevalloader(pkgroot, spec) for name, spec in cls.appleveldefs.items(): diff --git a/pypy/module/__pypy__/app_signal.py b/pypy/module/__pypy__/app_signal.py --- a/pypy/module/__pypy__/app_signal.py +++ b/pypy/module/__pypy__/app_signal.py @@ -1,4 +1,9 @@ -import __pypy__.thread +import thread +# ^^ relative import of __pypy__.thread. Note that some tests depend on +# this (test_enable_signals in test_signal.py) to work properly, +# otherwise they get caught in some deadlock waiting for the import +# lock... + class SignalsEnabled(object): '''A context manager to use in non-main threads: @@ -8,7 +13,7 @@ that is within a "with signals_enabled:". 
This other thread should be ready to handle unexpected exceptions that the signal handler might raise --- notably KeyboardInterrupt.''' - __enter__ = __pypy__.thread._signals_enter - __exit__ = __pypy__.thread._signals_exit + __enter__ = thread._signals_enter + __exit__ = thread._signals_exit signals_enabled = SignalsEnabled() diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -8,6 +8,7 @@ def test_signal(self): from __pypy__ import thread + assert type(thread.signals_enabled).__module__ == '__pypy__.thread' with thread.signals_enabled: pass # assert did not crash diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py --- a/pypy/module/_multibytecodec/c_codecs.py +++ b/pypy/module/_multibytecodec/c_codecs.py @@ -1,7 +1,6 @@ import py from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir UNICODE_REPLACEMENT_CHARACTER = u'\uFFFD' @@ -15,7 +14,7 @@ return 'EncodeDecodeError(%r, %r, %r)' % (self.start, self.end, self.reason) -srcdir = py.path.local(cdir) +srcdir = py.path.local(__file__).dirpath() codecs = [ # _codecs_cn diff --git a/rpython/translator/c/src/cjkcodecs/README b/pypy/module/_multibytecodec/src/cjkcodecs/README rename from rpython/translator/c/src/cjkcodecs/README rename to pypy/module/_multibytecodec/src/cjkcodecs/README diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_cn.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c rename from rpython/translator/c/src/cjkcodecs/_codecs_cn.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_hk.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c rename from rpython/translator/c/src/cjkcodecs/_codecs_hk.c rename to 
pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c rename from rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_jp.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c rename from rpython/translator/c/src/cjkcodecs/_codecs_jp.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_kr.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c rename from rpython/translator/c/src/cjkcodecs/_codecs_kr.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_tw.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c rename from rpython/translator/c/src/cjkcodecs/_codecs_tw.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c diff --git a/rpython/translator/c/src/cjkcodecs/alg_jisx0201.h b/pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h rename from rpython/translator/c/src/cjkcodecs/alg_jisx0201.h rename to pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h diff --git a/rpython/translator/c/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h rename from rpython/translator/c/src/cjkcodecs/cjkcodecs.h rename to pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h diff --git a/rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h b/pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h rename from rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h rename to pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_cn.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h rename from rpython/translator/c/src/cjkcodecs/mappings_cn.h rename to 
pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_hk.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h rename from rpython/translator/c/src/cjkcodecs/mappings_hk.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h rename from rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jp.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h rename from rpython/translator/c/src/cjkcodecs/mappings_jp.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_kr.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h rename from rpython/translator/c/src/cjkcodecs/mappings_kr.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_tw.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h rename from rpython/translator/c/src/cjkcodecs/mappings_tw.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c rename from rpython/translator/c/src/cjkcodecs/multibytecodec.c rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h rename from rpython/translator/c/src/cjkcodecs/multibytecodec.h rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ 
b/pypy/module/cppyy/test/conftest.py @@ -1,5 +1,6 @@ import py + at py.test.mark.tryfirst def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: py.test.skip("genreflex is not installed") diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -8,7 +8,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, generic_new_descr from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.baseobjspace import W_Root, CannotHaveLock from pypy.interpreter.eval import Code from pypy.interpreter.pycode import PyCode from rpython.rlib import streamio, jit @@ -585,7 +585,8 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None @@ -759,26 +760,14 @@ me = self.space.getexecutioncontext() # used as thread ident return self.lockowner is me - def _can_have_lock(self): - # hack: we can't have self.lock != None during translation, - # because prebuilt lock objects are not allowed. In this - # special situation we just don't lock at all (translation is - # not multithreaded anyway). 
- if we_are_translated(): - return True # we need a lock at run-time - elif self.space.config.translating: - assert self.lock is None - return False - else: - return True # in py.py - def acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock if self.lock is None: - if not self._can_have_lock(): + try: + self.lock = self.space.allocate_lock() + except CannotHaveLock: return - self.lock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is me: pass # already acquired by the current thread @@ -796,7 +785,7 @@ # Too bad. This situation can occur if a fork() occurred # with the import lock held, and we're the child. return - if not self._can_have_lock(): + if self.lock is None: # CannotHaveLock occurred return space = self.space raise OperationError(space.w_RuntimeError, diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -219,7 +219,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = marshal.loads @@ -239,7 +238,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -652,7 +652,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -661,7 +660,6 @@ def test_reimport_builtin(self): import imp, sys, time - skip("fix me") oldpath = sys.path time.tzset = "" diff --git 
a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import math import _numpypy diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -97,25 +97,29 @@ self.indices = [0] * len(self.shape_m1) self.offset = self.array.start + @jit.unroll_safe def next(self): self.index += 1 for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < self.shape_m1[i]: - self.indices[i] += 1 + idx = self.indices[i] + if idx < self.shape_m1[i]: + self.indices[i] = idx + 1 self.offset += self.strides[i] break else: self.indices[i] = 0 self.offset -= self.backstrides[i] + @jit.unroll_safe def next_skip_x(self, step): assert step >= 0 if step == 0: return self.index += step for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < (self.shape_m1[i] + 1) - step: - self.indices[i] += step + idx = self.indices[i] + if idx < (self.shape_m1[i] + 1) - step: + self.indices[i] = idx + step self.offset += self.strides[i] * step break else: diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -482,16 +482,19 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ - 'call': 2, - 'getfield_gc': 2, - 'guard_no_exception': 2, + 'getarrayitem_gc': 2, + 'getfield_gc': 4, 'guard_not_invalidated': 1, - 'guard_true': 1, + 'guard_true': 3, + 'int_add': 6, 'int_gt': 1, + 'int_lt': 2, 'int_sub': 1, 'jump': 1, 'raw_load': 1, 'raw_store': 1, + 'setarrayitem_gc': 2, + 'setfield_gc': 4, }) def define_dot(): @@ -506,36 +509,43 @@ result = self.run("dot") assert result == 184 self.check_trace_count(3) - self.check_simple_loop({'float_add': 1, - 'float_mul': 1, - 
'guard_not_invalidated': 1, - 'guard_true': 1, - 'int_add': 3, - 'int_lt': 1, - 'jump': 1, - 'raw_load': 2}) - self.check_resops({'arraylen_gc': 1, - 'call': 3, - 'float_add': 2, - 'float_mul': 2, - 'getfield_gc': 26, - 'getfield_gc_pure': 24, - 'guard_class': 4, - 'guard_false': 2, - 'guard_no_exception': 3, - 'guard_nonnull': 12, - 'guard_nonnull_class': 4, - 'guard_not_invalidated': 2, - 'guard_true': 9, - 'guard_value': 4, - 'int_add': 6, - 'int_ge': 3, - 'int_lt': 4, - 'jump': 3, - 'new_array': 1, - 'raw_load': 6, - 'raw_store': 1, - 'setfield_gc': 3}) + self.check_simple_loop({ + 'float_add': 1, + 'float_mul': 1, + 'guard_not_invalidated': 1, + 'guard_true': 1, + 'int_add': 3, + 'int_lt': 1, + 'jump': 1, + 'raw_load': 2, + }) + self.check_resops({ + 'arraylen_gc': 1, + 'float_add': 2, + 'float_mul': 2, + 'getarrayitem_gc': 7, + 'getarrayitem_gc_pure': 15, + 'getfield_gc': 35, + 'getfield_gc_pure': 39, + 'guard_class': 4, + 'guard_false': 14, + 'guard_nonnull': 12, + 'guard_nonnull_class': 4, + 'guard_not_invalidated': 2, + 'guard_true': 13, + 'guard_value': 4, + 'int_add': 25, + 'int_ge': 4, + 'int_le': 8, + 'int_lt': 11, + 'int_sub': 4, + 'jump': 3, + 'new_array': 1, + 'raw_load': 6, + 'raw_store': 1, + 'setarrayitem_gc': 8, + 'setfield_gc': 15, + }) def define_argsort(): return """ diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -442,6 +442,7 @@ return v1 % v2 @simple_binary_op + @jit.look_inside_iff(lambda self, v1, v2: jit.isconstant(v2)) def pow(self, v1, v2): if v2 < 0: return 0 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -683,7 +683,7 @@ raise OperationError(space.w_ValueError, space.wrap("list modified during sort")) -find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') +find_jmp = jit.JitDriver(greens = ['tp'], 
reds = 'auto', name = 'list.find') class ListStrategy(object): @@ -709,8 +709,9 @@ space = self.space i = start # needs to be safe against eq_w mutating stuff + tp = space.type(w_item) while i < stop and i < w_list.length(): - find_jmp.jit_merge_point() + find_jmp.jit_merge_point(tp=tp) if space.eq_w(w_list.getitem(i), w_item): return i i += 1 diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1070,6 +1070,7 @@ def _intersect_wrapped(self, w_set, w_other): result = newset(self.space) for key in self.unerase(w_set.sstorage): + self.intersect_jmp.jit_merge_point() w_key = self.wrap(key) if w_other.has_key(w_key): result[w_key] = None @@ -1180,6 +1181,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(bytes).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1216,6 +1220,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(unicode).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1252,6 +1259,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(int).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1290,6 +1300,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(object).intersect') + def get_empty_storage(self): return self.erase(self.get_empty_dict()) @@ -1334,6 +1347,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(identity).intersect') + def get_empty_storage(self): return self.erase({}) diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- 
a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -27,7 +27,7 @@ jit.loop_unrolling_heuristic(other, other.length(), UNROLL_CUTOFF)) -contains_jmp = jit.JitDriver(greens = [], reds = 'auto', +contains_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'tuple.contains') class W_AbstractTupleObject(W_Root): @@ -138,8 +138,9 @@ return space.w_False def _descr_contains_jmp(self, space, w_obj): + tp = space.type(w_obj) for w_item in self.tolist(): - contains_jmp.jit_merge_point() + contains_jmp.jit_merge_point(tp=tp) if space.eq_w(w_item, w_obj): return space.w_True return space.w_False diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -25,7 +25,7 @@ # self.path = space.unwrap(space.getattr( self.path = py.path.local(space.str_w(self.w_file)) self.space = space - + def fullsource(self): filename = self.space.str_w(self.w_file) source = py.code.Source(py.std.linecache.getlines(filename)) @@ -106,27 +106,28 @@ def exconly(self, tryshort=True): return '(application-level) ' + self.operr.errorstr(self.space) - def errisinstance(self, exc): - clsname = exc.__name__ + def errisinstance(self, exc): + clsname = exc.__name__ # we can only check for builtin exceptions # as there is no canonical applevel one for custom interplevel ones if exc.__module__ != "exceptions": - return False - try: - w_exc = getattr(self.space, 'w_' + clsname) - except KeyboardInterrupt: - raise - except: - pass - else: - return self.operr.match(self.space, w_exc) - return False + return False + try: + w_exc = getattr(self.space, 'w_' + clsname) + except KeyboardInterrupt: + raise + except: + pass + else: + return self.operr.match(self.space, w_exc) + return False def __str__(self): return '(application-level) ' + self.operr.errorstr(self.space) class AppTracebackEntry(py.code.Traceback.Entry): exprinfo = None + frame = None def __init__(self, space, tb): self.frame = 
AppFrame(space, space.getattr(tb, space.wrap('tb_frame'))) @@ -142,8 +143,11 @@ # XXX this reinterpret() is only here to prevent reinterpretation. return self.exprinfo -class AppTraceback(py.code.Traceback): - Entry = AppTracebackEntry + def ishidden(self): + return False + +class AppTraceback(py.code.Traceback): + Entry = AppTracebackEntry def __init__(self, space, apptb): l = [] @@ -151,7 +155,7 @@ l.append(self.Entry(space, apptb)) apptb = space.getattr(apptb, space.wrap('tb_next')) list.__init__(self, l) - + # ____________________________________________________________ def build_pytest_assertion(space): @@ -163,10 +167,10 @@ ## # Argh! we may see app-level helpers in the frame stack! ## # that's very probably very bad... ## ^^^the above comment may be outdated, but we are not sure - + # if the assertion provided a message, don't do magic args_w, kwargs_w = __args__.unpack() - if args_w: + if args_w: w_msg = args_w[0] else: frame = space.getexecutioncontext().gettopframe() @@ -174,7 +178,7 @@ try: source = runner.statement source = str(source).strip() - except py.error.ENOENT: + except py.error.ENOENT: source = None from pypy import conftest if source and py.test.config._assertstate.mode != "off": @@ -187,7 +191,7 @@ space.setattr(w_self, space.wrap('msg'), w_msg) # build a new AssertionError class to replace the original one. - w_BuiltinAssertionError = space.getitem(space.builtin.w_dict, + w_BuiltinAssertionError = space.getitem(space.builtin.w_dict, space.wrap('AssertionError')) w_metaclass = space.type(w_BuiltinAssertionError) w_init = space.wrap(gateway.interp2app_temp(my_init)) @@ -261,9 +265,9 @@ app_raises = gateway.interp2app_temp(pypyraises) -def pypyskip(space, w_message): - """skip a test at app-level. """ - msg = space.unwrap(w_message) +def pypyskip(space, w_message): + """skip a test at app-level. 
""" + msg = space.unwrap(w_message) py.test.skip(msg) app_skip = gateway.interp2app_temp(pypyskip) diff --git a/rpython/jit/backend/conftest.py b/rpython/jit/backend/conftest.py --- a/rpython/jit/backend/conftest.py +++ b/rpython/jit/backend/conftest.py @@ -6,7 +6,7 @@ def pytest_addoption(parser): group = parser.getgroup('random test options') - group.addoption('--random-seed', action="store", type="int", + group.addoption('--random-seed', action="store", type=int, default=random.randrange(0, 10000), dest="randomseed", help="choose a fixed random seed") @@ -15,19 +15,19 @@ choices=['llgraph', 'cpu'], dest="backend", help="select the backend to run the functions with") - group.addoption('--block-length', action="store", type="int", + group.addoption('--block-length', action="store", type=int, default=30, dest="block_length", help="insert up to this many operations in each test") - group.addoption('--n-vars', action="store", type="int", + group.addoption('--n-vars', action="store", type=int, default=10, dest="n_vars", help="supply this many randomly-valued arguments to " "the function") - group.addoption('--repeat', action="store", type="int", + group.addoption('--repeat', action="store", type=int, default=15, dest="repeat", help="run the test this many times"), - group.addoption('--output', '-O', action="store", type="str", + group.addoption('--output', '-O', action="store", type=str, default="", dest="output", help="dump output to a file") diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -887,6 +887,10 @@ def bhimpl_int_isconstant(x): return False + @arguments("f", returns="i") + def bhimpl_float_isconstant(x): + return False + @arguments("r", returns="i") def bhimpl_ref_isconstant(x): return False diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ 
b/rpython/jit/metainterp/pyjitpl.py @@ -1169,7 +1169,9 @@ def _opimpl_isconstant(self, box): return ConstInt(isinstance(box, Const)) - opimpl_int_isconstant = opimpl_ref_isconstant = _opimpl_isconstant + opimpl_int_isconstant = _opimpl_isconstant + opimpl_ref_isconstant = _opimpl_isconstant + opimpl_float_isconstant = _opimpl_isconstant @arguments("box") def _opimpl_isvirtual(self, box): diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3329,6 +3329,25 @@ assert res == main(1, 10, 2) self.check_resops(call=0) + def test_look_inside_iff_const_float(self): + @look_inside_iff(lambda arg: isconstant(arg)) + def f(arg): + return arg + 0.5 + + driver = JitDriver(greens = [], reds = ['n', 'total']) + + def main(n): + total = 0.0 + while n > 0: + driver.jit_merge_point(n=n, total=total) + total = f(total) + n -= 1 + return total + + res = self.meta_interp(main, [10], enable_opts='') + assert res == 5.0 + self.check_resops(call=1) + def test_look_inside_iff_virtual(self): # There's no good reason for this to be look_inside_iff, but it's a test! @look_inside_iff(lambda arg, n: isvirtual(arg)) diff --git a/rpython/rlib/parsing/deterministic.py b/rpython/rlib/parsing/deterministic.py --- a/rpython/rlib/parsing/deterministic.py +++ b/rpython/rlib/parsing/deterministic.py @@ -11,7 +11,7 @@ characters; the result is a list of the first character in a run and the number of chars following, sorted with longer runs first. - + Example: 'abc' => [('a', 3)] Example: 'abcmxyz' => [('a',3),('x',3),('m',1)]""" # Find the runs. 
Creates a list like [['a',3],['m',1],['x',3]] @@ -25,7 +25,7 @@ else: # Found a 'hole', so create a new entry result += [[b, 1]] - + # Change the above list into a list of sorted tuples real_result = [(c,l) for [c,l] in result] # Sort longer runs first (hence -c), then alphabetically @@ -221,8 +221,8 @@ result.emit("i = 0") result.emit("state = 0") result.start_block("while 1:") - - # state_to_chars is a dict containing the sets of + + # state_to_chars is a dict containing the sets of # Ex: state_to_chars = { 0: set('a','b','c'), ...} state_to_chars = {} for (state, char), nextstate in self.transitions.iteritems(): @@ -266,7 +266,7 @@ if not elif_prefix: elif_prefix = "el" with result.block("else:"): - result.emit("break") + result.emit("break") #print state_to_chars.keys() for state in range(self.num_states): if state in state_to_chars: @@ -287,8 +287,13 @@ d = {'LexerError': LexerError} exec py.code.Source(result).compile() in d return d['recognize'] - + def make_lexing_code(self): + code = self.generate_lexing_code() + exec py.code.Source(code).compile() + return recognize + + def generate_lexing_code(self): from rpython.rlib.parsing.codebuilder import Codebuilder result = Codebuilder() result.start_block("def recognize(runner, i):") @@ -364,12 +369,12 @@ runner.state = state return ~i""") result.end_block("def") + result.emit("from rpython.rlib.parsing.deterministic import DFA") + result.emit("automaton = %s" % self) result = result.get_code() while "\n\n" in result: result = result.replace("\n\n", "\n") - #print result - exec py.code.Source(result).compile() - return recognize + return result def get_runner(self): return DFARunner(self) @@ -428,7 +433,7 @@ def nextstate(self, char): self.state = self.automaton[self.state, char] return self.state - + def recognize(self, s): self.state = 0 try: @@ -594,7 +599,7 @@ class SetNFARunner(object): def __init__(self, automaton): self.automaton = automaton - + def next_state(self, char): nextstates = set() for state in 
self.states: @@ -616,7 +621,7 @@ class BacktrackingNFARunner(object): def __init__(self, automaton): self.automaton = automaton - + def recognize(self, s): def recurse(i, state): if i == len(s): diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -153,6 +153,9 @@ def __exit__(self, *args): self.release() + def _cleanup_(self): + raise Exception("seeing a prebuilt rpython.rlib.rthread.Lock instance") + # ____________________________________________________________ # # Stack size diff --git a/rpython/rlib/test/test_rlocale.py b/rpython/rlib/test/test_rlocale.py --- a/rpython/rlib/test/test_rlocale.py +++ b/rpython/rlib/test/test_rlocale.py @@ -37,7 +37,7 @@ assert isinstance(grouping, str) def test_libintl(): - if sys.platform != "darwin" or not sys.platform.startswith("linux"): + if sys.platform != "darwin" and not sys.platform.startswith("linux"): py.test.skip("there is (maybe) no libintl here") _gettext = external('gettext', [rffi.CCHARP], rffi.CCHARP) res = _gettext("1234") From noreply at buildbot.pypy.org Thu Apr 17 01:35:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 17 Apr 2014 01:35:11 +0200 (CEST) Subject: [pypy-commit] pypy default: merge upstream Message-ID: <20140416233511.18CB61C0476@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70676:c7c7b54e61a0 Date: 2014-04-16 16:34 -0700 http://bitbucket.org/pypy/pypy/changeset/c7c7b54e61a0/ Log: merge upstream diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -2,7 +2,7 @@ Implementation of the interpreter-level default import logic. 
""" -import sys, os, stat +import sys, os, stat, genericpath from pypy.interpreter.module import Module from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -522,7 +522,8 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) - if os.path.isdir(filepart) and case_ok(filepart): + # os.path.isdir on win32 is not rpython when pywin32 installed + if genericpath.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) if modtype in (PY_SOURCE, PY_COMPILED): diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,6 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', + 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -458,6 +458,13 @@ return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) + def readonly(self): + return NonWritableSlice(self.start, self.strides, self.backstrides, self.shape, self.parent, self.orig_arr, self.dtype) + +class NonWritableSlice(SliceArray): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) class VoidBoxStorage(BaseConcreteArray): def __init__(self, size, dtype): diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,6 +42,7 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support +from pypy.module.micronumpy.strides import 
calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -142,6 +143,37 @@ def setitem(self, elem): self.array.setitem(self.offset, elem) +class SliceIterator(ArrayIter): + def __init__(self, arr, strides, backstrides, shape, order="C", + backward=False, dtype=None): + if dtype is None: + dtype = arr.implementation.dtype + self.dtype = dtype + self.arr = arr + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.elsize] + strides = strides[1:] + backstrides = backstrides[1:] + shape = shape[1:] + strides.reverse() + backstrides.reverse() + shape.reverse() + size = support.product(shape) + else: + shape = [support.product(shape)] + strides, backstrides = calc_strides(shape, dtype, order) + size = 1 + self.slicesize = support.product(shape) + self.gap = strides + + ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) + + def getslice(self): + from pypy.module.micronumpy.concrete import SliceArray + retVal = SliceArray(self.offset, self.gap, self.backstrides, + [self.slicesize], self.arr.implementation, self.arr, self.dtype) + return retVal def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/nditer.py @@ -0,0 +1,577 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.error import OperationError +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.strides import (calculate_broadcast_strides, + shape_agreement, shape_agreement_multiple) +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.concrete import SliceArray +from pypy.module.micronumpy.descriptor import decode_w_dtype 
+from pypy.module.micronumpy import ufuncs + + +class AbstractIterator(object): + def done(self): + raise NotImplementedError("Abstract Class") + + def next(self): + raise NotImplementedError("Abstract Class") + + def getitem(self, space, array): + raise NotImplementedError("Abstract Class") + +class IteratorMixin(object): + _mixin_ = True + def __init__(self, it, op_flags): + self.it = it + self.op_flags = op_flags + + def done(self): + return self.it.done() + + def next(self): + self.it.next() + + def getitem(self, space, array): + return self.op_flags.get_it_item[self.index](space, array, self.it) + + def setitem(self, space, array, val): + xxx + +class BoxIterator(IteratorMixin, AbstractIterator): + index = 0 + +class ExternalLoopIterator(IteratorMixin, AbstractIterator): + index = 1 + +def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): + ret = [] + if space.is_w(w_op_flags, space.w_None): + for i in range(n): + ret.append(OpFlag()) + elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + else: + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) + for i in range(n): + ret.append(op_flag) + return ret + +class OpFlag(object): + def __init__(self): + self.rw = 'r' + self.broadcast = True + self.force_contig = False + self.force_align = False + self.native_byte_order = False + self.tmp_copy = '' + self.allocate = False + self.get_it_item = (get_readonly_item, get_readonly_slice) + +def get_readonly_item(space, array, it): + return 
space.wrap(it.getitem()) + +def get_readwrite_item(space, array, it): + #create a single-value view (since scalars are not views) + res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) + #it.dtype.setitem(res, 0, it.getitem()) + return W_NDimArray(res) + +def get_readonly_slice(space, array, it): + return W_NDimArray(it.getslice().readonly()) + +def get_readwrite_slice(space, array, it): + return W_NDimArray(it.getslice()) + +def parse_op_flag(space, lst): + op_flag = OpFlag() + for w_item in lst: + item = space.str_w(w_item) + if item == 'readonly': + op_flag.rw = 'r' + elif item == 'readwrite': + op_flag.rw = 'rw' + elif item == 'writeonly': + op_flag.rw = 'w' + elif item == 'no_broadcast': + op_flag.broadcast = False + elif item == 'contig': + op_flag.force_contig = True + elif item == 'aligned': + op_flag.force_align = True + elif item == 'nbo': + op_flag.native_byte_order = True + elif item == 'copy': + op_flag.tmp_copy = 'r' + elif item == 'updateifcopy': + op_flag.tmp_copy = 'rw' + elif item == 'allocate': + op_flag.allocate = True + elif item == 'no_subtype': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"no_subtype" op_flag not implemented yet')) + elif item == 'arraymask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"arraymask" op_flag not implemented yet')) + elif item == 'writemask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"writemask" op_flag not implemented yet')) + else: + raise OperationError(space.w_ValueError, space.wrap( + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == 'r': + op_flag.get_it_item = (get_readonly_item, get_readonly_slice) + elif op_flag.rw == 'rw': + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + elif op_flag.rw == 'w': + # XXX Extra logic needed to make sure writeonly + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + return op_flag + +def parse_func_flags(space, nditer, 
w_flags): + if space.is_w(w_flags, space.w_None): + return + elif not space.isinstance_w(w_flags, space.w_tuple) and not \ + space.isinstance_w(w_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + 'Iter global flags must be a list or tuple of strings')) + lst = space.listview(w_flags) + for w_item in lst: + if not space.isinstance_w(w_item, space.w_str) and not \ + space.isinstance_w(w_item, space.w_unicode): + typename = space.type(w_item).getname(space) + raise OperationError(space.w_TypeError, space.wrap( + 'expected string or Unicode object, %s found' % typename)) + item = space.str_w(w_item) + if item == 'external_loop': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer external_loop not implemented yet')) + nditer.external_loop = True + elif item == 'buffered': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer buffered not implemented yet')) + # For numpy compatability + nditer.buffered = True + elif item == 'c_index': + nditer.tracked_index = 'C' + elif item == 'f_index': + nditer.tracked_index = 'F' + elif item == 'multi_index': + nditer.tracked_index = 'multi' + elif item == 'common_dtype': + nditer.common_dtype = True + elif item == 'delay_bufalloc': + nditer.delay_bufalloc = True + elif item == 'grow_inner': + nditer.grow_inner = True + elif item == 'ranged': + nditer.ranged = True + elif item == 'refs_ok': + nditer.refs_ok = True + elif item == 'reduce_ok': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer reduce_ok not implemented yet')) + nditer.reduce_ok = True + elif item == 'zerosize_ok': + nditer.zerosize_ok = True + else: + raise OperationError(space.w_ValueError, space.wrap( + 'Unexpected iterator global flag "%s"' % item)) + if nditer.tracked_index and nditer.external_loop: + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + +def 
is_backward(imp, order): + if order == 'K' or (order == 'C' and imp.order == 'C'): + return False + elif order =='F' and imp.order == 'C': + return True + else: + raise NotImplementedError('not implemented yet') + +def get_iter(space, order, arr, shape, dtype): + imp = arr.implementation.astype(space, dtype) + backward = is_backward(imp, order) + if (imp.strides[0] < imp.strides[-1] and not backward) or \ + (imp.strides[0] > imp.strides[-1] and backward): + # flip the strides. Is this always true for multidimension? + strides = imp.strides[:] + backstrides = imp.backstrides[:] + shape = imp.shape[:] + strides.reverse() + backstrides.reverse() + shape.reverse() + else: + strides = imp.strides + backstrides = imp.backstrides + r = calculate_broadcast_strides(strides, backstrides, imp.shape, + shape, backward) + return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) + +def get_external_loop_iter(space, order, arr, shape): + imp = arr.implementation + backward = is_backward(imp, order) + return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + +def convert_to_array_or_none(space, w_elem): + ''' + None will be passed through, all others will be converted + ''' + if space.is_none(w_elem): + return None + return convert_to_array(space, w_elem) + + +class IndexIterator(object): + def __init__(self, shape, backward=False): + self.shape = shape + self.index = [0] * len(shape) + self.backward = backward + + def next(self): + # TODO It's probably possible to refactor all the "next" method from each iterator + for i in range(len(self.shape) - 1, -1, -1): + if self.index[i] < self.shape[i] - 1: + self.index[i] += 1 + break + else: + self.index[i] = 0 + + def getvalue(self): + if not self.backward: + ret = self.index[-1] + for i in range(len(self.shape) - 2, -1, -1): + ret += self.index[i] * self.shape[i - 1] + else: + ret = self.index[0] + for i in range(1, len(self.shape)): + ret += self.index[i] * self.shape[i - 1] + return ret + 
+class W_NDIter(W_Root): + + def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, + w_op_axes, w_itershape, w_buffersize, order): + self.order = order + self.external_loop = False + self.buffered = False + self.tracked_index = '' + self.common_dtype = False + self.delay_bufalloc = False + self.grow_inner = False + self.ranged = False + self.refs_ok = False + self.reduce_ok = False + self.zerosize_ok = False + self.index_iter = None + self.done = False + self.first_next = True + self.op_axes = [] + if space.isinstance_w(w_seq, space.w_tuple) or \ + space.isinstance_w(w_seq, space.w_list): + w_seq_as_list = space.listview(w_seq) + self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + else: + self.seq =[convert_to_array(space, w_seq)] + parse_func_flags(space, self, w_flags) + self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, + len(self.seq), parse_op_flag) + if not space.is_none(w_op_axes): + self.set_op_axes(space, w_op_axes) + if not space.is_none(w_op_dtypes): + w_seq_as_list = space.listview(w_op_dtypes) + self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] + if len(self.dtypes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap( + "op_dtypes must be a tuple/list matching the number of ops")) + else: + self.dtypes = [] + self.iters=[] + outargs = [i for i in range(len(self.seq)) \ + if self.seq[i] is None or self.op_flags[i].rw == 'w'] + if len(outargs) > 0: + out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) + else: + out_shape = None + self.shape = iter_shape = shape_agreement_multiple(space, self.seq, + shape=out_shape) + if len(outargs) > 0: + # Make None operands writeonly and flagged for allocation + out_dtype = self.dtypes[0] if len(self.dtypes) > 0 else None + for i in range(len(self.seq)): + if self.seq[i] is None: + self.op_flags[i].get_it_item = (get_readwrite_item, + get_readwrite_slice) + self.op_flags[i].allocate = 
True + continue + if self.op_flags[i].rw == 'w': + continue + out_dtype = ufuncs.find_binop_result_dtype(space, + self.seq[i].get_dtype(), out_dtype) + for i in outargs: + if self.seq[i] is None: + # XXX can we postpone allocation to later? + self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) + else: + if not self.op_flags[i].broadcast: + # Raises if ooutput cannot be broadcast + shape_agreement(space, iter_shape, self.seq[i], False) + if self.tracked_index != "": + if self.order == "K": + self.order = self.seq[0].implementation.order + if self.tracked_index == "multi": + backward = False + else: + backward = self.order != self.tracked_index + self.index_iter = IndexIterator(iter_shape, backward=backward) + if len(self.dtypes) > 0: + # Make sure dtypes make sense + for i in range(len(self.seq)): + selfd = self.dtypes[i] + seq_d = self.seq[i].get_dtype() + if not selfd: + self.dtypes[i] = seq_d + elif selfd != seq_d and not 'r' in self.op_flags[i].tmp_copy: + raise OperationError(space.w_TypeError, space.wrap( + "Iterator operand required copying or buffering")) + else: + #copy them from seq + self.dtypes = [s.get_dtype() for s in self.seq] + if self.external_loop: + for i in range(len(self.seq)): + self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) + else: + for i in range(len(self.seq)): + self.iters.append(BoxIterator(get_iter(space, self.order, + self.seq[i], iter_shape, self.dtypes[i]), + self.op_flags[i])) + + def set_op_axes(self, space, w_op_axes): + if space.len_w(w_op_axes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + op_axes = space.listview(w_op_axes) + l = -1 + for w_axis in op_axes: + if not space.is_none(w_axis): + axis_len = space.len_w(w_axis) + if l == -1: + l = axis_len + elif axis_len != l: + raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes 
must have the same size")) + self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + if l == -1: + raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise Exception('xxx TODO') + # Check that values make sense: + # - in bounds for each operand + # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions + # - no repeat axis + # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0 + + def descr_iter(self, space): + return space.wrap(self) + + def descr_getitem(self, space, w_idx): + idx = space.int_w(w_idx) + try: + ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + except IndexError: + raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) + return ret + + def descr_setitem(self, space, w_idx, w_value): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_len(self, space): + space.wrap(len(self.iters)) + + def descr_next(self, space): + for it in self.iters: + if not it.done(): + break + else: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + res = [] + if self.index_iter: + if not self.first_next: + self.index_iter.next() + else: + self.first_next = False + for i in range(len(self.iters)): + res.append(self.iters[i].getitem(space, self.seq[i])) + self.iters[i].next() + if len(res) <2: + return res[0] + return space.newtuple(res) + + def iternext(self): + if self.index_iter: + self.index_iter.next() + for i in range(len(self.iters)): + self.iters[i].next() + for it in self.iters: + if not it.done(): + break + else: + self.done = True + return self.done + return self.done + + def descr_iternext(self, space): + return space.wrap(self.iternext()) + + def descr_copy(self, space): + raise 
OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_debug_print(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_enable_external_loop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + @unwrap_spec(axis=int) + def descr_remove_axis(self, space, axis): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_remove_multi_index(self, space, w_multi_index): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_reset(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_operands(self, space): + l_w = [] + for op in self.seq: + l_w.append(op.descr_view(space)) + return space.newlist(l_w) + + def descr_get_dtypes(self, space): + res = [None] * len(self.seq) + for i in range(len(self.seq)): + res[i] = self.seq[i].descr_get_dtype(space) + return space.newtuple(res) + + def descr_get_finished(self, space): + return space.wrap(self.done) + + def descr_get_has_delayed_bufalloc(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_has_index(self, space): + return space.wrap(self.tracked_index in ["C", "F"]) + + def descr_get_index(self, space): + if not self.tracked_index in ["C", "F"]: + raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.wrap(self.index_iter.getvalue()) + + def descr_get_has_multi_index(self, space): + return space.wrap(self.tracked_index == "multi") + + def descr_get_multi_index(self, space): + if not self.tracked_index == "multi": + raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a 
multi-index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.newtuple([space.wrap(x) for x in self.index_iter.index]) + + def descr_get_iterationneedsapi(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_iterindex(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_itersize(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_itviews(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_ndim(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_nop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_shape(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_value(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + + at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes = WrappedDefault(None), order=str, + w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), + w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) +def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order='K'): + return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order) + +W_NDIter.typedef = TypeDef( + 'nditer', + __iter__ = interp2app(W_NDIter.descr_iter), + __getitem__ = interp2app(W_NDIter.descr_getitem), + __setitem__ = interp2app(W_NDIter.descr_setitem), + __len__ = interp2app(W_NDIter.descr_len), + + next = 
interp2app(W_NDIter.descr_next), + iternext = interp2app(W_NDIter.descr_iternext), + copy = interp2app(W_NDIter.descr_copy), + debug_print = interp2app(W_NDIter.descr_debug_print), + enable_external_loop = interp2app(W_NDIter.descr_enable_external_loop), + remove_axis = interp2app(W_NDIter.descr_remove_axis), + remove_multi_index = interp2app(W_NDIter.descr_remove_multi_index), + reset = interp2app(W_NDIter.descr_reset), + + operands = GetSetProperty(W_NDIter.descr_get_operands), + dtypes = GetSetProperty(W_NDIter.descr_get_dtypes), + finished = GetSetProperty(W_NDIter.descr_get_finished), + has_delayed_bufalloc = GetSetProperty(W_NDIter.descr_get_has_delayed_bufalloc), + has_index = GetSetProperty(W_NDIter.descr_get_has_index), + index = GetSetProperty(W_NDIter.descr_get_index), + has_multi_index = GetSetProperty(W_NDIter.descr_get_has_multi_index), + multi_index = GetSetProperty(W_NDIter.descr_get_multi_index), + iterationneedsapi = GetSetProperty(W_NDIter.descr_get_iterationneedsapi), + iterindex = GetSetProperty(W_NDIter.descr_get_iterindex), + itersize = GetSetProperty(W_NDIter.descr_get_itersize), + itviews = GetSetProperty(W_NDIter.descr_get_itviews), + ndim = GetSetProperty(W_NDIter.descr_get_ndim), + nop = GetSetProperty(W_NDIter.descr_get_nop), + shape = GetSetProperty(W_NDIter.descr_get_shape), + value = GetSetProperty(W_NDIter.descr_get_value), +) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,14 +282,16 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list): +def shape_agreement_multiple(space, array_list, shape=None): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - shape = array_list[0].get_shape() - for arr in array_list[1:]: + for arr in array_list: if not space.is_none(arr): - shape = shape_agreement(space, shape, arr) + if shape is None: + shape = 
arr.get_shape() + else: + shape = shape_agreement(space, shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -0,0 +1,294 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestNDIter(BaseNumpyAppTest): + def test_basic(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + + for x in nditer(a.T): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + + def test_order(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a, order='C'): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + for x in nditer(a, order='F'): + r.append(x) + assert r == [0, 3, 1, 4, 2, 5] + + def test_readwrite(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + for x in nditer(a, op_flags=['readwrite']): + x[...] 
= 2 * x + assert (a == [[0, 2, 4], [6, 8, 10]]).all() + + def test_external_loop(self): + from numpy import arange, nditer, array + a = arange(24).reshape(2, 3, 4) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + r = [] + n = 0 + for x in nditer(a, flags=['external_loop']): + r.append(x) + n += 1 + assert n == 1 + assert (array(r) == range(24)).all() + r = [] + n = 0 + for x in nditer(a, flags=['external_loop'], order='F'): + r.append(x) + n += 1 + assert n == 12 + assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() + e = raises(ValueError, 'r[0][0] = 0') + assert str(e.value) == 'assignment destination is read-only' + r = [] + for x in nditer(a.T, flags=['external_loop'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1,24) + assert (array(r) == arange(24)).all() + + def test_index(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + + r = [] + it = nditer(a, flags=['c_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] + exc = None + try: + it.index + except ValueError, e: + exc = e + assert exc + + r = [] + it = nditer(a, flags=['f_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + + @py.test.mark.xfail(reason="Fortran order not implemented") + def test_iters_with_different_order(self): + from numpy import nditer, array + + a = array([[1, 2], [3, 4]], order="C") + b = array([[1, 2], [3, 4]], order="F") + + it = nditer([a, b]) + + assert list(it) == zip(range(1, 5), range(1, 5)) + + def test_interface(self): + from numpy import arange, nditer, zeros + import sys + a = 
arange(6).reshape(2,3) + r = [] + it = nditer(a, flags=['f_index']) + while not it.finished: + r.append((it[0], it.index)) + it.iternext() + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, 'it[0] = 3') + skip('nditer.__setitem__ not implmented') + while not it.finished: + it[0] = it.multi_index[1] - it.multi_index[0] + it.iternext() + assert (a == [[0, 1, 2], [-1, 0, 1]]).all() + # b = zeros((2, 3)) + # exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) + # assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") + + def test_buffered(self): + from numpy import arange, nditer, array + a = arange(6).reshape(2,3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered']) + skip('nditer buffered not implmented') + r = [] + for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1, 6) + assert (array_r == [0, 3, 1, 4, 2, 5]).all() + + def test_op_dtype(self): + from numpy import arange, nditer, sqrt, array + a = arange(6).reshape(2,3) - 3 + exc = raises(TypeError, nditer, a, op_dtypes=['complex']) + assert str(exc.value).startswith("Iterator operand required copying or buffering") + r = [] + for x in nditer(a, op_flags=['readonly','copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + r = [] + for x in nditer(a, op_flags=['copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + + def test_casting(self): + from numpy import arange, nditer + import sys + a = arange(6.) 
+ if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + skip('nditer casting not implemented yet') + exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], + casting='same_kind'): + r.append(x) + assert r == [0., 1., 2., 3., 4., 5.] + exc = raises(TypeError, nditer, a, flags=['buffered'], + op_dtypes=['int32'], casting='same_kind') + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + b = arange(6) + exc = raises(TypeError, nditer, b, flags=['buffered'], op_dtypes=['float64'], + op_flags=['readwrite'], casting='same_kind') + assert str(exc.value).startswith("Iterator requested dtype could not be cast") + + def test_broadcast(self): + from numpy import arange, nditer + a = arange(3) + b = arange(6).reshape(2,3) + r = [] + for x,y in nditer([a, b]): + r.append((x, y)) + assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] + a = arange(2) + exc = raises(ValueError, nditer, [a, b]) + assert str(exc.value).find('shapes (2) (2,3)') > 0 + + def test_outarg(self): + from numpy import nditer, zeros, arange + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [1, 2], flags=['external_loop']) + skip('nditer external_loop not implmented') + + def square1(a): + it = nditer([a, None]) + for x,y in it: + y[...] = x*x + return it.operands[1] + assert (square1([1, 2, 3]) == [1, 4, 9]).all() + + def square2(a, out=None): + it = nditer([a, out], flags=['external_loop', 'buffered'], + op_flags=[['readonly'], + ['writeonly', 'allocate', 'no_broadcast']]) + for x,y in it: + y[...] 
= x*x + return it.operands[1] + assert (square2([1, 2, 3]) == [1, 4, 9]).all() + b = zeros((3, )) + c = square2([1, 2, 3], out=b) + assert (c == [1., 4., 9.]).all() + assert (b == c).all() + exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) + assert str(exc.value).find('cannot be broadcasted') > 0 + + def test_outer_product(self): + from numpy import nditer, arange + a = arange(3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + b = arange(8).reshape(2,4) + it = nditer([a, b, None], flags=['external_loop'], + op_axes=[[0, -1, -1], [-1, 0, 1], None]) + for x, y, z in it: + z[...] = x*y + assert it.operands[2].shape == (3, 2, 4) + for i in range(a.size): + assert (it.operands[2][i] == a[i]*b).all() + + def test_reduction(self): + from numpy import nditer, arange, array + import sys + a = arange(24).reshape(2, 3, 4) + b = array(0) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) + skip('nditer reduce_ok not implemented yet') + #reduction operands must be readwrite + for x, y in nditer([a, b], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite']]): + y[...] += x + assert b == 276 + assert b == a.sum() + + # reduction and allocation requires op_axes and initialization + it = nditer([a, None], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + for x, y in it: + y[...] += x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + # previous example with buffering, requires more flags and reset + it = nditer([a, None], flags=['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] 
= 0 + it.reset() + for x, y in it: + y[...] += x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + def test_get_dtypes(self): + from numpy import array, nditer + x = array([1, 2]) + y = array([1.0, 2.0]) + assert nditer([x, y]).dtypes == (x.dtype, y.dtype) + + def test_multi_index(self): + import numpy as np + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + res = [] + while not it.finished: + res.append((it[0], it.multi_index)) + it.iternext() + assert res == [(0, (0, 0)), (1, (0, 1)), + (2, (0, 2)), (3, (1, 0)), + (4, (1, 1)), (5, (1, 2))] From noreply at buildbot.pypy.org Thu Apr 17 01:39:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 01:39:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Backed out merge changeset: bace5e5bd016 Message-ID: <20140416233915.074BC1C02FC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70677:6dbec8d4abec Date: 2014-04-17 02:35 +0300 http://bitbucket.org/pypy/pypy/changeset/6dbec8d4abec/ Log: Backed out merge changeset: bace5e5bd016 Backed out merge revision to its first parent (5494b1aac76f) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,7 +23,6 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', - 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -458,13 +458,6 @@ return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) - def readonly(self): - return NonWritableSlice(self.start, self.strides, self.backstrides, self.shape, 
self.parent, self.orig_arr, self.dtype) - -class NonWritableSlice(SliceArray): - def descr_setitem(self, space, orig_array, w_index, w_value): - raise OperationError(space.w_ValueError, space.wrap( - "assignment destination is read-only")) class VoidBoxStorage(BaseConcreteArray): def __init__(self, size, dtype): diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,7 +42,6 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support -from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -143,37 +142,6 @@ def setitem(self, elem): self.array.setitem(self.offset, elem) -class SliceIterator(ArrayIter): - def __init__(self, arr, strides, backstrides, shape, order="C", - backward=False, dtype=None): - if dtype is None: - dtype = arr.implementation.dtype - self.dtype = dtype - self.arr = arr - if backward: - self.slicesize = shape[0] - self.gap = [support.product(shape[1:]) * dtype.elsize] - strides = strides[1:] - backstrides = backstrides[1:] - shape = shape[1:] - strides.reverse() - backstrides.reverse() - shape.reverse() - size = support.product(shape) - else: - shape = [support.product(shape)] - strides, backstrides = calc_strides(shape, dtype, order) - size = 1 - self.slicesize = support.product(shape) - self.gap = strides - - ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) - - def getslice(self): - from pypy.module.micronumpy.concrete import SliceArray - retVal = SliceArray(self.offset, self.gap, self.backstrides, - [self.slicesize], self.arr.implementation, self.arr, self.dtype) - return retVal def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py deleted file mode 100644 --- a/pypy/module/micronumpy/nditer.py +++ /dev/null @@ -1,577 
+0,0 @@ -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array -from pypy.module.micronumpy.strides import (calculate_broadcast_strides, - shape_agreement, shape_agreement_multiple) -from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator -from pypy.module.micronumpy.concrete import SliceArray -from pypy.module.micronumpy.descriptor import decode_w_dtype -from pypy.module.micronumpy import ufuncs - - -class AbstractIterator(object): - def done(self): - raise NotImplementedError("Abstract Class") - - def next(self): - raise NotImplementedError("Abstract Class") - - def getitem(self, space, array): - raise NotImplementedError("Abstract Class") - -class IteratorMixin(object): - _mixin_ = True - def __init__(self, it, op_flags): - self.it = it - self.op_flags = op_flags - - def done(self): - return self.it.done() - - def next(self): - self.it.next() - - def getitem(self, space, array): - return self.op_flags.get_it_item[self.index](space, array, self.it) - - def setitem(self, space, array, val): - xxx - -class BoxIterator(IteratorMixin, AbstractIterator): - index = 0 - -class ExternalLoopIterator(IteratorMixin, AbstractIterator): - index = 1 - -def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): - ret = [] - if space.is_w(w_op_flags, space.w_None): - for i in range(n): - ret.append(OpFlag()) - elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ - space.isinstance_w(w_op_flags, space.w_list): - raise OperationError(space.w_ValueError, space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) - else: - w_lst = space.listview(w_op_flags) - if space.isinstance_w(w_lst[0], space.w_tuple) or \ - space.isinstance_w(w_lst[0], space.w_list): - if len(w_lst) != n: - raise 
OperationError(space.w_ValueError, space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) - for item in w_lst: - ret.append(parse_one_arg(space, space.listview(item))) - else: - op_flag = parse_one_arg(space, w_lst) - for i in range(n): - ret.append(op_flag) - return ret - -class OpFlag(object): - def __init__(self): - self.rw = 'r' - self.broadcast = True - self.force_contig = False - self.force_align = False - self.native_byte_order = False - self.tmp_copy = '' - self.allocate = False - self.get_it_item = (get_readonly_item, get_readonly_slice) - -def get_readonly_item(space, array, it): - return space.wrap(it.getitem()) - -def get_readwrite_item(space, array, it): - #create a single-value view (since scalars are not views) - res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) - #it.dtype.setitem(res, 0, it.getitem()) - return W_NDimArray(res) - -def get_readonly_slice(space, array, it): - return W_NDimArray(it.getslice().readonly()) - -def get_readwrite_slice(space, array, it): - return W_NDimArray(it.getslice()) - -def parse_op_flag(space, lst): - op_flag = OpFlag() - for w_item in lst: - item = space.str_w(w_item) - if item == 'readonly': - op_flag.rw = 'r' - elif item == 'readwrite': - op_flag.rw = 'rw' - elif item == 'writeonly': - op_flag.rw = 'w' - elif item == 'no_broadcast': - op_flag.broadcast = False - elif item == 'contig': - op_flag.force_contig = True - elif item == 'aligned': - op_flag.force_align = True - elif item == 'nbo': - op_flag.native_byte_order = True - elif item == 'copy': - op_flag.tmp_copy = 'r' - elif item == 'updateifcopy': - op_flag.tmp_copy = 'rw' - elif item == 'allocate': - op_flag.allocate = True - elif item == 'no_subtype': - raise OperationError(space.w_NotImplementedError, space.wrap( - '"no_subtype" op_flag not implemented yet')) - elif item == 'arraymask': - raise OperationError(space.w_NotImplementedError, space.wrap( - '"arraymask" op_flag not implemented yet')) - elif item == 
'writemask': - raise OperationError(space.w_NotImplementedError, space.wrap( - '"writemask" op_flag not implemented yet')) - else: - raise OperationError(space.w_ValueError, space.wrap( - 'op_flags must be a tuple or array of per-op flag-tuples')) - if op_flag.rw == 'r': - op_flag.get_it_item = (get_readonly_item, get_readonly_slice) - elif op_flag.rw == 'rw': - op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) - elif op_flag.rw == 'w': - # XXX Extra logic needed to make sure writeonly - op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) - return op_flag - -def parse_func_flags(space, nditer, w_flags): - if space.is_w(w_flags, space.w_None): - return - elif not space.isinstance_w(w_flags, space.w_tuple) and not \ - space.isinstance_w(w_flags, space.w_list): - raise OperationError(space.w_ValueError, space.wrap( - 'Iter global flags must be a list or tuple of strings')) - lst = space.listview(w_flags) - for w_item in lst: - if not space.isinstance_w(w_item, space.w_str) and not \ - space.isinstance_w(w_item, space.w_unicode): - typename = space.type(w_item).getname(space) - raise OperationError(space.w_TypeError, space.wrap( - 'expected string or Unicode object, %s found' % typename)) - item = space.str_w(w_item) - if item == 'external_loop': - raise OperationError(space.w_NotImplementedError, space.wrap( - 'nditer external_loop not implemented yet')) - nditer.external_loop = True - elif item == 'buffered': - raise OperationError(space.w_NotImplementedError, space.wrap( - 'nditer buffered not implemented yet')) - # For numpy compatability - nditer.buffered = True - elif item == 'c_index': - nditer.tracked_index = 'C' - elif item == 'f_index': - nditer.tracked_index = 'F' - elif item == 'multi_index': - nditer.tracked_index = 'multi' - elif item == 'common_dtype': - nditer.common_dtype = True - elif item == 'delay_bufalloc': - nditer.delay_bufalloc = True - elif item == 'grow_inner': - nditer.grow_inner = True - elif item == 'ranged': - 
nditer.ranged = True - elif item == 'refs_ok': - nditer.refs_ok = True - elif item == 'reduce_ok': - raise OperationError(space.w_NotImplementedError, space.wrap( - 'nditer reduce_ok not implemented yet')) - nditer.reduce_ok = True - elif item == 'zerosize_ok': - nditer.zerosize_ok = True - else: - raise OperationError(space.w_ValueError, space.wrap( - 'Unexpected iterator global flag "%s"' % item)) - if nditer.tracked_index and nditer.external_loop: - raise OperationError(space.w_ValueError, space.wrap( - 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' - 'multi-index is being tracked')) - -def is_backward(imp, order): - if order == 'K' or (order == 'C' and imp.order == 'C'): - return False - elif order =='F' and imp.order == 'C': - return True - else: - raise NotImplementedError('not implemented yet') - -def get_iter(space, order, arr, shape, dtype): - imp = arr.implementation.astype(space, dtype) - backward = is_backward(imp, order) - if (imp.strides[0] < imp.strides[-1] and not backward) or \ - (imp.strides[0] > imp.strides[-1] and backward): - # flip the strides. Is this always true for multidimension? 
- strides = imp.strides[:] - backstrides = imp.backstrides[:] - shape = imp.shape[:] - strides.reverse() - backstrides.reverse() - shape.reverse() - else: - strides = imp.strides - backstrides = imp.backstrides - r = calculate_broadcast_strides(strides, backstrides, imp.shape, - shape, backward) - return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) - -def get_external_loop_iter(space, order, arr, shape): - imp = arr.implementation - backward = is_backward(imp, order) - return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) - -def convert_to_array_or_none(space, w_elem): - ''' - None will be passed through, all others will be converted - ''' - if space.is_none(w_elem): - return None - return convert_to_array(space, w_elem) - - -class IndexIterator(object): - def __init__(self, shape, backward=False): - self.shape = shape - self.index = [0] * len(shape) - self.backward = backward - - def next(self): - # TODO It's probably possible to refactor all the "next" method from each iterator - for i in range(len(self.shape) - 1, -1, -1): - if self.index[i] < self.shape[i] - 1: - self.index[i] += 1 - break - else: - self.index[i] = 0 - - def getvalue(self): - if not self.backward: - ret = self.index[-1] - for i in range(len(self.shape) - 2, -1, -1): - ret += self.index[i] * self.shape[i - 1] - else: - ret = self.index[0] - for i in range(1, len(self.shape)): - ret += self.index[i] * self.shape[i - 1] - return ret - -class W_NDIter(W_Root): - - def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, - w_op_axes, w_itershape, w_buffersize, order): - self.order = order - self.external_loop = False - self.buffered = False - self.tracked_index = '' - self.common_dtype = False - self.delay_bufalloc = False - self.grow_inner = False - self.ranged = False - self.refs_ok = False - self.reduce_ok = False - self.zerosize_ok = False - self.index_iter = None - self.done = False - self.first_next = True - self.op_axes = [] - 
if space.isinstance_w(w_seq, space.w_tuple) or \ - space.isinstance_w(w_seq, space.w_list): - w_seq_as_list = space.listview(w_seq) - self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] - else: - self.seq =[convert_to_array(space, w_seq)] - parse_func_flags(space, self, w_flags) - self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, - len(self.seq), parse_op_flag) - if not space.is_none(w_op_axes): - self.set_op_axes(space, w_op_axes) - if not space.is_none(w_op_dtypes): - w_seq_as_list = space.listview(w_op_dtypes) - self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] - if len(self.dtypes) != len(self.seq): - raise OperationError(space.w_ValueError, space.wrap( - "op_dtypes must be a tuple/list matching the number of ops")) - else: - self.dtypes = [] - self.iters=[] - outargs = [i for i in range(len(self.seq)) \ - if self.seq[i] is None or self.op_flags[i].rw == 'w'] - if len(outargs) > 0: - out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) - else: - out_shape = None - self.shape = iter_shape = shape_agreement_multiple(space, self.seq, - shape=out_shape) - if len(outargs) > 0: - # Make None operands writeonly and flagged for allocation - out_dtype = self.dtypes[0] if len(self.dtypes) > 0 else None - for i in range(len(self.seq)): - if self.seq[i] is None: - self.op_flags[i].get_it_item = (get_readwrite_item, - get_readwrite_slice) - self.op_flags[i].allocate = True - continue - if self.op_flags[i].rw == 'w': - continue - out_dtype = ufuncs.find_binop_result_dtype(space, - self.seq[i].get_dtype(), out_dtype) - for i in outargs: - if self.seq[i] is None: - # XXX can we postpone allocation to later? 
- self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) - else: - if not self.op_flags[i].broadcast: - # Raises if ooutput cannot be broadcast - shape_agreement(space, iter_shape, self.seq[i], False) - if self.tracked_index != "": - if self.order == "K": - self.order = self.seq[0].implementation.order - if self.tracked_index == "multi": - backward = False - else: - backward = self.order != self.tracked_index - self.index_iter = IndexIterator(iter_shape, backward=backward) - if len(self.dtypes) > 0: - # Make sure dtypes make sense - for i in range(len(self.seq)): - selfd = self.dtypes[i] - seq_d = self.seq[i].get_dtype() - if not selfd: - self.dtypes[i] = seq_d - elif selfd != seq_d and not 'r' in self.op_flags[i].tmp_copy: - raise OperationError(space.w_TypeError, space.wrap( - "Iterator operand required copying or buffering")) - else: - #copy them from seq - self.dtypes = [s.get_dtype() for s in self.seq] - if self.external_loop: - for i in range(len(self.seq)): - self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, - self.seq[i], iter_shape), self.op_flags[i])) - else: - for i in range(len(self.seq)): - self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i], iter_shape, self.dtypes[i]), - self.op_flags[i])) - - def set_op_axes(self, space, w_op_axes): - if space.len_w(w_op_axes) != len(self.seq): - raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) - op_axes = space.listview(w_op_axes) - l = -1 - for w_axis in op_axes: - if not space.is_none(w_axis): - axis_len = space.len_w(w_axis) - if l == -1: - l = axis_len - elif axis_len != l: - raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) - self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) - if l == -1: - raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list 
of axes must be contained within it")) - raise Exception('xxx TODO') - # Check that values make sense: - # - in bounds for each operand - # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions - # - no repeat axis - # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0 - - def descr_iter(self, space): - return space.wrap(self) - - def descr_getitem(self, space, w_idx): - idx = space.int_w(w_idx) - try: - ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) - except IndexError: - raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) - return ret - - def descr_setitem(self, space, w_idx, w_value): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_len(self, space): - space.wrap(len(self.iters)) - - def descr_next(self, space): - for it in self.iters: - if not it.done(): - break - else: - self.done = True - raise OperationError(space.w_StopIteration, space.w_None) - res = [] - if self.index_iter: - if not self.first_next: - self.index_iter.next() - else: - self.first_next = False - for i in range(len(self.iters)): - res.append(self.iters[i].getitem(space, self.seq[i])) - self.iters[i].next() - if len(res) <2: - return res[0] - return space.newtuple(res) - - def iternext(self): - if self.index_iter: - self.index_iter.next() - for i in range(len(self.iters)): - self.iters[i].next() - for it in self.iters: - if not it.done(): - break - else: - self.done = True - return self.done - return self.done - - def descr_iternext(self, space): - return space.wrap(self.iternext()) - - def descr_copy(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_debug_print(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_enable_external_loop(self, 
space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - @unwrap_spec(axis=int) - def descr_remove_axis(self, space, axis): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_remove_multi_index(self, space, w_multi_index): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_reset(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_get_operands(self, space): - l_w = [] - for op in self.seq: - l_w.append(op.descr_view(space)) - return space.newlist(l_w) - - def descr_get_dtypes(self, space): - res = [None] * len(self.seq) - for i in range(len(self.seq)): - res[i] = self.seq[i].descr_get_dtype(space) - return space.newtuple(res) - - def descr_get_finished(self, space): - return space.wrap(self.done) - - def descr_get_has_delayed_bufalloc(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_get_has_index(self, space): - return space.wrap(self.tracked_index in ["C", "F"]) - - def descr_get_index(self, space): - if not self.tracked_index in ["C", "F"]: - raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) - if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) - return space.wrap(self.index_iter.getvalue()) - - def descr_get_has_multi_index(self, space): - return space.wrap(self.tracked_index == "multi") - - def descr_get_multi_index(self, space): - if not self.tracked_index == "multi": - raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a multi-index")) - if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) - return space.newtuple([space.wrap(x) for x in self.index_iter.index]) - - def descr_get_iterationneedsapi(self, space): - raise 
OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_get_iterindex(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_get_itersize(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_get_itviews(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_get_ndim(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_get_nop(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_get_shape(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - def descr_get_value(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) - - - at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), - w_op_dtypes = WrappedDefault(None), order=str, - w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), - w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) -def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order='K'): - return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order) - -W_NDIter.typedef = TypeDef( - 'nditer', - __iter__ = interp2app(W_NDIter.descr_iter), - __getitem__ = interp2app(W_NDIter.descr_getitem), - __setitem__ = interp2app(W_NDIter.descr_setitem), - __len__ = interp2app(W_NDIter.descr_len), - - next = interp2app(W_NDIter.descr_next), - iternext = interp2app(W_NDIter.descr_iternext), - copy = interp2app(W_NDIter.descr_copy), - debug_print = interp2app(W_NDIter.descr_debug_print), - enable_external_loop = 
interp2app(W_NDIter.descr_enable_external_loop), - remove_axis = interp2app(W_NDIter.descr_remove_axis), - remove_multi_index = interp2app(W_NDIter.descr_remove_multi_index), - reset = interp2app(W_NDIter.descr_reset), - - operands = GetSetProperty(W_NDIter.descr_get_operands), - dtypes = GetSetProperty(W_NDIter.descr_get_dtypes), - finished = GetSetProperty(W_NDIter.descr_get_finished), - has_delayed_bufalloc = GetSetProperty(W_NDIter.descr_get_has_delayed_bufalloc), - has_index = GetSetProperty(W_NDIter.descr_get_has_index), - index = GetSetProperty(W_NDIter.descr_get_index), - has_multi_index = GetSetProperty(W_NDIter.descr_get_has_multi_index), - multi_index = GetSetProperty(W_NDIter.descr_get_multi_index), - iterationneedsapi = GetSetProperty(W_NDIter.descr_get_iterationneedsapi), - iterindex = GetSetProperty(W_NDIter.descr_get_iterindex), - itersize = GetSetProperty(W_NDIter.descr_get_itersize), - itviews = GetSetProperty(W_NDIter.descr_get_itviews), - ndim = GetSetProperty(W_NDIter.descr_get_ndim), - nop = GetSetProperty(W_NDIter.descr_get_nop), - shape = GetSetProperty(W_NDIter.descr_get_shape), - value = GetSetProperty(W_NDIter.descr_get_value), -) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,16 +282,14 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list, shape=None): +def shape_agreement_multiple(space, array_list): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - for arr in array_list: + shape = array_list[0].get_shape() + for arr in array_list[1:]: if not space.is_none(arr): - if shape is None: - shape = arr.get_shape() - else: - shape = shape_agreement(space, shape, arr) + shape = shape_agreement(space, shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py deleted file mode 100644 --- 
a/pypy/module/micronumpy/test/test_nditer.py +++ /dev/null @@ -1,294 +0,0 @@ -import py -from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest - - -class AppTestNDIter(BaseNumpyAppTest): - def test_basic(self): - from numpy import arange, nditer - a = arange(6).reshape(2,3) - r = [] - for x in nditer(a): - r.append(x) - assert r == [0, 1, 2, 3, 4, 5] - r = [] - - for x in nditer(a.T): - r.append(x) - assert r == [0, 1, 2, 3, 4, 5] - - def test_order(self): - from numpy import arange, nditer - a = arange(6).reshape(2,3) - r = [] - for x in nditer(a, order='C'): - r.append(x) - assert r == [0, 1, 2, 3, 4, 5] - r = [] - for x in nditer(a, order='F'): - r.append(x) - assert r == [0, 3, 1, 4, 2, 5] - - def test_readwrite(self): - from numpy import arange, nditer - a = arange(6).reshape(2,3) - for x in nditer(a, op_flags=['readwrite']): - x[...] = 2 * x - assert (a == [[0, 2, 4], [6, 8, 10]]).all() - - def test_external_loop(self): - from numpy import arange, nditer, array - a = arange(24).reshape(2, 3, 4) - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, nditer, a, flags=['external_loop']) - skip('nditer external_loop not implmented') - r = [] - n = 0 - for x in nditer(a, flags=['external_loop']): - r.append(x) - n += 1 - assert n == 1 - assert (array(r) == range(24)).all() - r = [] - n = 0 - for x in nditer(a, flags=['external_loop'], order='F'): - r.append(x) - n += 1 - assert n == 12 - assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() - e = raises(ValueError, 'r[0][0] = 0') - assert str(e.value) == 'assignment destination is read-only' - r = [] - for x in nditer(a.T, flags=['external_loop'], order='F'): - r.append(x) - array_r = array(r) - assert len(array_r.shape) == 2 - assert array_r.shape == (1,24) - assert (array(r) == arange(24)).all() - - def test_index(self): - from numpy import arange, nditer - a = 
arange(6).reshape(2,3) - - r = [] - it = nditer(a, flags=['c_index']) - assert it.has_index - for value in it: - r.append((value, it.index)) - assert r == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] - exc = None - try: - it.index - except ValueError, e: - exc = e - assert exc - - r = [] - it = nditer(a, flags=['f_index']) - assert it.has_index - for value in it: - r.append((value, it.index)) - assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] - - @py.test.mark.xfail(reason="Fortran order not implemented") - def test_iters_with_different_order(self): - from numpy import nditer, array - - a = array([[1, 2], [3, 4]], order="C") - b = array([[1, 2], [3, 4]], order="F") - - it = nditer([a, b]) - - assert list(it) == zip(range(1, 5), range(1, 5)) - - def test_interface(self): - from numpy import arange, nditer, zeros - import sys - a = arange(6).reshape(2,3) - r = [] - it = nditer(a, flags=['f_index']) - while not it.finished: - r.append((it[0], it.index)) - it.iternext() - assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] - it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, 'it[0] = 3') - skip('nditer.__setitem__ not implmented') - while not it.finished: - it[0] = it.multi_index[1] - it.multi_index[0] - it.iternext() - assert (a == [[0, 1, 2], [-1, 0, 1]]).all() - # b = zeros((2, 3)) - # exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) - # assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") - - def test_buffered(self): - from numpy import arange, nditer, array - a = arange(6).reshape(2,3) - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, nditer, a, flags=['buffered']) - skip('nditer buffered not implmented') - r = [] - for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): - r.append(x) - array_r = array(r) - assert len(array_r.shape) == 2 - assert array_r.shape == 
(1, 6) - assert (array_r == [0, 3, 1, 4, 2, 5]).all() - - def test_op_dtype(self): - from numpy import arange, nditer, sqrt, array - a = arange(6).reshape(2,3) - 3 - exc = raises(TypeError, nditer, a, op_dtypes=['complex']) - assert str(exc.value).startswith("Iterator operand required copying or buffering") - r = [] - for x in nditer(a, op_flags=['readonly','copy'], - op_dtypes=['complex128']): - r.append(sqrt(x)) - assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, - 1+0j, 1.41421356237+0j]).sum()) < 1e-5 - r = [] - for x in nditer(a, op_flags=['copy'], - op_dtypes=['complex128']): - r.append(sqrt(x)) - assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, - 1+0j, 1.41421356237+0j]).sum()) < 1e-5 - - def test_casting(self): - from numpy import arange, nditer - import sys - a = arange(6.) - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) - skip('nditer casting not implemented yet') - exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['float32']) - assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") - r = [] - for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], - casting='same_kind'): - r.append(x) - assert r == [0., 1., 2., 3., 4., 5.] 
- exc = raises(TypeError, nditer, a, flags=['buffered'], - op_dtypes=['int32'], casting='same_kind') - assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") - r = [] - b = arange(6) - exc = raises(TypeError, nditer, b, flags=['buffered'], op_dtypes=['float64'], - op_flags=['readwrite'], casting='same_kind') - assert str(exc.value).startswith("Iterator requested dtype could not be cast") - - def test_broadcast(self): - from numpy import arange, nditer - a = arange(3) - b = arange(6).reshape(2,3) - r = [] - for x,y in nditer([a, b]): - r.append((x, y)) - assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] - a = arange(2) - exc = raises(ValueError, nditer, [a, b]) - assert str(exc.value).find('shapes (2) (2,3)') > 0 - - def test_outarg(self): - from numpy import nditer, zeros, arange - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, nditer, [1, 2], flags=['external_loop']) - skip('nditer external_loop not implmented') - - def square1(a): - it = nditer([a, None]) - for x,y in it: - y[...] = x*x - return it.operands[1] - assert (square1([1, 2, 3]) == [1, 4, 9]).all() - - def square2(a, out=None): - it = nditer([a, out], flags=['external_loop', 'buffered'], - op_flags=[['readonly'], - ['writeonly', 'allocate', 'no_broadcast']]) - for x,y in it: - y[...] 
= x*x - return it.operands[1] - assert (square2([1, 2, 3]) == [1, 4, 9]).all() - b = zeros((3, )) - c = square2([1, 2, 3], out=b) - assert (c == [1., 4., 9.]).all() - assert (b == c).all() - exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) - assert str(exc.value).find('cannot be broadcasted') > 0 - - def test_outer_product(self): - from numpy import nditer, arange - a = arange(3) - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, nditer, a, flags=['external_loop']) - skip('nditer external_loop not implmented') - b = arange(8).reshape(2,4) - it = nditer([a, b, None], flags=['external_loop'], - op_axes=[[0, -1, -1], [-1, 0, 1], None]) - for x, y, z in it: - z[...] = x*y - assert it.operands[2].shape == (3, 2, 4) - for i in range(a.size): - assert (it.operands[2][i] == a[i]*b).all() - - def test_reduction(self): - from numpy import nditer, arange, array - import sys - a = arange(24).reshape(2, 3, 4) - b = array(0) - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) - skip('nditer reduce_ok not implemented yet') - #reduction operands must be readwrite - for x, y in nditer([a, b], flags=['reduce_ok', 'external_loop'], - op_flags=[['readonly'], ['readwrite']]): - y[...] += x - assert b == 276 - assert b == a.sum() - - # reduction and allocation requires op_axes and initialization - it = nditer([a, None], flags=['reduce_ok', 'external_loop'], - op_flags=[['readonly'], ['readwrite', 'allocate']], - op_axes=[None, [0,1,-1]]) - it.operands[1][...] = 0 - for x, y in it: - y[...] += x - - assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() - assert (it.operands[1] == a.sum(axis=2)).all() - - # previous example with buffering, requires more flags and reset - it = nditer([a, None], flags=['reduce_ok', 'external_loop', - 'buffered', 'delay_bufalloc'], - op_flags=[['readonly'], ['readwrite', 'allocate']], - op_axes=[None, [0,1,-1]]) - it.operands[1][...] 
= 0 - it.reset() - for x, y in it: - y[...] += x - - assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() - assert (it.operands[1] == a.sum(axis=2)).all() - - def test_get_dtypes(self): - from numpy import array, nditer - x = array([1, 2]) - y = array([1.0, 2.0]) - assert nditer([x, y]).dtypes == (x.dtype, y.dtype) - - def test_multi_index(self): - import numpy as np - a = np.arange(6).reshape(2, 3) - it = np.nditer(a, flags=['multi_index']) - res = [] - while not it.finished: - res.append((it[0], it.multi_index)) - it.iternext() - assert res == [(0, (0, 0)), (1, (0, 1)), - (2, (0, 2)), (3, (1, 0)), - (4, (1, 1)), (5, (1, 2))] From noreply at buildbot.pypy.org Thu Apr 17 01:39:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 01:39:16 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads - back out premature merge of nditer Message-ID: <20140416233916.472EF1C02FC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70678:16be85246762 Date: 2014-04-17 02:38 +0300 http://bitbucket.org/pypy/pypy/changeset/16be85246762/ Log: merge heads - back out premature merge of nditer diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -462,26 +462,23 @@ raise oefmt(self.w_SystemError, "getbuiltinmodule() called with non-builtin module %s", name) - else: - # Add the module to sys.modules and initialize the module - # The order is important to avoid recursions. - from pypy.interpreter.module import Module - if isinstance(w_mod, Module): - if not reuse and w_mod.startup_called: - # create a copy of the module. (see issue1514) - # eventlet patcher relies on this behaviour. 
- w_mod2 = self.wrap(Module(self, w_name)) - self.setitem(w_modules, w_name, w_mod2) - w_mod.getdict(self) # unlazy w_initialdict - self.call_method(w_mod2.getdict(self), 'update', - w_mod.w_initialdict) - return w_mod2 - # - w_mod.init(self) - # Add the module to sys.modules + # Add the module to sys.modules and initialize the module. The + # order is important to avoid recursions. + from pypy.interpreter.module import Module + if isinstance(w_mod, Module): + if not reuse and w_mod.startup_called: + # create a copy of the module. (see issue1514) eventlet + # patcher relies on this behaviour. + w_mod2 = self.wrap(Module(self, w_name)) + self.setitem(w_modules, w_name, w_mod2) + w_mod.getdict(self) # unlazy w_initialdict + self.call_method(w_mod2.getdict(self), 'update', + w_mod.w_initialdict) + return w_mod2 self.setitem(w_modules, w_name, w_mod) - return w_mod + w_mod.init(self) + return w_mod def get_builtinmodule_to_install(self): """NOT_RPYTHON""" From noreply at buildbot.pypy.org Thu Apr 17 01:49:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 17 Apr 2014 01:49:02 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140416234902.99D1B1C02FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70679:cf13db4663c3 Date: 2014-04-16 19:48 -0400 http://bitbucket.org/pypy/pypy/changeset/cf13db4663c3/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -137,3 +137,5 @@ .. branch: issue1514 Fix issues with reimporting builtin modules + +.. 
branch: numpypy-nditer From noreply at buildbot.pypy.org Thu Apr 17 05:27:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 17 Apr 2014 05:27:27 +0200 (CEST) Subject: [pypy-commit] pypy default: enable another test_zjit Message-ID: <20140417032727.163481C02FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70680:dd3c87c0ab56 Date: 2014-04-16 21:18 -0400 http://bitbucket.org/pypy/pypy/changeset/dd3c87c0ab56/ Log: enable another test_zjit diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -94,14 +94,26 @@ a -> 3 """ - def test_floatadd(self): + def test_float_add(self): result = self.run("float_add") assert result == 3 + 3 - py.test.skip("don't run for now") - self.check_simple_loop({"raw_load": 1, "float_add": 1, - "raw_store": 1, "int_add": 1, - "int_ge": 1, "guard_false": 1, "jump": 1, - 'arraylen_gc': 1}) + self.check_trace_count(1) + self.check_simple_loop({ + 'float_add': 1, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, + 'guard_false': 1, + 'guard_not_invalidated': 1, + 'guard_true': 3, + 'int_add': 9, + 'int_ge': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + 'setfield_gc': 6, + }) def define_sum(): return """ From noreply at buildbot.pypy.org Thu Apr 17 05:36:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 17 Apr 2014 05:36:46 +0200 (CEST) Subject: [pypy-commit] pypy default: test with tzname rather than tzset for win32 compat Message-ID: <20140417033646.0F9071C0476@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70681:19e7e8192d2e Date: 2014-04-16 23:36 -0400 http://bitbucket.org/pypy/pypy/changeset/19e7e8192d2e/ Log: test with tzname rather than tzset for win32 compat diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ 
b/pypy/module/imp/test/test_import.py @@ -587,17 +587,17 @@ def test_reimport_builtin(self): import sys, time oldpath = sys.path - time.tzset = "" + time.tzname = "" del sys.modules['time'] import time as time1 assert sys.modules['time'] is time1 - assert time.tzset == "" + assert time.tzname == "" - reload(time1) # don't leave a broken time.tzset behind + reload(time1) # don't leave a broken time.tzname behind import time - assert time.tzset != "" + assert time.tzname != "" def test_reload_infinite(self): import infinite_reload From noreply at buildbot.pypy.org Thu Apr 17 07:40:12 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 17 Apr 2014 07:40:12 +0200 (CEST) Subject: [pypy-commit] pypy default: support astype in micronumpy compile Message-ID: <20140417054012.DE4E91C0476@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70682:be03fa07697d Date: 2014-04-17 01:22 -0400 http://bitbucket.org/pypy/pypy/changeset/be03fa07697d/ Log: support astype in micronumpy compile diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -37,7 +37,7 @@ "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] -TWO_ARG_FUNCTIONS_OR_NONE = ['view'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -596,6 +596,8 @@ arg = self.args[1].execute(interp) if self.name == 'view': w_res = arr.descr_view(interp.space, arg) + elif self.name == 'astype': + w_res = arr.descr_astype(interp.space, arg) else: assert False else: diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -319,3 +319,14 @@ ''') results = interp.results[0] assert isinstance(results, W_NDimArray) + + def test_astype_dtype(self): + interp = 
self.run(''' + a = [1, 0, 3, 0] + b = int + c = astype(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) + assert results.get_dtype().is_int() From noreply at buildbot.pypy.org Thu Apr 17 07:40:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 17 Apr 2014 07:40:14 +0200 (CEST) Subject: [pypy-commit] pypy default: add a test_zjit for pow Message-ID: <20140417054014.1D56F1C0476@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70683:f2bdd3baf2d1 Date: 2014-04-17 01:26 -0400 http://bitbucket.org/pypy/pypy/changeset/f2bdd3baf2d1/ Log: add a test_zjit for pow diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -388,6 +388,8 @@ w_res = w_lhs.descr_mul(interp.space, w_rhs) elif self.name == '-': w_res = w_lhs.descr_sub(interp.space, w_rhs) + elif self.name == '**': + w_res = w_lhs.descr_pow(interp.space, w_rhs) elif self.name == '->': if isinstance(w_rhs, FloatObject): w_rhs = IntObject(int(w_rhs.floatval)) @@ -622,7 +624,7 @@ (':', 'colon'), ('\w+', 'identifier'), ('\]', 'array_right'), - ('(->)|[\+\-\*\/]', 'operator'), + ('(->)|[\+\-\*\/]+', 'operator'), ('=', 'assign'), (',', 'comma'), ('\|', 'pipe'), diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -115,6 +115,38 @@ 'setfield_gc': 6, }) + def define_pow(): + return """ + a = |30| ** 2 + a -> 3 + """ + + def test_pow(self): + result = self.run("pow") + assert result == 3 ** 2 + self.check_trace_count(1) + self.check_simple_loop({ + 'call': 3, + 'float_add': 1, + 'float_eq': 3, + 'float_mul': 2, + 'float_ne': 1, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, + 'guard_false': 4, + 'guard_not_invalidated': 1, + 'guard_true': 5, + 'int_add': 9, + 'int_ge': 1, + 'int_is_true': 1, + 'int_lt': 3, + 
'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + 'setfield_gc': 6, + }) + def define_sum(): return """ a = |30| From noreply at buildbot.pypy.org Thu Apr 17 10:34:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 17 Apr 2014 10:34:35 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: kill more Message-ID: <20140417083435.483C81D2954@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70684:7780bb8b2a13 Date: 2014-04-16 17:43 +0200 http://bitbucket.org/pypy/pypy/changeset/7780bb8b2a13/ Log: kill more diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -123,7 +123,6 @@ gct_stm_become_globally_unique_transaction = _gct_with_roots_pushed gct_stm_perform_transaction = _gct_with_roots_pushed - gct_stm_inspect_abort_info = _gct_with_roots_pushed gct_stm_threadlocalref_set = _gct_with_roots_pushed diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -959,9 +959,6 @@ op_stm_get_atomic = _stm_not_implemented op_stm_change_atomic = _stm_not_implemented op_stm_set_transaction_length = _stm_not_implemented - op_stm_abort_info_push = _stm_not_implemented - op_stm_abort_info_pop = _stm_not_implemented - op_stm_inspect_abort_info = _stm_not_implemented op_stm_threadlocal_get = _stm_not_implemented op_stm_threadlocal_set = _stm_not_implemented op_stm_threadlocalref_get = _stm_not_implemented diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -448,10 +448,6 @@ 'stm_decrement_atomic': LLOp(), 'stm_get_atomic': LLOp(sideeffects=False), - 'stm_abort_info_push': LLOp(), - 'stm_abort_info_pop': LLOp(), - 'stm_inspect_abort_info': 
LLOp(sideeffects=False, canmallocgc=True), - 'stm_ignored_start': LLOp(canrun=True), 'stm_ignored_stop': LLOp(canrun=True), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -196,19 +196,6 @@ def stm_abort_and_retry(funcgen, op): return 'stm_abort_transaction();' -def stm_abort_info_push(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - arg1 = funcgen.expr(op.args[1]) - return '//XXX stm_abort_info_push((gcptr)%s, %s);' % (arg0, arg1) - -def stm_abort_info_pop(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - return '//XXX stm_abort_info_pop(%s);' % (arg0,) - -def stm_inspect_abort_info(funcgen, op): - result = funcgen.expr(op.result) - return '%s = NULL; //XXX stm_inspect_abort_info();' % (result,) - def stm_ignored_start(funcgen, op): return '/* stm_ignored_start */' From noreply at buildbot.pypy.org Thu Apr 17 10:34:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 17 Apr 2014 10:34:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix? Message-ID: <20140417083436.86BBC1D2954@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70685:67f0c5eb1bd1 Date: 2014-04-17 10:33 +0200 http://bitbucket.org/pypy/pypy/changeset/67f0c5eb1bd1/ Log: Fix? 
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -365,7 +365,7 @@ # we're returning with a guard_not_forced_2, and # additionally we need to say that r0 contains # a reference too: - self._finish_gcmap[0] |= r_uint(0) + self._finish_gcmap[0] |= r_uint(1) gcmap = self._finish_gcmap else: gcmap = self.gcmap_for_finish From noreply at buildbot.pypy.org Thu Apr 17 10:51:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 17 Apr 2014 10:51:54 +0200 (CEST) Subject: [pypy-commit] pypy default: A skipped test Message-ID: <20140417085154.D143F1D294F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70686:1f54c89f98a0 Date: 2014-04-17 10:51 +0200 http://bitbucket.org/pypy/pypy/changeset/1f54c89f98a0/ Log: A skipped test diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -167,6 +167,12 @@ def is_call_release_gil(self): return bool(self.call_release_gil_target) + def __repr__(self): + more = '' + if self.oopspecindex: + more = ' OS=%r' % (self.oopspecindex,) + return '' % (id(self), self.extraeffect, more) + def frozenset_or_none(x): if x is None: diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3418,6 +3418,25 @@ 'int_sub': 2, 'jump': 1, 'call': 2, 'guard_no_exception': 2, 'int_add': 4}) + def test_elidable_method(self): + py.test.skip("method sometimes @elidable and sometimes not") + class A(object): + def meth(self): + return 41 + class B(A): + @elidable + def meth(self): + return 42 + x = B() + def callme(x): + return x.meth() + def f(): + callme(A()) + return callme(x) + res = self.interp_operations(f, []) + assert res == 42 + 
self.check_operations_history({'finish': 1}) + def test_look_inside_iff_const_getarrayitem_gc_pure(self): driver = JitDriver(greens=['unroll'], reds=['s', 'n']) From noreply at buildbot.pypy.org Thu Apr 17 11:28:59 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 17 Apr 2014 11:28:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: try emitting less transaction breaks by adding some logic from Message-ID: <20140417092859.F2BC11D28EB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70687:bd3323514386 Date: 2014-04-17 11:14 +0200 http://bitbucket.org/pypy/pypy/changeset/bd3323514386/ Log: try emitting less transaction breaks by adding some logic from optimizeopt/stm.py to heapcache. diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -45,6 +45,13 @@ self.input_indirections = {} self.output_indirections = {} + + # to do some of the work of optimizeopt/stm.py, we have a similar + # logic here: + self.stm_break_wanted = True + + + def _input_indirection(self, box): return self.input_indirections.get(box, box) @@ -120,6 +127,9 @@ opnum == rop.COPYSTRCONTENT or opnum == rop.COPYUNICODECONTENT): return + if opnum in (rop.GUARD_NOT_FORCED, rop.GUARD_NOT_FORCED_2): + self.stm_break_wanted = True + return if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST): @@ -184,6 +194,7 @@ del cache[frombox] return else: + self.stm_break_wanted = True # Only invalidate things that are either escaped or arguments for descr, boxes in self.heap_cache.iteritems(): for box in boxes.keys(): @@ -202,6 +213,8 @@ self.heap_cache.clear() self.heap_array_cache.clear() + self.stm_break_wanted = True + def is_class_known(self, box): return box in self.known_class_boxes @@ -308,3 +321,6 @@ def replace_box(self, oldbox, newbox): 
self.input_indirections[self._output_indirection(newbox)] = self._input_indirection(oldbox) self.output_indirections[self._input_indirection(oldbox)] = self._output_indirection(newbox) + + def stm_break_done(self): + self.stm_break_wanted = False diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -197,25 +197,33 @@ mi.vrefs_after_residual_call() mi.vable_after_residual_call() mi.generate_guard(rop.GUARD_NOT_FORCED, None) - - + self.metainterp.heapcache.stm_break_done() + + @arguments("int") def opimpl_stm_should_break_transaction(self, if_there_is_no_other): - from rpython.rtyper.lltypesystem import llmemory val = bool(if_there_is_no_other) mi = self.metainterp if val: + # app-level loop: only one of these per loop is really needed resbox = history.BoxInt(0) mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox) + self.metainterp.heapcache.stm_break_done() return resbox else: - self._record_stm_transaction_break(False) + # between byte-code instructions: only keep if it is + # likely that we are inevitable here + if self.metainterp.heapcache.stm_break_wanted: + self._record_stm_transaction_break(False) return ConstInt(0) @arguments() def opimpl_stm_transaction_break(self): + # always wanted: inserted after we compile a bridge because there + # were just too many breaks and we failed the should_break&guard + # because of that self._record_stm_transaction_break(True) - + for _opimpl in ['int_add', 'int_sub', 'int_mul', 'int_floordiv', 'int_mod', 'int_lt', 'int_le', 'int_eq', 'int_ne', 'int_gt', 'int_ge', @@ -1692,7 +1700,7 @@ self.call_ids = [] self.current_call_id = 0 - + def retrace_needed(self, trace): self.partial_trace = trace @@ -1819,6 +1827,8 @@ if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: resumedescr = compile.ResumeGuardForcedDescr(self.staticdata, self.jitdriver_sd) + # for detecting stm breaks that are needed 
+ self.heapcache.invalidate_caches(opnum, resumedescr, moreargs) elif opnum == rop.GUARD_NOT_INVALIDATED: resumedescr = compile.ResumeGuardNotInvalidated() else: diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -533,3 +533,27 @@ [] ) assert h.getarrayitem(box1, index1, descr1) is box3 + + def test_stm_break(self): + h = HeapCache() + assert h.stm_break_wanted + h.stm_break_done() + assert not h.stm_break_wanted + # loop headers + h.reset() + assert h.stm_break_wanted + h.stm_break_done() + assert not h.stm_break_wanted + # call that may make the transaction inevitable + h.invalidate_caches( + rop.CALL, FakeCallDescr(FakeEffectinfo.EF_RANDOM_EFFECTS), [box1] + ) + assert h.stm_break_wanted + h.stm_break_done() + # unknown op + h.invalidate_caches(rop.JIT_DEBUG, None, [box1, lengthbox2, box2]) + assert h.stm_break_wanted + h.stm_break_done() + # GUARD_NOT_FORCED + h.invalidate_caches(rop.GUARD_NOT_FORCED, None, []) + assert h.stm_break_wanted diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -43,16 +43,29 @@ self.interp_operations(g, [], translationoptions={"stm":True}) self.check_operations_history({'stm_transaction_break':1, 'guard_not_forced':1}) - - - + + def test_heapcache(self): + import time + def g(): + rstm.jit_stm_should_break_transaction(True) # keep (start of loop) + rstm.jit_stm_should_break_transaction(False) + time.sleep(0) + rstm.jit_stm_should_break_transaction(False) # keep (after guard_not_forced) + rstm.jit_stm_should_break_transaction(True) # keep (True) + rstm.jit_stm_should_break_transaction(True) # keep (True) + rstm.jit_stm_should_break_transaction(False) + return 42 + res = self.interp_operations(g, [], translationoptions={"stm":True}) + 
assert res == 42 + self.check_operations_history({ + 'stm_transaction_break':1, + 'stm_should_break_transaction':3, + 'guard_not_forced':2, + 'guard_no_exception':1, + 'call_may_force':1}) + + + class TestLLtype(STMTests, LLJitMixin): pass - - - - - - - diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -37,6 +37,7 @@ if we_are_translated(): llop.jit_stm_transaction_break_point(lltype.Void) + at specialize.arg(0) def jit_stm_should_break_transaction(if_there_is_no_other): # XXX REFACTOR AWAY # if_there_is_no_other means that we use this point only From noreply at buildbot.pypy.org Thu Apr 17 11:29:01 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 17 Apr 2014 11:29:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: test one more case Message-ID: <20140417092901.4C07B1D28EB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70688:76eada48fdd8 Date: 2014-04-17 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/76eada48fdd8/ Log: test one more case diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -51,6 +51,7 @@ rstm.jit_stm_should_break_transaction(False) time.sleep(0) rstm.jit_stm_should_break_transaction(False) # keep (after guard_not_forced) + rstm.jit_stm_should_break_transaction(False) rstm.jit_stm_should_break_transaction(True) # keep (True) rstm.jit_stm_should_break_transaction(True) # keep (True) rstm.jit_stm_should_break_transaction(False) From noreply at buildbot.pypy.org Thu Apr 17 11:29:02 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 17 Apr 2014 11:29:02 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Merge Message-ID: <20140417092902.D6EE91D28EB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70689:68bd542ca517 Date: 2014-04-17 11:29 +0200 
http://bitbucket.org/pypy/pypy/changeset/68bd542ca517/ Log: Merge diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -56,10 +56,6 @@ return frame def enter(self, frame): - if self.space.config.translation.stm: - if not self.space.config.translation.jit: # XXX - from pypy.module.thread.stm import enter_frame - enter_frame(self, frame) frame.f_backref = self.topframeref self.topframeref = jit.virtual_ref(frame) @@ -81,11 +77,6 @@ frame_vref() jit.virtual_ref_finish(frame_vref, frame) - if self.space.config.translation.stm: - if not self.space.config.translation.jit: # XXX - from pypy.module.thread.stm import leave_frame - leave_frame(self, frame) - # ________________________________________________________________ def c_call_trace(self, frame, w_func, args=None): diff --git a/pypy/module/thread/stm.py b/pypy/module/thread/stm.py --- a/pypy/module/thread/stm.py +++ b/pypy/module/thread/stm.py @@ -41,21 +41,6 @@ if not we_are_translated() and not hasattr(ec, '_thread_local_dicts'): initialize_execution_context(ec) - at jit.dont_look_inside # XXX: handle abort_info_push in JIT -def enter_frame(ec, frame): - """Called from ExecutionContext.enter().""" - if frame.hide(): - return - rstm.abort_info_push(frame.pycode, ('[', 'co_filename', 'co_name', - 'co_firstlineno', 'co_lnotab')) - rstm.abort_info_push(frame, ('last_instr', ']')) - -def leave_frame(ec, frame): - """Called from ExecutionContext.leave().""" - if frame.hide(): - return - rstm.abort_info_pop(2) - class STMThreadLocals(BaseThreadLocals): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -859,11 +859,18 @@ # (ebp is a writeable object and does not need a write-barrier # again (ensured by the code calling the loop)) self.mc.MOV(ebx, self.heap_shadowstack_top()) 
- self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), ebp.value) - # MOV [ebx], ebp if self.cpu.gc_ll_descr.stm: + self.mc.MOV_mi((self.SEGMENT_NO, ebx.value, 0), + rstm.STM_STACK_MARKER_NEW) # MOV [ebx], MARKER_NEW + self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, WORD), + ebp.value) # MOV [ebx+WORD], ebp self.mc.MOV_sr(STM_OLD_SHADOWSTACK, ebx.value) - self.mc.ADD_ri(ebx.value, WORD) + # MOV [esp+xx], ebx + self.mc.ADD_ri(ebx.value, 2 * WORD) + else: + self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), + ebp.value) # MOV [ebx], ebp + self.mc.ADD_ri(ebx.value, WORD) self.mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx def _call_footer_shadowstack(self): diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -123,7 +123,6 @@ gct_stm_become_globally_unique_transaction = _gct_with_roots_pushed gct_stm_perform_transaction = _gct_with_roots_pushed - gct_stm_inspect_abort_info = _gct_with_roots_pushed gct_stm_threadlocalref_set = _gct_with_roots_pushed diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -12,6 +12,8 @@ TID = rffi.UINT tid_offset = CFlexSymbolic('offsetof(struct rpyobj_s, tid)') stm_nb_segments = CFlexSymbolic('STM_NB_SEGMENTS') +stm_stack_marker_new = CFlexSymbolic('STM_STACK_MARKER_NEW') +stm_stack_marker_old = CFlexSymbolic('STM_STACK_MARKER_OLD') adr_nursery_free = CFlexSymbolic('((long)&STM_SEGMENT->nursery_current)') adr_nursery_top = CFlexSymbolic('((long)&STM_SEGMENT->nursery_end)') adr_pypy_stm_nursery_low_fill_mark = ( @@ -79,18 +81,6 @@ def is_atomic(): return llop.stm_get_atomic(lltype.Signed) -def abort_info_push(instance, fieldnames): - "Special-cased below." 
- - at dont_look_inside -def abort_info_pop(count): - if we_are_translated(): - llop.stm_abort_info_pop(lltype.Void, count) - - at dont_look_inside -def charp_inspect_abort_info(): - return llop.stm_inspect_abort_info(rffi.CCHARP) - @dont_look_inside def abort_and_retry(): llop.stm_abort_and_retry(lltype.Void) @@ -160,58 +150,6 @@ # ____________________________________________________________ -class AbortInfoPush(ExtRegistryEntry): - _about_ = abort_info_push - - def compute_result_annotation(self, s_instance, s_fieldnames): - from rpython.annotator.model import SomeInstance - assert isinstance(s_instance, SomeInstance) - assert s_fieldnames.is_constant() - assert isinstance(s_fieldnames.const, tuple) # tuple of names - - def specialize_call(self, hop): - fieldnames = hop.args_s[1].const - lst = [] - v_instance = hop.inputarg(hop.args_r[0], arg=0) - for fieldname in fieldnames: - if fieldname == '[': - lst.append(-2) # start of sublist - continue - if fieldname == ']': - lst.append(-1) # end of sublist - continue - fieldname = 'inst_' + fieldname - extraofs = None - STRUCT = v_instance.concretetype.TO - while not hasattr(STRUCT, fieldname): - STRUCT = STRUCT.super - TYPE = getattr(STRUCT, fieldname) - if TYPE == lltype.Signed: - kind = 1 - elif TYPE == lltype.Unsigned: - kind = 2 - elif TYPE == lltype.Ptr(rstr.STR): - kind = 3 - extraofs = llmemory.offsetof(rstr.STR, 'chars') - else: - raise NotImplementedError( - "abort_info_push(%s, %r): field of type %r" - % (STRUCT.__name__, fieldname, TYPE)) - lst.append(kind) - lst.append(llmemory.offsetof(STRUCT, fieldname)) - if extraofs is not None: - lst.append(extraofs) - lst.append(0) - ARRAY = rffi.CArray(lltype.Signed) - array = lltype.malloc(ARRAY, len(lst), flavor='raw', immortal=True) - for i in range(len(lst)): - array[i] = lst[i] - c_array = hop.inputconst(lltype.Ptr(ARRAY), array) - hop.exception_cannot_occur() - hop.genop('stm_abort_info_push', [v_instance, c_array]) - -# 
____________________________________________________________ - class ThreadLocalReference(object): _COUNT = 1 diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -959,9 +959,6 @@ op_stm_get_atomic = _stm_not_implemented op_stm_change_atomic = _stm_not_implemented op_stm_set_transaction_length = _stm_not_implemented - op_stm_abort_info_push = _stm_not_implemented - op_stm_abort_info_pop = _stm_not_implemented - op_stm_inspect_abort_info = _stm_not_implemented op_stm_threadlocal_get = _stm_not_implemented op_stm_threadlocal_set = _stm_not_implemented op_stm_threadlocalref_get = _stm_not_implemented diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -448,10 +448,6 @@ 'stm_decrement_atomic': LLOp(), 'stm_get_atomic': LLOp(sideeffects=False), - 'stm_abort_info_push': LLOp(), - 'stm_abort_info_pop': LLOp(), - 'stm_inspect_abort_info': LLOp(sideeffects=False, canmallocgc=True), - 'stm_ignored_start': LLOp(canrun=True), 'stm_ignored_stop': LLOp(canrun=True), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -196,19 +196,6 @@ def stm_abort_and_retry(funcgen, op): return 'stm_abort_transaction();' -def stm_abort_info_push(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - arg1 = funcgen.expr(op.args[1]) - return '//XXX stm_abort_info_push((gcptr)%s, %s);' % (arg0, arg1) - -def stm_abort_info_pop(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - return '//XXX stm_abort_info_pop(%s);' % (arg0,) - -def stm_inspect_abort_info(funcgen, op): - result = funcgen.expr(op.result) - return '%s = NULL; //XXX stm_inspect_abort_info();' % (result,) - def stm_ignored_start(funcgen, op): return '/* stm_ignored_start */' diff --git 
a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -a158a889e78b +918b1901b1f9 diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -380,8 +380,8 @@ struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - assert(current->ss != (object_t *)-1); - mark_visit_object(current->ss, segment_base); + if (((uintptr_t)current->ss) > STM_STACK_MARKER_OLD) + mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -157,10 +157,32 @@ stm_thread_local_t *tl = STM_SEGMENT->running_thread; struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; - while (current-- != base) { - assert(current->ss != (object_t *)-1); - minor_trace_if_young(¤t->ss); + while (1) { + --current; + OPT_ASSERT(current >= base); + + switch ((uintptr_t)current->ss) { + + case 0: /* NULL */ + continue; + + case STM_STACK_MARKER_NEW: + /* the marker was not already seen: mark it as seen, + but continue looking more deeply in the shadowstack */ + current->ss = (object_t *)STM_STACK_MARKER_OLD; + continue; + + case STM_STACK_MARKER_OLD: + /* the marker was already seen: we can stop the + root stack tracing at this point */ + goto interrupt; + + default: + /* the stack entry is a regular pointer */ + minor_trace_if_young(¤t->ss); + } } + interrupt: minor_trace_if_young(&tl->thread_local_obj); } diff --git 
a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -154,11 +154,13 @@ struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start; tl->shadowstack = s; tl->shadowstack_base = s; + STM_PUSH_ROOT(*tl, STM_STACK_MARKER_OLD); } static void _done_shadow_stack(stm_thread_local_t *tl) { - assert(tl->shadowstack >= tl->shadowstack_base); + assert(tl->shadowstack > tl->shadowstack_base); + assert(tl->shadowstack_base->ss == (object_t *)STM_STACK_MARKER_OLD); char *start = (char *)tl->shadowstack_base; _shadowstack_trap_page(start, PROT_READ | PROT_WRITE); diff --git a/rpython/translator/stm/src_stm/stm/timing.c b/rpython/translator/stm/src_stm/stm/timing.c --- a/rpython/translator/stm/src_stm/stm/timing.c +++ b/rpython/translator/stm/src_stm/stm/timing.c @@ -71,7 +71,7 @@ s_mutex_lock(); fprintf(stderr, "thread %p:\n", tl); for (i = 0; i < _STM_TIME_N; i++) { - fprintf(stderr, " %-24s %9u %.3f s\n", + fprintf(stderr, " %-24s %9u %8.3f s\n", timer_names[i], tl->events[i], (double)tl->timing[i]); } s_mutex_unlock(); diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -265,6 +265,8 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) +#define STM_STACK_MARKER_NEW 1 +#define STM_STACK_MARKER_OLD 2 /* Every thread needs to have a corresponding stm_thread_local_t From noreply at buildbot.pypy.org Thu Apr 17 13:16:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 17 Apr 2014 13:16:15 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20140417111615.A6F811C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 
Changeset: r70692:50398d1a134d Date: 2014-04-17 13:15 +0200 http://bitbucket.org/pypy/pypy/changeset/50398d1a134d/ Log: fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -861,7 +861,7 @@ self.mc.MOV(ebx, self.heap_shadowstack_top()) if self.cpu.gc_ll_descr.stm: self.mc.MOV_mi((self.SEGMENT_NO, ebx.value, 0), - rstm.STM_STACK_MARKER_NEW) # MOV [ebx], MARKER_NEW + rstm.stm_stack_marker_new) # MOV [ebx], MARKER_NEW self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, WORD), ebp.value) # MOV [ebx+WORD], ebp self.mc.MOV_sr(STM_OLD_SHADOWSTACK, ebx.value) From noreply at buildbot.pypy.org Thu Apr 17 14:02:44 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 14:02:44 +0200 (CEST) Subject: [pypy-commit] pypy default: make sure winsock2.h precedes windows.h, since windows.h imports old winsock.h Message-ID: <20140417120244.9FBA61D2A9D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70693:81238d6d4adf Date: 2014-04-17 15:01 +0200 http://bitbucket.org/pypy/pypy/changeset/81238d6d4adf/ Log: make sure winsock2.h precedes windows.h, since windows.h imports old winsock.h diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -1,3 +1,6 @@ +#ifndef _THREAD_NT_H +#define _THREAD_NT_H +#include #include /* @@ -19,4 +22,4 @@ void RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock); long RPyThreadGetStackSize(void); long RPyThreadSetStackSize(long); - +#endif From noreply at buildbot.pypy.org Thu Apr 17 15:02:04 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 15:02:04 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: wip Message-ID: <20140417130204.7BDF11D2840@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70694:7a493d5e93f3 Date: 
2014-04-17 02:25 +0300 http://bitbucket.org/pypy/pypy/changeset/7a493d5e93f3/ Log: wip diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -45,6 +45,18 @@ from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray +class ScalarIter(object): + def __init__(self, array): + self.array = array + + def done(self): + return True + + def next(self): + pass + + def getitem(self): + return self.array.getitem(0) class PureShapeIter(object): def __init__(self, shape, idx_w): @@ -137,6 +149,7 @@ return self.array.getitem_bool(self.offset) def setitem(self, elem): + print 'setting',self.offset,'to',elem self.array.setitem(self.offset, elem) class SliceIterator(ArrayIter): diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -5,7 +5,7 @@ from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.strides import (calculate_broadcast_strides, shape_agreement, shape_agreement_multiple) -from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator, ScalarIter from pypy.module.micronumpy.concrete import SliceArray from pypy.module.micronumpy.descriptor import decode_w_dtype from pypy.module.micronumpy import ufuncs @@ -205,6 +205,8 @@ def get_iter(space, order, arr, shape, dtype): imp = arr.implementation.astype(space, dtype) backward = is_backward(imp, order) + if arr.is_scalar(): + return ScalarIter(imp) if (imp.strides[0] < imp.strides[-1] and not backward) or \ (imp.strides[0] > imp.strides[-1] and backward): # flip the strides. Is this always true for multidimension? 
@@ -310,16 +312,19 @@ shape=out_shape) if len(outargs) > 0: # Make None operands writeonly and flagged for allocation - out_dtype = self.dtypes[0] if len(self.dtypes) > 0 else None - for i in range(len(self.seq)): - if self.seq[i] is None: - self.op_flags[i].get_it_item = (get_readwrite_item, + if len(self.dtypes) > 0: + out_dtype = self.dtypes[outargs[0]] + else: + out_dtype = None + for i in range(len(self.seq)): + if self.seq[i] is None: + self.op_flags[i].get_it_item = (get_readwrite_item, get_readwrite_slice) - self.op_flags[i].allocate = True - continue - if self.op_flags[i].rw == 'w': - continue - out_dtype = ufuncs.find_binop_result_dtype(space, + self.op_flags[i].allocate = True + continue + if self.op_flags[i].rw == 'w': + continue + out_dtype = ufuncs.find_binop_result_dtype(space, self.seq[i].get_dtype(), out_dtype) for i in outargs: if self.seq[i] is None: @@ -346,7 +351,7 @@ self.dtypes[i] = seq_d elif selfd != seq_d and not 'r' in self.op_flags[i].tmp_copy: raise OperationError(space.w_TypeError, space.wrap( - "Iterator operand required copying or buffering")) + "Iterator operand required copying or buffering for operand %d" % i)) else: #copy them from seq self.dtypes = [s.get_dtype() for s in self.seq] diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -155,6 +155,17 @@ r.append(sqrt(x)) assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + multi = nditer([None, array([2, 3], dtype='int64'), array(2., dtype='double')], + op_dtypes = ['int64', 'int64', 'float64'], + op_flags = [['writeonly', 'allocate'], ['readonly'], ['readonly']]) + print 'starting the real mccoy' + for a, b, c in multi: + print 'in loop' + a[...] 
= b * c + print multi.operands[0] + print multi.operands[1] + print multi.operands[2] + assert (multi.operands[0] == [4, 6]).all() def test_casting(self): from numpy import arange, nditer From noreply at buildbot.pypy.org Thu Apr 17 15:02:05 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 15:02:05 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: handle scalars with existing ArrayIter Message-ID: <20140417130205.B1C2B1D2840@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70695:ec34892bca6f Date: 2014-04-17 09:32 +0300 http://bitbucket.org/pypy/pypy/changeset/ec34892bca6f/ Log: handle scalars with existing ArrayIter diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -45,19 +45,6 @@ from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray -class ScalarIter(object): - def __init__(self, array): - self.array = array - - def done(self): - return True - - def next(self): - pass - - def getitem(self): - return self.array.getitem(0) - class PureShapeIter(object): def __init__(self, shape, idx_w): self.shape = shape diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -206,7 +206,7 @@ imp = arr.implementation.astype(space, dtype) backward = is_backward(imp, order) if arr.is_scalar(): - return ScalarIter(imp) + return ArrayIter(imp, 1, [], [], []) if (imp.strides[0] < imp.strides[-1] and not backward) or \ (imp.strides[0] > imp.strides[-1] and backward): # flip the strides. Is this always true for multidimension? 
From noreply at buildbot.pypy.org Thu Apr 17 15:02:06 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 15:02:06 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: test, fix op_dtypes and scalar operands Message-ID: <20140417130206.DB8E01D2840@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70696:2b882cea5282 Date: 2014-04-17 10:20 +0300 http://bitbucket.org/pypy/pypy/changeset/2b882cea5282/ Log: test, fix op_dtypes and scalar operands diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -136,7 +136,6 @@ return self.array.getitem_bool(self.offset) def setitem(self, elem): - print 'setting',self.offset,'to',elem self.array.setitem(self.offset, elem) class SliceIterator(ArrayIter): diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -5,7 +5,7 @@ from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.strides import (calculate_broadcast_strides, shape_agreement, shape_agreement_multiple) -from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator, ScalarIter +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator from pypy.module.micronumpy.concrete import SliceArray from pypy.module.micronumpy.descriptor import decode_w_dtype from pypy.module.micronumpy import ufuncs @@ -203,7 +203,7 @@ raise NotImplementedError('not implemented yet') def get_iter(space, order, arr, shape, dtype): - imp = arr.implementation.astype(space, dtype) + imp = arr.implementation backward = is_backward(imp, order) if arr.is_scalar(): return ArrayIter(imp, 1, [], [], []) @@ -349,9 +349,13 @@ seq_d = self.seq[i].get_dtype() if not selfd: self.dtypes[i] = seq_d - elif selfd != seq_d and not 'r' in self.op_flags[i].tmp_copy: - raise 
OperationError(space.w_TypeError, space.wrap( - "Iterator operand required copying or buffering for operand %d" % i)) + elif selfd != seq_d: + if not 'r' in self.op_flags[i].tmp_copy: + raise OperationError(space.w_TypeError, space.wrap( + "Iterator operand required copying or buffering for operand %d" % i)) + impl = self.seq[i].implementation + new_impl = impl.astype(space, selfd) + self.seq[i] = W_NDimArray(new_impl) else: #copy them from seq self.dtypes = [s.get_dtype() for s in self.seq] @@ -364,7 +368,6 @@ self.iters.append(BoxIterator(get_iter(space, self.order, self.seq[i], iter_shape, self.dtypes[i]), self.op_flags[i])) - def set_op_axes(self, space, w_op_axes): if space.len_w(w_op_axes) != len(self.seq): raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -158,13 +158,8 @@ multi = nditer([None, array([2, 3], dtype='int64'), array(2., dtype='double')], op_dtypes = ['int64', 'int64', 'float64'], op_flags = [['writeonly', 'allocate'], ['readonly'], ['readonly']]) - print 'starting the real mccoy' for a, b, c in multi: - print 'in loop' a[...] 
= b * c - print multi.operands[0] - print multi.operands[1] - print multi.operands[2] assert (multi.operands[0] == [4, 6]).all() def test_casting(self): From noreply at buildbot.pypy.org Thu Apr 17 15:02:08 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 15:02:08 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: add comments for long confusing code Message-ID: <20140417130208.019C41D2840@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70697:2f4b0bd4e54b Date: 2014-04-17 10:29 +0300 http://bitbucket.org/pypy/pypy/changeset/2f4b0bd4e54b/ Log: add comments for long confusing code diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -282,17 +282,22 @@ self.done = False self.first_next = True self.op_axes = [] + # convert w_seq operands to a list of W_NDimArray if space.isinstance_w(w_seq, space.w_tuple) or \ space.isinstance_w(w_seq, space.w_list): w_seq_as_list = space.listview(w_seq) self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] else: self.seq =[convert_to_array(space, w_seq)] + parse_func_flags(space, self, w_flags) self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, len(self.seq), parse_op_flag) + # handle w_op_axes if not space.is_none(w_op_axes): self.set_op_axes(space, w_op_axes) + + # handle w_op_dtypes part 1: creating self.dtypes list from input if not space.is_none(w_op_dtypes): w_seq_as_list = space.listview(w_op_dtypes) self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] @@ -301,6 +306,8 @@ "op_dtypes must be a tuple/list matching the number of ops")) else: self.dtypes = [] + + # handle None or writable operands, calculate my shape self.iters=[] outargs = [i for i in range(len(self.seq)) \ if self.seq[i] is None or self.op_flags[i].rw == 'w'] @@ -334,6 +341,7 @@ if not self.op_flags[i].broadcast: # Raises if ooutput 
cannot be broadcast shape_agreement(space, iter_shape, self.seq[i], False) + if self.tracked_index != "": if self.order == "K": self.order = self.seq[0].implementation.order @@ -342,8 +350,9 @@ else: backward = self.order != self.tracked_index self.index_iter = IndexIterator(iter_shape, backward=backward) + + # handle w_op_dtypes part 2: copy where needed if possible if len(self.dtypes) > 0: - # Make sure dtypes make sense for i in range(len(self.seq)): selfd = self.dtypes[i] seq_d = self.seq[i].get_dtype() @@ -359,6 +368,8 @@ else: #copy them from seq self.dtypes = [s.get_dtype() for s in self.seq] + + # create an iterator for each operand if self.external_loop: for i in range(len(self.seq)): self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, From noreply at buildbot.pypy.org Thu Apr 17 15:02:11 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 15:02:11 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: merge default into branch Message-ID: <20140417130211.58ED51D2840@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70698:18a10f26926b Date: 2014-04-17 11:13 +0300 http://bitbucket.org/pypy/pypy/changeset/18a10f26926b/ Log: merge default into branch diff too long, truncating to 2000 out of 2455 lines diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -217,7 +217,7 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withmethodcache", True), + ("objspace.std.withtypeversion", True), ]), BoolOption("withrangelist", diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -12,9 +12,9 @@ assert conf.objspace.usemodules.gc conf.objspace.std.withmapdict = True - assert 
conf.objspace.std.withmethodcache + assert conf.objspace.std.withtypeversion conf = get_pypy_config() - conf.objspace.std.withmethodcache = False + conf.objspace.std.withtypeversion = False py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") def test_conflicting_gcrootfinder(): diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,3 +1,6 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -106,23 +106,43 @@ Differences related to garbage collection strategies ---------------------------------------------------- -Most of the garbage collectors used or implemented by PyPy are not based on +The garbage collectors used or implemented by PyPy are not based on reference counting, so the objects are not freed instantly when they are no longer reachable. The most obvious effect of this is that files are not promptly closed when they go out of scope. For files that are opened for writing, data can be left sitting in their output buffers for a while, making -the on-disk file appear empty or truncated. +the on-disk file appear empty or truncated. Moreover, you might reach your +OS's limit on the number of concurrently opened files. -Fixing this is essentially not possible without forcing a +Fixing this is essentially impossible without forcing a reference-counting approach to garbage collection. 
The effect that you get in CPython has clearly been described as a side-effect of the implementation and not a language design decision: programs relying on this are basically bogus. It would anyway be insane to try to enforce CPython's behavior in a language spec, given that it has no chance to be adopted by Jython or IronPython (or any other port of Python to Java or -.NET, like PyPy itself). +.NET). -This affects the precise time at which ``__del__`` methods are called, which +Even the naive idea of forcing a full GC when we're getting dangerously +close to the OS's limit can be very bad in some cases. If your program +leaks open files heavily, then it would work, but force a complete GC +cycle every n'th leaked file. The value of n is a constant, but the +program can take an arbitrary amount of memory, which makes a complete +GC cycle arbitrarily long. The end result is that PyPy would spend an +arbitrarily large fraction of its run time in the GC --- slowing down +the actual execution, not by 10% nor 100% nor 1000% but by essentially +any factor. + +To the best of our knowledge this problem has no better solution than +fixing the programs. If it occurs in 3rd-party code, this means going +to the authors and explaining the problem to them: they need to close +their open files in order to run on any non-CPython-based implementation +of Python. + +--------------------------------- + +Here are some more technical details. This issue affects the precise +time at which ``__del__`` methods are called, which is not reliable in PyPy (nor Jython nor IronPython). It also means that weak references may stay alive for a bit longer than expected. This makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less @@ -315,6 +335,15 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* on CPython, ``[].__add__`` is a ``method-wrapper``, and + ``list.__add__`` is a ``slot wrapper``. 
On PyPy these are normal + bound or unbound method objects. This can occasionally confuse some + tools that inspect built-in types. For example, the standard + library ``inspect`` module has a function ``ismethod()`` that returns + True on unbound method objects but False on method-wrappers or slot + wrappers. On PyPy we can't tell the difference, so + ``ismethod([].__add__) == ismethod(list.__add__) == True``. + * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. Mutating the dict will change the type and vice versa. For builtin types, a dictionary will be returned that diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -318,7 +318,7 @@ To read more about the RPython limitations read the `RPython description`_. -.. _`RPython description`: coding-guide.html#restricted-python +.. _`RPython description`: coding-guide.html#rpython-definition --------------------------------------------------------------- Does RPython have anything to do with Zope's Restricted Python? @@ -459,6 +459,19 @@ .. _`getting-started`: getting-started-python.html +------------------------------------------ +Compiling PyPy swaps or runs out of memory +------------------------------------------ + +This is documented (here__ and here__). It needs 4 GB of RAM to run +"rpython targetpypystandalone" on top of PyPy, a bit more when running +on CPython. If you have less than 4 GB it will just swap forever (or +fail if you don't have enough swap). On 32-bit, divide the numbers by +two. + +.. __: http://pypy.org/download.html#building-from-source +.. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter + .. 
_`how do I compile my own interpreters`: ------------------------------------- diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -92,7 +92,9 @@ `D07.1 Massive Parallelism and Translation Aspects`_ is a report about PyPy's optimization efforts, garbage collectors and massive parallelism (stackless) features. This report refers to the paper `PyPy's approach -to virtual machine construction`_. *(2007-02-28)* +to virtual machine construction`_. Extends the content previously +available in the document "Memory management and threading models as +translation aspects -- solutions and challenges". *(2007-02-28)* diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + + diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -177,6 +177,12 @@ .. __: https://bitbucket.org/pypy/extradoc/src/tip/talk/icooolps2009/bolz-tracing-jit-final.pdf -as well as the `blog posts with the JIT tag.`__ +Chapters 5 and 6 of `Antonio Cuni's PhD thesis`__ contain an overview of how +Tracing JITs work in general and more informations about the concrete case of +PyPy's JIT. + +.. __: http://antocuni.eu/download/antocuni-phd-thesis.pdf + +The `blog posts with the JIT tag`__ might also contain additional information. .. __: http://morepypy.blogspot.com/search/label/jit diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -15,19 +15,19 @@ user, describes work in progress, and finally gives references to more implementation details. -This work was done by Remi Meier and Armin Rigo. Thanks to all donors -for crowd-funding the work so far! 
Please have a look at the 2nd call -for donation (*not ready yet*) +This work was done mostly by Remi Meier and Armin Rigo. Thanks to all +donors for crowd-funding the work so far! Please have a look at the +`2nd call for donation`_. -.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html +.. _`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction ============ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% of the speed of a -regular PyPy, comparing the JITting version in both cases. It is called +listed below, it should be in theory within 25%-50% slower than a +regular PyPy, comparing the JIT version in both cases. It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). @@ -55,9 +55,9 @@ interested in trying it out, you can download a Ubuntu 12.04 binary here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, but not stripped of debug symbols). The current version supports four -"segments", which means that it will run up to four threads in parallel -(in other words, you get a GIL effect again, but only if trying to -execute more than 4 threads). +"segments", which means that it will run up to four threads in parallel, +in other words it is running a thread pool up to 4 threads emulating normal +threads. 
To build a version from sources, you first need to compile a custom version of clang; we recommend downloading `llvm and clang like diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy @@ -51,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines)) + content = ".. This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -133,4 +133,9 @@ .. branch: ast-issue1673 fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field \ No newline at end of file +there is missing field + +.. branch: issue1514 +Fix issues with reimporting builtin modules + +.. 
branch: numpypy-nditer diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -337,6 +337,9 @@ return 'internal subclass of %s' % (Class.__name__,) wrappable_class_name._annspecialcase_ = 'specialize:memo' +class CannotHaveLock(Exception): + """Raised by space.allocate_lock() if we're translating.""" + # ____________________________________________________________ class ObjSpace(object): @@ -440,10 +443,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + assert reuse try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -458,15 +462,23 @@ raise oefmt(self.w_SystemError, "getbuiltinmodule() called with non-builtin module %s", name) - else: - # Initialize the module - from pypy.interpreter.module import Module - if isinstance(w_mod, Module): - w_mod.init(self) - # Add the module to sys.modules + # Add the module to sys.modules and initialize the module. The + # order is important to avoid recursions. + from pypy.interpreter.module import Module + if isinstance(w_mod, Module): + if not reuse and w_mod.startup_called: + # create a copy of the module. (see issue1514) eventlet + # patcher relies on this behaviour. + w_mod2 = self.wrap(Module(self, w_name)) + self.setitem(w_modules, w_name, w_mod2) + w_mod.getdict(self) # unlazy w_initialdict + self.call_method(w_mod2.getdict(self), 'update', + w_mod.w_initialdict) + return w_mod2 self.setitem(w_modules, w_name, w_mod) - return w_mod + w_mod.init(self) + return w_mod def get_builtinmodule_to_install(self): """NOT_RPYTHON""" @@ -663,6 +675,11 @@ def __allocate_lock(self): from rpython.rlib.rthread import allocate_lock, error + # hack: we can't have prebuilt locks if we're translating. 
+ # In this special situation we should just not lock at all + # (translation is not multithreaded anyway). + if not we_are_translated() and self.config.translating: + raise CannotHaveLock() try: return allocate_lock() except error: diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -14,6 +14,7 @@ # after startup(). w_initialdict = None lazy = False + submodule_name = None def __init__(self, space, w_name): """ NOT_RPYTHON """ @@ -31,6 +32,8 @@ space = self.space name = space.unwrap(self.w_name) for sub_name, module_cls in self.submodules.iteritems(): + if module_cls.submodule_name is None: + module_cls.submodule_name = sub_name module_name = space.wrap("%s.%s" % (name, sub_name)) m = module_cls(space, module_name) m.install() @@ -134,6 +137,8 @@ cls.loaders = loaders = {} pkgroot = cls.__module__ appname = cls.get_applevel_name() + if cls.submodule_name is not None: + appname += '.%s' % (cls.submodule_name,) for name, spec in cls.interpleveldefs.items(): loaders[name] = getinterpevalloader(pkgroot, spec) for name, spec in cls.appleveldefs.items(): diff --git a/pypy/module/__pypy__/app_signal.py b/pypy/module/__pypy__/app_signal.py --- a/pypy/module/__pypy__/app_signal.py +++ b/pypy/module/__pypy__/app_signal.py @@ -1,4 +1,9 @@ -import __pypy__.thread +import thread +# ^^ relative import of __pypy__.thread. Note that some tests depend on +# this (test_enable_signals in test_signal.py) to work properly, +# otherwise they get caught in some deadlock waiting for the import +# lock... + class SignalsEnabled(object): '''A context manager to use in non-main threads: @@ -8,7 +13,7 @@ that is within a "with signals_enabled:". 
This other thread should be ready to handle unexpected exceptions that the signal handler might raise --- notably KeyboardInterrupt.''' - __enter__ = __pypy__.thread._signals_enter - __exit__ = __pypy__.thread._signals_exit + __enter__ = thread._signals_enter + __exit__ = thread._signals_exit signals_enabled = SignalsEnabled() diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -8,6 +8,7 @@ def test_signal(self): from __pypy__ import thread + assert type(thread.signals_enabled).__module__ == '__pypy__.thread' with thread.signals_enabled: pass # assert did not crash diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1091,7 +1091,7 @@ def test_read_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1101,7 +1101,7 @@ def test_read_variable_as_unknown_length_array(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1113,7 +1113,7 @@ def test_write_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 
'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -48,11 +48,9 @@ raises(OverflowError, _cffi_backend.newp, SHORT_ARRAY, [-40000]) def test_fast_init_longlong_from_list(self): - if type(2 ** 50) is long: - large_int = 2 ** 30 - else: - large_int = 2 ** 50 import _cffi_backend + import sys + large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) LONGLONG = _cffi_backend.new_primitive_type('long long') P_LONGLONG = _cffi_backend.new_pointer_type(LONGLONG) LONGLONG_ARRAY = _cffi_backend.new_array_type(P_LONGLONG, None) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -137,6 +137,7 @@ self.check_mode_ok(mode) stream = streamio.fdopen_as_stream(fd, mode, buffering, signal_checker(self.space)) + self.check_not_dir(fd) self.fdopenstream(stream, fd, mode) def direct_close(self): diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -4,7 +4,7 @@ from rpython.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import ObjSpace, W_Root +from pypy.interpreter.baseobjspace import ObjSpace, W_Root, CannotHaveLock from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror @@ -33,19 +33,24 @@ def _try_acquire_lock(self): # this function runs with the GIL acquired so there is no race # 
condition in the creation of the lock - if self.slock is None: - self.slock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.slockowner is me: return False # already acquired by the current thread - self.slock.acquire(True) + try: + if self.slock is None: + self.slock = self.space.allocate_lock() + except CannotHaveLock: + pass + else: + self.slock.acquire(True) assert self.slockowner is None self.slockowner = me return True def _release_lock(self): self.slockowner = None - self.slock.release() + if self.slock is not None: + self.slock.release() def lock(self): if not self._try_acquire_lock(): diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py --- a/pypy/module/_multibytecodec/c_codecs.py +++ b/pypy/module/_multibytecodec/c_codecs.py @@ -1,7 +1,6 @@ import py from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir UNICODE_REPLACEMENT_CHARACTER = u'\uFFFD' @@ -15,7 +14,7 @@ return 'EncodeDecodeError(%r, %r, %r)' % (self.start, self.end, self.reason) -srcdir = py.path.local(cdir) +srcdir = py.path.local(__file__).dirpath() codecs = [ # _codecs_cn diff --git a/rpython/translator/c/src/cjkcodecs/README b/pypy/module/_multibytecodec/src/cjkcodecs/README rename from rpython/translator/c/src/cjkcodecs/README rename to pypy/module/_multibytecodec/src/cjkcodecs/README diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_cn.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c rename from rpython/translator/c/src/cjkcodecs/_codecs_cn.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_hk.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c rename from rpython/translator/c/src/cjkcodecs/_codecs_hk.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c diff --git 
a/rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c rename from rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_jp.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c rename from rpython/translator/c/src/cjkcodecs/_codecs_jp.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_kr.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c rename from rpython/translator/c/src/cjkcodecs/_codecs_kr.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_tw.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c rename from rpython/translator/c/src/cjkcodecs/_codecs_tw.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c diff --git a/rpython/translator/c/src/cjkcodecs/alg_jisx0201.h b/pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h rename from rpython/translator/c/src/cjkcodecs/alg_jisx0201.h rename to pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h diff --git a/rpython/translator/c/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h rename from rpython/translator/c/src/cjkcodecs/cjkcodecs.h rename to pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h diff --git a/rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h b/pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h rename from rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h rename to pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_cn.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h rename from rpython/translator/c/src/cjkcodecs/mappings_cn.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h diff --git 
a/rpython/translator/c/src/cjkcodecs/mappings_hk.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h rename from rpython/translator/c/src/cjkcodecs/mappings_hk.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h rename from rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jp.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h rename from rpython/translator/c/src/cjkcodecs/mappings_jp.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_kr.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h rename from rpython/translator/c/src/cjkcodecs/mappings_kr.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_tw.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h rename from rpython/translator/c/src/cjkcodecs/mappings_tw.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c rename from rpython/translator/c/src/cjkcodecs/multibytecodec.c rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h rename from rpython/translator/c/src/cjkcodecs/multibytecodec.h rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,5 +1,6 @@ import py + at 
py.test.mark.tryfirst def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: py.test.skip("genreflex is not installed") diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -2,13 +2,13 @@ Implementation of the interpreter-level default import logic. """ -import sys, os, stat +import sys, os, stat, genericpath from pypy.interpreter.module import Module from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, generic_new_descr from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.baseobjspace import W_Root, CannotHaveLock from pypy.interpreter.eval import Code from pypy.interpreter.pycode import PyCode from rpython.rlib import streamio, jit @@ -522,7 +522,8 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) - if os.path.isdir(filepart) and case_ok(filepart): + # os.path.isdir on win32 is not rpython when pywin32 installed + if genericpath.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) if modtype in (PY_SOURCE, PY_COMPILED): @@ -579,7 +580,8 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None @@ -753,26 +755,14 @@ me = self.space.getexecutioncontext() # used as thread ident return self.lockowner is me - def _can_have_lock(self): - # hack: we can't have self.lock != None during translation, - # because prebuilt lock objects are not allowed. 
In this - # special situation we just don't lock at all (translation is - # not multithreaded anyway). - if we_are_translated(): - return True # we need a lock at run-time - elif self.space.config.translating: - assert self.lock is None - return False - else: - return True # in py.py - def acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock if self.lock is None: - if not self._can_have_lock(): + try: + self.lock = self.space.allocate_lock() + except CannotHaveLock: return - self.lock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is me: pass # already acquired by the current thread @@ -790,7 +780,7 @@ # Too bad. This situation can occur if a fork() occurred # with the import lock held, and we're the child. return - if not self._can_have_lock(): + if self.lock is None: # CannotHaveLock occurred return space = self.space raise OperationError(space.w_RuntimeError, diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,7 +203,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = marshal.loads @@ -223,7 +222,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,7 +578,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -586,20 +585,19 @@ assert not 
hasattr(time, 'foo') def test_reimport_builtin(self): - skip("fix me") import sys, time oldpath = sys.path - time.tzset = "" + time.tzname = "" del sys.modules['time'] import time as time1 assert sys.modules['time'] is time1 - assert time.tzset == "" + assert time.tzname == "" - reload(time1) # don't leave a broken time.tzset behind + reload(time1) # don't leave a broken time.tzname behind import time - assert time.tzset != "" + assert time.tzname != "" def test_reload_infinite(self): import infinite_reload diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,7 +23,6 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', - 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import math import _numpypy diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -37,7 +37,7 @@ "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] -TWO_ARG_FUNCTIONS_OR_NONE = ['view'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -388,6 +388,8 @@ w_res = w_lhs.descr_mul(interp.space, w_rhs) elif self.name == '-': w_res = w_lhs.descr_sub(interp.space, w_rhs) + elif self.name == '**': + w_res = w_lhs.descr_pow(interp.space, w_rhs) elif self.name == '->': if isinstance(w_rhs, FloatObject): w_rhs = IntObject(int(w_rhs.floatval)) @@ -596,6 +598,8 @@ arg = self.args[1].execute(interp) if self.name == 
'view': w_res = arr.descr_view(interp.space, arg) + elif self.name == 'astype': + w_res = arr.descr_astype(interp.space, arg) else: assert False else: @@ -620,7 +624,7 @@ (':', 'colon'), ('\w+', 'identifier'), ('\]', 'array_right'), - ('(->)|[\+\-\*\/]', 'operator'), + ('(->)|[\+\-\*\/]+', 'operator'), ('=', 'assign'), (',', 'comma'), ('\|', 'pipe'), diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -458,13 +458,6 @@ return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) - def readonly(self): - return NonWritableSlice(self.start, self.strides, self.backstrides, self.shape, self.parent, self.orig_arr, self.dtype) - -class NonWritableSlice(SliceArray): - def descr_setitem(self, space, orig_array, w_index, w_value): - raise OperationError(space.w_ValueError, space.wrap( - "assignment destination is read-only")) class VoidBoxStorage(BaseConcreteArray): def __init__(self, size, dtype): diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,7 +42,6 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support -from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray class PureShapeIter(object): @@ -97,25 +96,29 @@ self.indices = [0] * len(self.shape_m1) self.offset = self.array.start + @jit.unroll_safe def next(self): self.index += 1 for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < self.shape_m1[i]: - self.indices[i] += 1 + idx = self.indices[i] + if idx < self.shape_m1[i]: + self.indices[i] = idx + 1 self.offset += self.strides[i] break else: self.indices[i] = 0 self.offset -= self.backstrides[i] + @jit.unroll_safe def next_skip_x(self, step): assert step >= 0 if step == 0: return self.index += step for i in 
xrange(self.ndim_m1, -1, -1): - if self.indices[i] < (self.shape_m1[i] + 1) - step: - self.indices[i] += step + idx = self.indices[i] + if idx < (self.shape_m1[i] + 1) - step: + self.indices[i] = idx + step self.offset += self.strides[i] * step break else: @@ -138,37 +141,6 @@ def setitem(self, elem): self.array.setitem(self.offset, elem) -class SliceIterator(ArrayIter): - def __init__(self, arr, strides, backstrides, shape, order="C", - backward=False, dtype=None): - if dtype is None: - dtype = arr.implementation.dtype - self.dtype = dtype - self.arr = arr - if backward: - self.slicesize = shape[0] - self.gap = [support.product(shape[1:]) * dtype.elsize] - strides = strides[1:] - backstrides = backstrides[1:] - shape = shape[1:] - strides.reverse() - backstrides.reverse() - shape.reverse() - size = support.product(shape) - else: - shape = [support.product(shape)] - strides, backstrides = calc_strides(shape, dtype, order) - size = 1 - self.slicesize = support.product(shape) - self.gap = strides - - ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) - - def getslice(self): - from pypy.module.micronumpy.concrete import SliceArray - retVal = SliceArray(self.offset, self.gap, self.backstrides, - [self.slicesize], self.arr.implementation, self.arr, self.dtype) - return retVal def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,16 +282,14 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list, shape=None): +def shape_agreement_multiple(space, array_list): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - for arr in array_list: + shape = array_list[0].get_shape() + for arr in array_list[1:]: if not space.is_none(arr): - if shape is None: - shape = arr.get_shape() - else: - shape = 
shape_agreement(space, shape, arr) + shape = shape_agreement(space, shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -319,3 +319,14 @@ ''') results = interp.results[0] assert isinstance(results, W_NDimArray) + + def test_astype_dtype(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = int + c = astype(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) + assert results.get_dtype().is_int() diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -94,14 +94,58 @@ a -> 3 """ - def test_floatadd(self): + def test_float_add(self): result = self.run("float_add") assert result == 3 + 3 - py.test.skip("don't run for now") - self.check_simple_loop({"raw_load": 1, "float_add": 1, - "raw_store": 1, "int_add": 1, - "int_ge": 1, "guard_false": 1, "jump": 1, - 'arraylen_gc': 1}) + self.check_trace_count(1) + self.check_simple_loop({ + 'float_add': 1, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, + 'guard_false': 1, + 'guard_not_invalidated': 1, + 'guard_true': 3, + 'int_add': 9, + 'int_ge': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + 'setfield_gc': 6, + }) + + def define_pow(): + return """ + a = |30| ** 2 + a -> 3 + """ + + def test_pow(self): + result = self.run("pow") + assert result == 3 ** 2 + self.check_trace_count(1) + self.check_simple_loop({ + 'call': 3, + 'float_add': 1, + 'float_eq': 3, + 'float_mul': 2, + 'float_ne': 1, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, + 'guard_false': 4, + 'guard_not_invalidated': 1, + 'guard_true': 5, + 'int_add': 9, + 'int_ge': 1, + 'int_is_true': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + 
'setfield_gc': 6, + }) def define_sum(): return """ @@ -482,16 +526,19 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ - 'call': 2, - 'getfield_gc': 2, - 'guard_no_exception': 2, + 'getarrayitem_gc': 2, + 'getfield_gc': 4, 'guard_not_invalidated': 1, - 'guard_true': 1, + 'guard_true': 3, + 'int_add': 6, 'int_gt': 1, + 'int_lt': 2, 'int_sub': 1, 'jump': 1, 'raw_load': 1, 'raw_store': 1, + 'setarrayitem_gc': 2, + 'setfield_gc': 4, }) def define_dot(): @@ -506,36 +553,43 @@ result = self.run("dot") assert result == 184 self.check_trace_count(3) - self.check_simple_loop({'float_add': 1, - 'float_mul': 1, - 'guard_not_invalidated': 1, - 'guard_true': 1, - 'int_add': 3, - 'int_lt': 1, - 'jump': 1, - 'raw_load': 2}) - self.check_resops({'arraylen_gc': 1, - 'call': 3, - 'float_add': 2, - 'float_mul': 2, - 'getfield_gc': 26, - 'getfield_gc_pure': 24, - 'guard_class': 4, - 'guard_false': 2, - 'guard_no_exception': 3, - 'guard_nonnull': 12, - 'guard_nonnull_class': 4, - 'guard_not_invalidated': 2, - 'guard_true': 9, - 'guard_value': 4, - 'int_add': 6, - 'int_ge': 3, - 'int_lt': 4, - 'jump': 3, - 'new_array': 1, - 'raw_load': 6, - 'raw_store': 1, - 'setfield_gc': 3}) + self.check_simple_loop({ + 'float_add': 1, + 'float_mul': 1, + 'guard_not_invalidated': 1, + 'guard_true': 1, + 'int_add': 3, + 'int_lt': 1, + 'jump': 1, + 'raw_load': 2, + }) + self.check_resops({ + 'arraylen_gc': 1, + 'float_add': 2, + 'float_mul': 2, + 'getarrayitem_gc': 7, + 'getarrayitem_gc_pure': 15, + 'getfield_gc': 35, + 'getfield_gc_pure': 39, + 'guard_class': 4, + 'guard_false': 14, + 'guard_nonnull': 12, + 'guard_nonnull_class': 4, + 'guard_not_invalidated': 2, + 'guard_true': 13, + 'guard_value': 4, + 'int_add': 25, + 'int_ge': 4, + 'int_le': 8, + 'int_lt': 11, + 'int_sub': 4, + 'jump': 3, + 'new_array': 1, + 'raw_load': 6, + 'raw_store': 1, + 'setarrayitem_gc': 8, + 'setfield_gc': 15, + }) def define_argsort(): return """ diff --git a/pypy/module/micronumpy/types.py 
b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -442,6 +442,7 @@ return v1 % v2 @simple_binary_op + @jit.look_inside_iff(lambda self, v1, v2: jit.isconstant(v2)) def pow(self, v1, v2): if v2 < 0: return 0 diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -19,7 +19,7 @@ usemodules += ['fcntl'] else: # On windows, os.popen uses the subprocess module - usemodules += ['_rawffi', 'thread'] + usemodules += ['_rawffi', 'thread', 'signal'] mod.space = gettestobjspace(usemodules=usemodules) mod.path = udir.join('posixtestfile.txt') mod.path.write("this is a test") @@ -305,6 +305,17 @@ finally: __builtins__.file = _file + def test_fdopen_directory(self): + import errno + os = self.posix + try: + fd = os.open('.', os.O_RDONLY) + except OSError as e: + assert e.errno == errno.EACCES + skip("system cannot open directories") + exc = raises(IOError, os.fdopen, fd, 'r') + assert exc.value.errno == errno.EISDIR + def test_getcwd(self): assert isinstance(self.posix.getcwd(), str) assert isinstance(self.posix.getcwdu(), unicode) @@ -340,7 +351,6 @@ else: assert (unicode, u) in typed_result - def test_access(self): pdir = self.pdir + '/file1' posix = self.posix @@ -351,7 +361,6 @@ if sys.platform != "win32": assert not posix.access(pdir, posix.X_OK) - def test_times(self): """ posix.times() should return a five-tuple giving float-representations @@ -1156,8 +1165,8 @@ res = os.system(cmd) assert res == 0 + class AppTestPosixUnicode: - def setup_class(cls): cls.space = space cls.w_posix = space.appexec([], GET_POSIX) @@ -1198,6 +1207,7 @@ except OSError: pass + class AppTestUnicodeFilename: def setup_class(cls): ufilename = (unicode(udir.join('test_unicode_filename_')) + diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ 
b/pypy/module/sys/initpath.py @@ -2,27 +2,33 @@ Logic to find sys.executable and the initial sys.path containing the stdlib """ -import sys +import errno import os import stat -import errno +import sys + from rpython.rlib import rpath from rpython.rlib.objectmodel import we_are_translated + from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state -platform = sys.platform -IS_WINDOWS = sys.platform == 'win32' +PLATFORM = sys.platform +_MACOSX = sys.platform == 'darwin' +_WIN32 = sys.platform == 'win32' + def find_executable(executable): """ - Return the absolute path of the executable, by looking into PATH and the - current directory. If it cannot be found, return ''. + Return the absolute path of the executable, by looking into PATH and + the current directory. If it cannot be found, return ''. """ - if we_are_translated() and IS_WINDOWS and not executable.lower().endswith('.exe'): + if (we_are_translated() and _WIN32 and + not executable.lower().endswith('.exe')): executable += '.exe' - if os.sep in executable or (IS_WINDOWS and ':' in executable): - pass # the path is already more than just an executable name + if os.sep in executable or (_WIN32 and ':' in executable): + # the path is already more than just an executable name + pass else: path = os.environ.get('PATH') if path: @@ -35,15 +41,15 @@ # 'sys.executable' should not end up being an non-existing file; # just use '' in this case. 
(CPython issue #7774) - if not os.path.isfile(executable): - executable = '' - return executable + return executable if os.path.isfile(executable) else '' + def _readlink_maybe(filename): - if not IS_WINDOWS: + if not _WIN32: return os.readlink(filename) raise NotImplementedError + def resolvedirof(filename): filename = rpath.rabspath(filename) dirname = rpath.rabspath(os.path.join(filename, '..')) @@ -56,36 +62,37 @@ return resolvedirof(os.path.join(dirname, link)) return dirname + def find_stdlib(state, executable): """ Find and compute the stdlib path, starting from the directory where - ``executable`` is and going one level up until we find it. Return a tuple - (path, prefix), where ``prefix`` is the root directory which contains the - stdlib. - If it cannot be found, return (None, None). + ``executable`` is and going one level up until we find it. Return a + tuple (path, prefix), where ``prefix`` is the root directory which + contains the stdlib. If it cannot be found, return (None, None). """ - if executable == '': - executable = 'pypy-c' - search = executable + search = 'pypy-c' if executable == '' else executable while True: dirname = resolvedirof(search) if dirname == search: - return None, None # not found :-( + return None, None # not found :-( newpath = compute_stdlib_path_maybe(state, dirname) if newpath is not None: return newpath, dirname search = dirname # walk to the parent directory + def _checkdir(path): st = os.stat(path) if not stat.S_ISDIR(st[0]): raise OSError(errno.ENOTDIR, path) + def compute_stdlib_path(state, prefix): """ - Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` must at - least contain a directory called ``lib-python/X.Y`` and another one called - ``lib_pypy``. If they cannot be found, it raises OSError. + Compute the paths for the stdlib rooted at ``prefix``. ``prefix`` + must at least contain a directory called ``lib-python/X.Y`` and + another one called ``lib_pypy``. 
If they cannot be found, it raises + OSError. """ from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d.%d' % (CPYTHON_VERSION[0], @@ -111,41 +118,42 @@ importlist.append(lib_tk) # List here the extra platform-specific paths. - if platform != 'win32': - importlist.append(os.path.join(python_std_lib, 'plat-'+platform)) - if platform == 'darwin': + if not _WIN32: + importlist.append(os.path.join(python_std_lib, 'plat-' + PLATFORM)) + if _MACOSX: platmac = os.path.join(python_std_lib, 'plat-mac') importlist.append(platmac) importlist.append(os.path.join(platmac, 'lib-scriptpackages')) return importlist + def compute_stdlib_path_maybe(state, prefix): - """ - Return the stdlib path rooted at ``prefix``, or None if it cannot be - found. + """Return the stdlib path rooted at ``prefix``, or None if it cannot + be found. """ try: return compute_stdlib_path(state, prefix) except OSError: return None + @unwrap_spec(executable='str0') def pypy_find_executable(space, executable): return space.wrap(find_executable(executable)) + @unwrap_spec(filename='str0') def pypy_resolvedirof(space, filename): return space.wrap(resolvedirof(filename)) + @unwrap_spec(executable='str0') def pypy_find_stdlib(space, executable): path, prefix = find_stdlib(get_state(space), executable) if path is None: return space.w_None - else: - space.setitem(space.sys.w_dict, space.wrap('prefix'), - space.wrap(prefix)) - space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), - space.wrap(prefix)) - return space.newlist([space.wrap(p) for p in path]) + w_prefix = space.wrap(prefix) + space.setitem(space.sys.w_dict, space.wrap('prefix'), w_prefix) + space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), w_prefix) + return space.newlist([space.wrap(p) for p in path]) diff --git a/pypy/module/sys/test/test_initpath.py b/pypy/module/sys/test/test_initpath.py --- a/pypy/module/sys/test/test_initpath.py +++ b/pypy/module/sys/test/test_initpath.py @@ -84,7 +84,7 @@ assert find_executable('pypy') 
== a.join('pypy') # monkeypatch.setattr(initpath, 'we_are_translated', lambda: True) - monkeypatch.setattr(initpath, 'IS_WINDOWS', True) + monkeypatch.setattr(initpath, '_WIN32', True) monkeypatch.setenv('PATH', str(a)) a.join('pypy.exe').ensure(file=True) assert find_executable('pypy') == a.join('pypy.exe') diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -656,6 +656,10 @@ return self.len - self.pos return 0 + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class BaseKeyIterator(BaseIteratorImplementation): next_key = _new_next('key') @@ -1191,6 +1195,10 @@ w_ret = space.newtuple([new_inst, space.newtuple([w_res])]) return w_ret + def _cleanup_(self): + raise Exception("seeing a prebuilt %r object" % ( + self.__class__,)) + class W_DictMultiIterKeysObject(W_BaseDictMultiIterObject): def descr_next(self, space): diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -707,7 +707,7 @@ raise OperationError(space.w_ValueError, space.wrap("list modified during sort")) -find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') +find_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'list.find') class ListStrategy(object): @@ -733,8 +733,9 @@ space = self.space i = start # needs to be safe against eq_w mutating stuff + tp = space.type(w_item) while i < stop and i < w_list.length(): - find_jmp.jit_merge_point() + find_jmp.jit_merge_point(tp=tp) if space.eq_w(w_list.getitem(i), w_item): return i i += 1 diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -873,8 +873,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a TypeCell, which may 
change without changing the version_tag - assert space.config.objspace.std.withmethodcache - _, w_descr = w_type._pure_lookup_where_with_method_cache( + _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # selector = ("", INVALID) @@ -932,9 +931,8 @@ # in the class, this time taking care of the result: it can be either a # quasi-constant class attribute, or actually a TypeCell --- which we # must not cache. (It should not be None here, but you never know...) - assert space.config.objspace.std.withmethodcache - _, w_method = w_type._pure_lookup_where_with_method_cache(name, - version_tag) + _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( + name, version_tag) if w_method is None or isinstance(w_method, TypeCell): return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1091,6 +1091,7 @@ def _intersect_wrapped(self, w_set, w_other): result = newset(self.space) for key in self.unerase(w_set.sstorage): + self.intersect_jmp.jit_merge_point() w_key = self.wrap(key) if w_other.has_key(w_key): result[w_key] = None @@ -1201,6 +1202,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(bytes).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1237,6 +1241,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(unicode).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1273,6 +1280,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(int).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1311,6 +1321,9 @@ erase = staticmethod(erase) unerase = 
staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(object).intersect') + def get_empty_storage(self): return self.erase(self.get_empty_dict()) @@ -1355,6 +1368,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(identity).intersect') + def get_empty_storage(self): return self.erase({}) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1277,7 +1277,6 @@ class TestBytesDictImplementation(BaseTestRDictImplementation): StrategyClass = BytesDictStrategy - #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() @@ -1289,9 +1288,6 @@ self.fill_impl() assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) -## class TestMeasuringDictImplementation(BaseTestRDictImplementation): -## ImplementionClass = MeasuringDictImplementation -## DevolvedClass = MeasuringDictImplementation class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -27,7 +27,7 @@ jit.loop_unrolling_heuristic(other, other.length(), UNROLL_CUTOFF)) -contains_jmp = jit.JitDriver(greens = [], reds = 'auto', +contains_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'tuple.contains') class W_AbstractTupleObject(W_Root): @@ -136,8 +136,9 @@ return space.w_False def _descr_contains_jmp(self, space, w_obj): + tp = space.type(w_obj) for w_item in self.tolist(): - contains_jmp.jit_merge_point() + contains_jmp.jit_merge_point(tp=tp) if space.eq_w(w_item, w_obj): return space.w_True return space.w_False diff --git a/pypy/objspace/std/typeobject.py 
b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -371,6 +371,12 @@ w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag) return w_class, unwrap_cell(space, w_value) + def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): + if w_self.space.config.objspace.std.withmethodcache: + return w_self._pure_lookup_where_with_method_cache(name, version_tag) + else: + return w_self._lookup_where_all_typeobjects(name) + @elidable def _pure_lookup_where_with_method_cache(w_self, name, version_tag): space = w_self.space diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -25,7 +25,7 @@ # self.path = space.unwrap(space.getattr( self.path = py.path.local(space.str_w(self.w_file)) self.space = space - + def fullsource(self): filename = self.space.str_w(self.w_file) source = py.code.Source(py.std.linecache.getlines(filename)) @@ -106,27 +106,28 @@ def exconly(self, tryshort=True): return '(application-level) ' + self.operr.errorstr(self.space) - def errisinstance(self, exc): - clsname = exc.__name__ + def errisinstance(self, exc): + clsname = exc.__name__ # we can only check for builtin exceptions # as there is no canonical applevel one for custom interplevel ones if exc.__module__ != "exceptions": - return False - try: - w_exc = getattr(self.space, 'w_' + clsname) - except KeyboardInterrupt: - raise - except: - pass - else: - return self.operr.match(self.space, w_exc) - return False + return False + try: + w_exc = getattr(self.space, 'w_' + clsname) + except KeyboardInterrupt: + raise + except: + pass + else: + return self.operr.match(self.space, w_exc) + return False def __str__(self): return '(application-level) ' + self.operr.errorstr(self.space) class AppTracebackEntry(py.code.Traceback.Entry): exprinfo = None + frame = None def __init__(self, space, tb): self.frame 
= AppFrame(space, space.getattr(tb, space.wrap('tb_frame'))) @@ -142,8 +143,11 @@ # XXX this reinterpret() is only here to prevent reinterpretation. return self.exprinfo -class AppTraceback(py.code.Traceback): - Entry = AppTracebackEntry + def ishidden(self): + return False + +class AppTraceback(py.code.Traceback): + Entry = AppTracebackEntry def __init__(self, space, apptb): l = [] @@ -151,7 +155,7 @@ l.append(self.Entry(space, apptb)) apptb = space.getattr(apptb, space.wrap('tb_next')) list.__init__(self, l) - + # ____________________________________________________________ def build_pytest_assertion(space): @@ -163,10 +167,10 @@ ## # Argh! we may see app-level helpers in the frame stack! ## # that's very probably very bad... ## ^^^the above comment may be outdated, but we are not sure - + # if the assertion provided a message, don't do magic args_w, kwargs_w = __args__.unpack() - if args_w: + if args_w: w_msg = args_w[0] else: frame = space.getexecutioncontext().gettopframe() @@ -174,7 +178,7 @@ try: source = runner.statement source = str(source).strip() - except py.error.ENOENT: + except py.error.ENOENT: source = None from pypy import conftest if source and py.test.config._assertstate.mode != "off": @@ -187,7 +191,7 @@ space.setattr(w_self, space.wrap('msg'), w_msg) # build a new AssertionError class to replace the original one. - w_BuiltinAssertionError = space.getitem(space.builtin.w_dict, + w_BuiltinAssertionError = space.getitem(space.builtin.w_dict, space.wrap('AssertionError')) w_metaclass = space.type(w_BuiltinAssertionError) w_init = space.wrap(gateway.interp2app_temp(my_init)) @@ -260,9 +264,9 @@ app_raises = gateway.interp2app_temp(pypyraises) -def pypyskip(space, w_message): - """skip a test at app-level. """ - msg = space.unwrap(w_message) +def pypyskip(space, w_message): + """skip a test at app-level. 
""" + msg = space.unwrap(w_message) py.test.skip(msg) app_skip = gateway.interp2app_temp(pypyskip) diff --git a/pypy/tool/readdictinfo.py b/pypy/tool/readdictinfo.py deleted file mode 100644 --- a/pypy/tool/readdictinfo.py +++ /dev/null @@ -1,115 +0,0 @@ -# this is for use with a pypy-c build with multidicts and using the -# MeasuringDictImplementation -- it will create a file called -# 'dictinfo.txt' in the local directory and this file will turn the -# contents back into DictInfo objects. - -# run with python -i ! - -import sys - -if __name__ == '__main__': - infile = open(sys.argv[1]) - - curr = None - slots = [] - for line in infile: - if line == '------------------\n': - if curr: - break - curr = 1 - else: - attr, val = [s.strip() for s in line.split(':')] - slots.append(attr) - - class DictInfo(object): - __slots__ = slots - - infile = open(sys.argv[1]) - - infos = [] - - for line in infile: - if line == '------------------\n': - curr = object.__new__(DictInfo) - infos.append(curr) - else: - attr, val = [s.strip() for s in line.split(':')] - if '.' 
in val: - val = float(val) - else: - val = int(val) - setattr(curr, attr, val) - -def histogram(infos, keyattr, *attrs): - r = {} - for info in infos: - v = getattr(info, keyattr) - l = r.setdefault(v, [0, {}]) - l[0] += 1 - for a in attrs: - d2 = l[1].setdefault(a, {}) - v2 = getattr(info, a) - d2[v2] = d2.get(v2, 0) + 1 - return sorted(r.items()) - -def reportDictInfos(): - d = {} - stillAlive = 0 - totLifetime = 0.0 - for info in infos: - for attr in slots: - if attr == 'maxcontents': - continue - v = getattr(info, attr) - if not isinstance(v, int): - continue - d[attr] = d.get(attr, 0) + v - if info.lifetime != -1.0: - totLifetime += info.lifetime - else: - stillAlive += 1 - print 'read info on', len(infos), 'dictionaries' - if stillAlive != len(infos): - print 'average lifetime', totLifetime/(len(infos) - stillAlive), - print '('+str(stillAlive), 'still alive at exit)' - print d - -def Rify(fname, *attributes): - output = open(fname, 'w') - for attr in attributes: - print >>output, attr, - print >>output - for info in infos: - for attr in attributes: - print >>output, getattr(info, attr), - print >>output - -if __name__ == '__main__': -# reportDictInfos() - - # interactive stuff: - - import __builtin__ - - def displayhook(v): - if v is not None: - __builtin__._ = v - pprint.pprint(v) - sys.displayhook = displayhook - - import pprint - try: - import readline - except ImportError: - pass - else: - import rlcompleter - readline.parse_and_bind('tab: complete') - - if len(sys.argv) > 2: - attrs = sys.argv[2].split(',') - if attrs == ['all']: - attrs = slots - Rify("R.txt", *attrs) - - diff --git a/pypy/tool/rundictbenchmarks.py b/pypy/tool/rundictbenchmarks.py deleted file mode 100644 --- a/pypy/tool/rundictbenchmarks.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys, os - -# this file runs some benchmarks with a pypy-c that is assumed to be -# built using the MeasuringDictImplementation. 
- -# it should be run with pypy/goal as the cwd, and you'll -# need to hack a copy of rst2html for yourself (svn docutils -# required). - -if __name__ == '__main__': - try: - os.unlink("dictinfo.txt") - except os.error: - pass - - progs = [('pystone', ['-c', 'from test import pystone; pystone.main()']), - ('richards', ['richards.py']), - ('docutils', ['rst2html.py', '../../doc/coding-guide.txt', 'foo.html']), - ('translate', ['translate.py', '--backendopt', '--no-compile', '--batch', - 'targetrpystonedalone.py']) - ] - - EXE = sys.argv[1] - - for suffix, args in progs: - os.spawnv(os.P_WAIT, EXE, [EXE] + args) - os.rename('dictinfo.txt', 'dictinfo-%s.txt'%suffix) diff --git a/rpython/jit/backend/conftest.py b/rpython/jit/backend/conftest.py --- a/rpython/jit/backend/conftest.py +++ b/rpython/jit/backend/conftest.py @@ -6,7 +6,7 @@ def pytest_addoption(parser): group = parser.getgroup('random test options') - group.addoption('--random-seed', action="store", type="int", + group.addoption('--random-seed', action="store", type=int, default=random.randrange(0, 10000), dest="randomseed", help="choose a fixed random seed") @@ -15,19 +15,19 @@ choices=['llgraph', 'cpu'], dest="backend", help="select the backend to run the functions with") - group.addoption('--block-length', action="store", type="int", + group.addoption('--block-length', action="store", type=int, default=30, dest="block_length", help="insert up to this many operations in each test") - group.addoption('--n-vars', action="store", type="int", + group.addoption('--n-vars', action="store", type=int, default=10, dest="n_vars", help="supply this many randomly-valued arguments to " "the function") - group.addoption('--repeat', action="store", type="int", + group.addoption('--repeat', action="store", type=int, default=15, dest="repeat", help="run the test this many times"), - group.addoption('--output', '-O', action="store", type="str", + group.addoption('--output', '-O', action="store", type=str, default="", 
dest="output", help="dump output to a file") diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -5,7 +5,7 @@ """ import weakref -import os +import os, py from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype from rpython.rlib.jit import JitDriver, dont_look_inside @@ -13,6 +13,7 @@ from rpython.jit.backend.llsupport.gc import GcLLDescr_framework from rpython.tool.udir import udir from rpython.config.translationoption import DEFL_GC +from rpython.config.config import ConfigError class X(object): @@ -166,6 +167,9 @@ cls.cbuilder = compile(get_entry(allfuncs), cls.gc, gcrootfinder=cls.gcrootfinder, jit=True, thread=True) + except ConfigError, e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: GcLLDescr_framework.DEBUG = OLD_DEBUG diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -1,4 +1,4 @@ -import os, sys +import os, sys, py from rpython.tool.udir import udir from rpython.rlib.jit import JitDriver, unroll_parameters, set_param from rpython.rlib.jit import PARAMETERS, dont_look_inside @@ -7,7 +7,7 @@ from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.test.support import CCompiledMixin from rpython.jit.codewriter.policy import StopAtXPolicy - +from rpython.config.config import ConfigError class TranslationTest(CCompiledMixin): CPUClass = getcpuclass() @@ -252,6 +252,9 @@ try: res = self.meta_interp(main, [400]) assert res == main(400) + except ConfigError,e: + assert str(e).startswith('invalid value asmgcc') + py.test.skip('asmgcc not supported') finally: del os.environ['PYPYLOG'] diff 
--git a/rpython/jit/backend/x86/support.py b/rpython/jit/backend/x86/support.py --- a/rpython/jit/backend/x86/support.py +++ b/rpython/jit/backend/x86/support.py @@ -7,11 +7,12 @@ extra = ['-DPYPY_X86_CHECK_SSE2'] if sys.platform != 'win32': extra += ['-msse2', '-mfpmath=sse'] + else: + extra += ['/arch:SSE2'] else: extra = [] # the -m options above are always on by default on x86-64 -if sys.platform != 'win32': - extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra +extra = ['-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo( compile_extra = extra, diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -887,6 +887,10 @@ def bhimpl_int_isconstant(x): return False + @arguments("f", returns="i") + def bhimpl_float_isconstant(x): + return False + @arguments("r", returns="i") def bhimpl_ref_isconstant(x): return False diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -387,24 +387,17 @@ @arguments("descr") def opimpl_new(self, sizedescr): - resbox = self.execute_with_descr(rop.NEW, sizedescr) - self.metainterp.heapcache.new(resbox) - return resbox + return self.metainterp.execute_new(sizedescr) @arguments("descr") def opimpl_new_with_vtable(self, sizedescr): cpu = self.metainterp.cpu cls = heaptracker.descr2vtable(cpu, sizedescr) - resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls)) - self.metainterp.heapcache.new(resbox) - self.metainterp.heapcache.class_now_known(resbox) - return resbox + return self.metainterp.execute_new_with_vtable(ConstInt(cls)) @arguments("box", "descr") def opimpl_new_array(self, lengthbox, itemsizedescr): - resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, lengthbox) - self.metainterp.heapcache.new_array(resbox, lengthbox) - return 
resbox + return self.metainterp.execute_new_array(itemsizedescr, lengthbox) @specialize.arg(1) def _do_getarrayitem_gc_any(self, op, arraybox, indexbox, arraydescr): @@ -467,10 +460,8 @@ @arguments("box", "box", "box", "descr") def _opimpl_setarrayitem_gc_any(self, arraybox, indexbox, itembox, arraydescr): - self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox, - indexbox, itembox) - self.metainterp.heapcache.setarrayitem( - arraybox, indexbox, itembox, arraydescr) + self.metainterp.execute_setarrayitem_gc(arraydescr, arraybox, + indexbox, itembox) opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any @@ -623,21 +614,22 @@ tobox = self.metainterp.heapcache.getfield(box, fielddescr) if tobox is valuebox: return - # The following test is disabled because buggy. It is supposed + self.metainterp.execute_setfield_gc(fielddescr, box, valuebox) + # The following logic is disabled because buggy. It is supposed # to be: not(we're writing null into a freshly allocated object) # but the bug is that is_unescaped() can be True even after the # field cache is cleared --- see test_ajit:test_unescaped_write_zero - if 1: # tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): - self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) - self.metainterp.heapcache.setfield(box, valuebox, fielddescr) + # + # if tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): + # self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) + # self.metainterp.heapcache.setfield(box, valuebox, fielddescr) opimpl_setfield_gc_i = _opimpl_setfield_gc_any opimpl_setfield_gc_r = _opimpl_setfield_gc_any opimpl_setfield_gc_f = _opimpl_setfield_gc_any @arguments("box", "box", "box", "descr") def _opimpl_setinteriorfield_gc_any(self, array, index, value, descr): - 
self.execute_with_descr(rop.SETINTERIORFIELD_GC, descr, - array, index, value) + self.metainterp.execute_setinteriorfield_gc(descr, array, index, value) opimpl_setinteriorfield_gc_i = _opimpl_setinteriorfield_gc_any opimpl_setinteriorfield_gc_f = _opimpl_setinteriorfield_gc_any From noreply at buildbot.pypy.org Thu Apr 17 15:02:12 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 15:02:12 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: minimize difference in merge Message-ID: <20140417130212.8231A1D2840@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70699:0e29fcfa8d8c Date: 2014-04-17 11:18 +0300 http://bitbucket.org/pypy/pypy/changeset/0e29fcfa8d8c/ Log: minimize difference in merge diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -44,6 +44,7 @@ from pypy.module.micronumpy import support from pypy.module.micronumpy.base import W_NDimArray + class PureShapeIter(object): def __init__(self, shape, idx_w): self.shape = shape From noreply at buildbot.pypy.org Thu Apr 17 15:02:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 15:02:13 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: revert merge cruft Message-ID: <20140417130213.9CE451D2840@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70700:71c0ca886656 Date: 2014-04-17 14:04 +0300 http://bitbucket.org/pypy/pypy/changeset/71c0ca886656/ Log: revert merge cruft diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,6 +42,7 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support +from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -143,6 +144,39 @@ 
self.array.setitem(self.offset, elem) +class SliceIterator(ArrayIter): + def __init__(self, arr, strides, backstrides, shape, order="C", + backward=False, dtype=None): + if dtype is None: + dtype = arr.implementation.dtype + self.dtype = dtype + self.arr = arr + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.elsize] + strides = strides[1:] + backstrides = backstrides[1:] + shape = shape[1:] + strides.reverse() + backstrides.reverse() + shape.reverse() + size = support.product(shape) + else: + shape = [support.product(shape)] + strides, backstrides = calc_strides(shape, dtype, order) + size = 1 + self.slicesize = support.product(shape) + self.gap = strides + + ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) + + def getslice(self): + from pypy.module.micronumpy.concrete import SliceArray + retVal = SliceArray(self.offset, self.gap, self.backstrides, + [self.slicesize], self.arr.implementation, self.arr, self.dtype) + return retVal + + def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() backstrides = array.get_backstrides() From noreply at buildbot.pypy.org Thu Apr 17 15:52:25 2014 From: noreply at buildbot.pypy.org (Conrad Calmez) Date: Thu, 17 Apr 2014 15:52:25 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-stm-c4: added branch to work on stm again Message-ID: <20140417135225.8B6AD1C02FC@cobra.cs.uni-duesseldorf.de> Author: Conrad Calmez Branch: storage-stm-c4 Changeset: r793:cef87bf823a0 Date: 2014-04-17 15:51 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/cef87bf823a0/ Log: added branch to work on stm again From noreply at buildbot.pypy.org Thu Apr 17 17:04:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 17 Apr 2014 17:04:43 +0200 (CEST) Subject: [pypy-commit] pypy default: A no-test change that looks like a good idea, to revert what Message-ID: <20140417150443.D28AB1C150C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
Changeset: r70701:b6cae81858b5 Date: 2014-04-17 17:03 +0200 http://bitbucket.org/pypy/pypy/changeset/b6cae81858b5/ Log: A no-test change that looks like a good idea, to revert what looks like a mistake in a7023a962605 diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -478,6 +478,8 @@ return w_mod2 self.setitem(w_modules, w_name, w_mod) w_mod.init(self) + else: + self.setitem(w_modules, w_name, w_mod) return w_mod def get_builtinmodule_to_install(self): From noreply at buildbot.pypy.org Thu Apr 17 17:28:24 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 17 Apr 2014 17:28:24 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: fix unused breakfinder Message-ID: <20140417152824.CD4241C01F4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70702:06328c7fb4d5 Date: 2014-04-17 17:24 +0200 http://bitbucket.org/pypy/pypy/changeset/06328c7fb4d5/ Log: fix unused breakfinder diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py --- a/rpython/translator/stm/breakfinder.py +++ b/rpython/translator/stm/breakfinder.py @@ -3,8 +3,8 @@ TRANSACTION_BREAK = set([ - 'stm_commit_transaction', - 'stm_start_inevitable_transaction', + 'stm_commit_if_not_atomic', + 'stm_start_inevitable_if_not_atomic', #'stm_perform_transaction', #'stm_partial_commit_and_resume_other_threads', # new priv_revision #'jit_assembler_call', From noreply at buildbot.pypy.org Thu Apr 17 17:28:26 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 17 Apr 2014 17:28:26 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: more optimal read-barrier placement for non-jit. 
doesn't improve performance Message-ID: <20140417152826.207E61C01F4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70703:5e60afcef785 Date: 2014-04-17 17:25 +0200 http://bitbucket.org/pypy/pypy/changeset/5e60afcef785/ Log: more optimal read-barrier placement for non-jit. doesn't improve performance measurably diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -1,14 +1,17 @@ from rpython.flowspace.model import SpaceOperation, Constant, Variable -from rpython.translator.unsimplify import varoftype from rpython.rtyper.lltypesystem import lltype +from rpython.translator.unsimplify import varoftype, insert_empty_block +from rpython.translator.unsimplify import insert_empty_startblock +from rpython.translator.simplify import join_blocks +MALLOCS = set([ + 'malloc', 'malloc_varsize', + 'malloc_nonmovable', 'malloc_nonmovable_varsize', + ]) + READ_OPS = set(['getfield', 'getarrayitem', 'getinteriorfield', 'raw_load']) - -def is_gc_ptr(T): - return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' - def unwraplist(list_v): for v in list_v: if isinstance(v, Constant): @@ -33,25 +36,38 @@ return OUTER._immutable_interiorfield(unwraplist(op.args[1:-1])) if op.opname in ('raw_load', 'raw_store'): return False + raise AssertionError(op) +def needs_barrier(frm, to): + return to > frm -def insert_stm_read_barrier(transformer, graph): - # We need to put enough 'stm_read' in the graph so that any - # execution of a READ_OP on some GC object is guaranteed to also - # execute either 'stm_read' or 'stm_write' on the same GC object - # during the same transaction. - # - # XXX this can be optimized a lot, but for now we go with the - # simplest possible solution... 
- # - gcremovetypeptr = transformer.translator.config.translation.gcremovetypeptr +def is_gc_ptr(T): + return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' - for block in graph.iterblocks(): - if not block.operations: - continue - newops = [] + + + +class BlockTransformer(object): + + def __init__(self, stmtransformer, block): + self.stmtransformer = stmtransformer + self.block = block + self.patch = None + self.inputargs_category = None + self.inputargs_category_per_link = {} + + def init_start_block(self): + from_outside = ['A'] * len(self.block.inputargs) + self.inputargs_category_per_link[None] = from_outside + self.update_inputargs_category() + + + def analyze_inside_block(self, graph): + gcremovetypeptr = ( + self.stmtransformer.translator.config.translation.gcremovetypeptr) + wants_a_barrier = {} stm_ignored = False - for op in block.operations: + for op in self.block.operations: is_getter = (op.opname in READ_OPS and op.result.concretetype is not lltype.Void and is_gc_ptr(op.args[0].concretetype)) @@ -61,26 +77,188 @@ op.args[0].concretetype.TO._hints.get('typeptr')): # typeptr is always immutable pass - elif ((op.opname in ('getarraysize', 'getinteriorarraysize') and - is_gc_ptr(op.args[0].concretetype)) or + elif (op.opname in ('getarraysize', 'getinteriorarraysize') and + is_gc_ptr(op.args[0].concretetype) or (is_getter and is_immutable(op))): # immutable getters - # 'weakref_deref': kind of immutable, but the GC has to see - # which transactions read from a dying weakref, so we - # need the barrier nonetheless... 
pass elif is_getter: + # the non-immutable getfields need a regular read barrier if not stm_ignored: - v_none = varoftype(lltype.Void) - newops.append(SpaceOperation('stm_read', - [op.args[0]], v_none)) - transformer.read_barrier_counts += 1 + wants_a_barrier[op] = 'R' + elif op.opname == 'weakref_deref': + # 'weakref_deref' needs a read barrier if we want to work + # around the "weakref issue" + assert not stm_ignored + wants_a_barrier[op] = 'R' elif op.opname == 'stm_ignored_start': - assert stm_ignored == False + assert not stm_ignored, "nested 'with stm_ignored'" stm_ignored = True elif op.opname == 'stm_ignored_stop': - assert stm_ignored == True + assert stm_ignored, "stm_ignored_stop without start?" stm_ignored = False - newops.append(op) - assert stm_ignored == False - block.operations = newops + # + if stm_ignored: + raise Exception("%r: 'with stm_ignored:' code body too complex" + % (graph,)) + self.wants_a_barrier = wants_a_barrier + + + def flow_through_block(self): + def cat_fetch(v): + return categories.setdefault(v, 'A') + + def get_category_or_null(v): + # 'v' is an original variable here, or a constant + if isinstance(v, Constant) and not v.value: # a NULL constant + return 'Z' + if v in categories: + return categories[v] + return 'A' + + newoperations = [] + stmtransformer = self.stmtransformer + categories = {} + + # make the initial trivial renamings needed to have some precise + # categories for the input args + for v, cat in zip(self.block.inputargs, self.inputargs_category): + if is_gc_ptr(v.concretetype): + assert cat is not None + categories[v] = cat + + for op in self.block.operations: + if (op.opname in ('cast_pointer', 'same_as') and + is_gc_ptr(op.result.concretetype)): + categories[op.result] = cat_fetch(op.args[0]) + newoperations.append(op) + continue + # + to = self.wants_a_barrier.get(op) + if to is not None: + v = op.args[0] + frm = cat_fetch(v) + if needs_barrier(frm, to): + stmtransformer.read_barrier_counts += 1 + v_none = 
varoftype(lltype.Void) + newop = SpaceOperation('stm_read', [v], v_none) + categories[v] = to + newoperations.append(newop) + # + newoperations.append(op) + # + if stmtransformer.break_analyzer.analyze(op): + # this operation can perform a transaction break: + # all references are lowered to 'A' again + for v in categories: + categories[v] = 'A' + + if op.opname == 'debug_stm_flush_barrier': + for v in categories: + categories[v] = 'A' + + if op.opname in MALLOCS: + categories[op.result] = 'R' + + blockoperations = newoperations + linkoperations = [] + for link in self.block.exits: + output_categories = [] + for v in link.args: + if is_gc_ptr(v.concretetype): + cat = cat_fetch(v) + else: + cat = None + output_categories.append(cat) + linkoperations.append(output_categories) + # + # Record how we'd like to patch the block, but don't do any + # patching yet + self.patch = (blockoperations, linkoperations) + + + def update_targets(self, block_transformers): + (_, linkoperations) = self.patch + assert len(linkoperations) == len(self.block.exits) + targetbts = [] + for link, output_categories in zip(self.block.exits, linkoperations): + targetblock = link.target + if targetblock not in block_transformers: + continue # ignore the exit block + targetbt = block_transformers[targetblock] + targetbt.inputargs_category_per_link[link] = output_categories + if targetbt.update_inputargs_category(): + targetbts.append(targetbt) + return set(targetbts) + + def update_inputargs_category(self): + values = self.inputargs_category_per_link.values() + newcats = [] + for i, v in enumerate(self.block.inputargs): + if is_gc_ptr(v.concretetype): + cats = [output_categories[i] for output_categories in values] + assert None not in cats + newcats.append(min(cats)) + else: + newcats.append(None) + if newcats != self.inputargs_category: + self.inputargs_category = newcats + return True + else: + return False + + + def patch_now(self): + if self.patch is None: + return + newoperations, 
linkoperations = self.patch + self.block.operations = newoperations + assert len(linkoperations) == len(self.block.exits) + # for link, (newargs, newoperations, _) in zip(self.block.exits, + # linkoperations): + # link.args[:] = newargs + # if newoperations: + # # must put them in a fresh block along the link + # annotator = self.stmtransformer.translator.annotator + # insert_empty_block(annotator, link, newoperations) + + +def insert_stm_read_barrier(stmtransformer, graph): + """This function uses the following characters for 'categories': + + * 'A': any general pointer + * 'R': the read barrier was applied + * 'Z': the null constant + + The letters are chosen so that a barrier is needed to change a + pointer from category x to category y if and only if y > x. + """ + # XXX: we currently don't use the information that any write + # operation on a gcptr will make it readable automatically + join_blocks(graph) + annotator = stmtransformer.translator.annotator + insert_empty_startblock(annotator, graph) + + block_transformers = {} + + for block in graph.iterblocks(): + if block.operations == (): + continue + bt = BlockTransformer(stmtransformer, block) + bt.analyze_inside_block(graph) + block_transformers[block] = bt + + bt = block_transformers[graph.startblock] + bt.init_start_block() + pending = set([bt]) + + while pending: + bt = pending.pop() + bt.flow_through_block() + pending |= bt.update_targets(block_transformers) + + for bt in block_transformers.values(): + bt.patch_now() + + # needed only for some fragile test ztranslated.test_stm_ignored + join_blocks(graph) diff --git a/rpython/translator/stm/test/test_readbarrier.py b/rpython/translator/stm/test/test_readbarrier.py --- a/rpython/translator/stm/test/test_readbarrier.py +++ b/rpython/translator/stm/test/test_readbarrier.py @@ -1,7 +1,8 @@ from rpython.rlib.objectmodel import stm_ignored from rpython.translator.stm.test.transform_support import BaseTestTransform -from rpython.rtyper.lltypesystem import 
lltype - +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rstm import register_invoke_around_extcall +from rpython.rtyper.lltypesystem.lloperation import llop class TestReadBarrier(BaseTestTransform): do_read_barrier = True @@ -26,6 +27,127 @@ assert res == 42 assert self.read_barriers == [x1] + def test_array_size(self): + array_gc = lltype.GcArray(('z', lltype.Signed)) + array_nongc = lltype.Array(('z', lltype.Signed)) + Q = lltype.GcStruct('Q', + ('gc', lltype.Ptr(array_gc)), + ('raw', lltype.Ptr(array_nongc))) + q = lltype.malloc(Q, immortal=True) + q.gc = lltype.malloc(array_gc, n=3, flavor='gc', immortal=True) + q.raw = lltype.malloc(array_nongc, n=5, flavor='raw', immortal=True) + def f1(n): + if n == 1: + return len(q.gc) + else: + return len(q.raw) + self.interpret(f1, [1]) + assert self.read_barriers == [q] + self.interpret(f1, [0]) + assert self.read_barriers == [q] + + def test_simple_read_2(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + x2 = lltype.malloc(X, immortal=True) + x2.foo = 81 + null = lltype.nullptr(X) + + def f1(n): + if n < 1: + p = null + else: + p = x2 + return p.foo + + res = self.interpret(f1, [4]) + assert res == 81 + assert self.read_barriers == [x2] + + + def test_multiple_reads(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed), + ('bar', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True) + x1.foo = 6 + x1.bar = 7 + x2 = lltype.malloc(X, immortal=True) + x2.foo = 81 + x2.bar = -1 + + def f1(n): + if n > 1: + return x2.foo * x2.bar + else: + return x1.foo * x1.bar + + res = self.interpret(f1, [4]) + assert res == -81 + assert self.read_barriers == [x2] + + def test_malloc(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(n): + p = lltype.malloc(X) + p.foo = n + + self.interpret(f1, [4]) + assert self.read_barriers == [] + + def test_repeat_read_barrier_after_malloc(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True) + x1.foo 
= 6 + def f1(n): + i = x1.foo + lltype.malloc(X) + i = x1.foo + i + return i + + self.interpret(f1, [4]) + assert self.read_barriers == [x1] + + def test_call_external_release_gil(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(p): + register_invoke_around_extcall() + x1 = p.foo + external_release_gil() + x2 = p.foo + return x1 * x2 + + x = lltype.malloc(X, immortal=True); x.foo = 6 + res = self.interpret(f1, [x]) + assert res == 36 + assert self.read_barriers == [x, x] + + def test_call_external_any_gcobj(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(p): + register_invoke_around_extcall() + x1 = p.foo + external_any_gcobj() + x2 = p.foo + return x1 * x2 + + x = lltype.malloc(X, immortal=True); x.foo = 6 + res = self.interpret(f1, [x]) + assert res == 36 + assert self.read_barriers == [x] + + def test_call_external_safest(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(p): + register_invoke_around_extcall() + x1 = p.foo + external_safest() + x2 = p.foo + return x1 * x2 + + x = lltype.malloc(X, immortal=True); x.foo = 6 + res = self.interpret(f1, [x]) + assert res == 36 + assert self.read_barriers == [x] + def test_stm_ignored_read(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) x1 = lltype.malloc(X, immortal=True) @@ -48,3 +170,19 @@ res = self.interpret(f1, [2]) assert res == 42 assert self.read_barriers == [x1] + + + + +external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=True, + releasegil=True) # GIL is released +external_any_gcobj = rffi.llexternal('external_any_gcobj', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=True, + releasegil=False) # GIL is not released +external_safest = rffi.llexternal('external_safest', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=False, + releasegil=False) # GIL is not released diff --git a/rpython/translator/stm/test/transform_support.py 
b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -103,12 +103,6 @@ def op_setfield(self, obj, fieldname, fieldvalue): if obj._TYPE.TO._gckind == 'gc': - T = lltype.typeOf(fieldvalue) - if isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc': - self.check_category(obj, 'W') - else: - self.check_category(obj, 'V') - # convert R -> Q all other pointers to the same object we can find for p in self.all_stm_ptrs(): if p._category == 'R' and p._T == obj._T and p == obj: _stmptr._category.__set__(p, 'Q') @@ -138,7 +132,6 @@ _stmptr._category.__set__(p, 'V') p = LLFrame.op_malloc(self, obj, flags) ptr2 = _stmptr(p, 'W') - self.llinterpreter.tester.writemode.add(ptr2._obj) return ptr2 def transaction_break(self): @@ -147,6 +140,13 @@ if p._category > 'I': _stmptr._category.__set__(p, 'I') + + def op_stm_commit_if_not_atomic(self): + self.transaction_break() + + def op_stm_start_inevitable_if_not_atomic(self): + self.transaction_break() + def op_stm_commit_transaction(self): self.transaction_break() diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -2,6 +2,7 @@ from rpython.translator.stm.readbarrier import insert_stm_read_barrier from rpython.translator.stm.jitdriver import reorganize_around_jit_driver from rpython.translator.stm.threadlocalref import transform_tlref +from rpython.translator.stm.breakfinder import TransactionBreakAnalyzer from rpython.translator.c.support import log @@ -29,10 +30,12 @@ self.print_logs(3) def transform_read_barrier(self): + self.break_analyzer = TransactionBreakAnalyzer(self.translator) self.read_barrier_counts = 0 for graph in self.translator.graphs: insert_stm_read_barrier(self, graph) log.info("%d read barriers inserted" % (self.read_barrier_counts,)) + del self.break_analyzer def 
transform_turn_inevitable(self): for graph in self.translator.graphs: From noreply at buildbot.pypy.org Thu Apr 17 17:28:27 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 17 Apr 2014 17:28:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Backed out changeset: 5e60afcef785 (doesn't improve performance) Message-ID: <20140417152827.422BC1C01F4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70704:899027a93ced Date: 2014-04-17 17:26 +0200 http://bitbucket.org/pypy/pypy/changeset/899027a93ced/ Log: Backed out changeset: 5e60afcef785 (doesn't improve performance) diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -1,17 +1,14 @@ from rpython.flowspace.model import SpaceOperation, Constant, Variable +from rpython.translator.unsimplify import varoftype from rpython.rtyper.lltypesystem import lltype -from rpython.translator.unsimplify import varoftype, insert_empty_block -from rpython.translator.unsimplify import insert_empty_startblock -from rpython.translator.simplify import join_blocks -MALLOCS = set([ - 'malloc', 'malloc_varsize', - 'malloc_nonmovable', 'malloc_nonmovable_varsize', - ]) - READ_OPS = set(['getfield', 'getarrayitem', 'getinteriorfield', 'raw_load']) + +def is_gc_ptr(T): + return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' + def unwraplist(list_v): for v in list_v: if isinstance(v, Constant): @@ -36,38 +33,25 @@ return OUTER._immutable_interiorfield(unwraplist(op.args[1:-1])) if op.opname in ('raw_load', 'raw_store'): return False - raise AssertionError(op) -def needs_barrier(frm, to): - return to > frm -def is_gc_ptr(T): - return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' +def insert_stm_read_barrier(transformer, graph): + # We need to put enough 'stm_read' in the graph so that any + # execution of a READ_OP on some GC object is guaranteed to also + # execute either 
'stm_read' or 'stm_write' on the same GC object + # during the same transaction. + # + # XXX this can be optimized a lot, but for now we go with the + # simplest possible solution... + # + gcremovetypeptr = transformer.translator.config.translation.gcremovetypeptr - - - -class BlockTransformer(object): - - def __init__(self, stmtransformer, block): - self.stmtransformer = stmtransformer - self.block = block - self.patch = None - self.inputargs_category = None - self.inputargs_category_per_link = {} - - def init_start_block(self): - from_outside = ['A'] * len(self.block.inputargs) - self.inputargs_category_per_link[None] = from_outside - self.update_inputargs_category() - - - def analyze_inside_block(self, graph): - gcremovetypeptr = ( - self.stmtransformer.translator.config.translation.gcremovetypeptr) - wants_a_barrier = {} + for block in graph.iterblocks(): + if not block.operations: + continue + newops = [] stm_ignored = False - for op in self.block.operations: + for op in block.operations: is_getter = (op.opname in READ_OPS and op.result.concretetype is not lltype.Void and is_gc_ptr(op.args[0].concretetype)) @@ -77,188 +61,26 @@ op.args[0].concretetype.TO._hints.get('typeptr')): # typeptr is always immutable pass - elif (op.opname in ('getarraysize', 'getinteriorarraysize') and - is_gc_ptr(op.args[0].concretetype) or + elif ((op.opname in ('getarraysize', 'getinteriorarraysize') and + is_gc_ptr(op.args[0].concretetype)) or (is_getter and is_immutable(op))): # immutable getters + # 'weakref_deref': kind of immutable, but the GC has to see + # which transactions read from a dying weakref, so we + # need the barrier nonetheless... 
pass elif is_getter: - # the non-immutable getfields need a regular read barrier if not stm_ignored: - wants_a_barrier[op] = 'R' - elif op.opname == 'weakref_deref': - # 'weakref_deref' needs a read barrier if we want to work - # around the "weakref issue" - assert not stm_ignored - wants_a_barrier[op] = 'R' + v_none = varoftype(lltype.Void) + newops.append(SpaceOperation('stm_read', + [op.args[0]], v_none)) + transformer.read_barrier_counts += 1 elif op.opname == 'stm_ignored_start': - assert not stm_ignored, "nested 'with stm_ignored'" + assert stm_ignored == False stm_ignored = True elif op.opname == 'stm_ignored_stop': - assert stm_ignored, "stm_ignored_stop without start?" + assert stm_ignored == True stm_ignored = False - # - if stm_ignored: - raise Exception("%r: 'with stm_ignored:' code body too complex" - % (graph,)) - self.wants_a_barrier = wants_a_barrier - - - def flow_through_block(self): - def cat_fetch(v): - return categories.setdefault(v, 'A') - - def get_category_or_null(v): - # 'v' is an original variable here, or a constant - if isinstance(v, Constant) and not v.value: # a NULL constant - return 'Z' - if v in categories: - return categories[v] - return 'A' - - newoperations = [] - stmtransformer = self.stmtransformer - categories = {} - - # make the initial trivial renamings needed to have some precise - # categories for the input args - for v, cat in zip(self.block.inputargs, self.inputargs_category): - if is_gc_ptr(v.concretetype): - assert cat is not None - categories[v] = cat - - for op in self.block.operations: - if (op.opname in ('cast_pointer', 'same_as') and - is_gc_ptr(op.result.concretetype)): - categories[op.result] = cat_fetch(op.args[0]) - newoperations.append(op) - continue - # - to = self.wants_a_barrier.get(op) - if to is not None: - v = op.args[0] - frm = cat_fetch(v) - if needs_barrier(frm, to): - stmtransformer.read_barrier_counts += 1 - v_none = varoftype(lltype.Void) - newop = SpaceOperation('stm_read', [v], v_none) - 
categories[v] = to - newoperations.append(newop) - # - newoperations.append(op) - # - if stmtransformer.break_analyzer.analyze(op): - # this operation can perform a transaction break: - # all references are lowered to 'A' again - for v in categories: - categories[v] = 'A' - - if op.opname == 'debug_stm_flush_barrier': - for v in categories: - categories[v] = 'A' - - if op.opname in MALLOCS: - categories[op.result] = 'R' - - blockoperations = newoperations - linkoperations = [] - for link in self.block.exits: - output_categories = [] - for v in link.args: - if is_gc_ptr(v.concretetype): - cat = cat_fetch(v) - else: - cat = None - output_categories.append(cat) - linkoperations.append(output_categories) - # - # Record how we'd like to patch the block, but don't do any - # patching yet - self.patch = (blockoperations, linkoperations) - - - def update_targets(self, block_transformers): - (_, linkoperations) = self.patch - assert len(linkoperations) == len(self.block.exits) - targetbts = [] - for link, output_categories in zip(self.block.exits, linkoperations): - targetblock = link.target - if targetblock not in block_transformers: - continue # ignore the exit block - targetbt = block_transformers[targetblock] - targetbt.inputargs_category_per_link[link] = output_categories - if targetbt.update_inputargs_category(): - targetbts.append(targetbt) - return set(targetbts) - - def update_inputargs_category(self): - values = self.inputargs_category_per_link.values() - newcats = [] - for i, v in enumerate(self.block.inputargs): - if is_gc_ptr(v.concretetype): - cats = [output_categories[i] for output_categories in values] - assert None not in cats - newcats.append(min(cats)) - else: - newcats.append(None) - if newcats != self.inputargs_category: - self.inputargs_category = newcats - return True - else: - return False - - - def patch_now(self): - if self.patch is None: - return - newoperations, linkoperations = self.patch - self.block.operations = newoperations - assert 
len(linkoperations) == len(self.block.exits) - # for link, (newargs, newoperations, _) in zip(self.block.exits, - # linkoperations): - # link.args[:] = newargs - # if newoperations: - # # must put them in a fresh block along the link - # annotator = self.stmtransformer.translator.annotator - # insert_empty_block(annotator, link, newoperations) - - -def insert_stm_read_barrier(stmtransformer, graph): - """This function uses the following characters for 'categories': - - * 'A': any general pointer - * 'R': the read barrier was applied - * 'Z': the null constant - - The letters are chosen so that a barrier is needed to change a - pointer from category x to category y if and only if y > x. - """ - # XXX: we currently don't use the information that any write - # operation on a gcptr will make it readable automatically - join_blocks(graph) - annotator = stmtransformer.translator.annotator - insert_empty_startblock(annotator, graph) - - block_transformers = {} - - for block in graph.iterblocks(): - if block.operations == (): - continue - bt = BlockTransformer(stmtransformer, block) - bt.analyze_inside_block(graph) - block_transformers[block] = bt - - bt = block_transformers[graph.startblock] - bt.init_start_block() - pending = set([bt]) - - while pending: - bt = pending.pop() - bt.flow_through_block() - pending |= bt.update_targets(block_transformers) - - for bt in block_transformers.values(): - bt.patch_now() - - # needed only for some fragile test ztranslated.test_stm_ignored - join_blocks(graph) + newops.append(op) + assert stm_ignored == False + block.operations = newops diff --git a/rpython/translator/stm/test/test_readbarrier.py b/rpython/translator/stm/test/test_readbarrier.py --- a/rpython/translator/stm/test/test_readbarrier.py +++ b/rpython/translator/stm/test/test_readbarrier.py @@ -1,8 +1,7 @@ from rpython.rlib.objectmodel import stm_ignored from rpython.translator.stm.test.transform_support import BaseTestTransform -from rpython.rtyper.lltypesystem import 
lltype, rffi -from rpython.rlib.rstm import register_invoke_around_extcall -from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem import lltype + class TestReadBarrier(BaseTestTransform): do_read_barrier = True @@ -27,127 +26,6 @@ assert res == 42 assert self.read_barriers == [x1] - def test_array_size(self): - array_gc = lltype.GcArray(('z', lltype.Signed)) - array_nongc = lltype.Array(('z', lltype.Signed)) - Q = lltype.GcStruct('Q', - ('gc', lltype.Ptr(array_gc)), - ('raw', lltype.Ptr(array_nongc))) - q = lltype.malloc(Q, immortal=True) - q.gc = lltype.malloc(array_gc, n=3, flavor='gc', immortal=True) - q.raw = lltype.malloc(array_nongc, n=5, flavor='raw', immortal=True) - def f1(n): - if n == 1: - return len(q.gc) - else: - return len(q.raw) - self.interpret(f1, [1]) - assert self.read_barriers == [q] - self.interpret(f1, [0]) - assert self.read_barriers == [q] - - def test_simple_read_2(self): - X = lltype.GcStruct('X', ('foo', lltype.Signed)) - x2 = lltype.malloc(X, immortal=True) - x2.foo = 81 - null = lltype.nullptr(X) - - def f1(n): - if n < 1: - p = null - else: - p = x2 - return p.foo - - res = self.interpret(f1, [4]) - assert res == 81 - assert self.read_barriers == [x2] - - - def test_multiple_reads(self): - X = lltype.GcStruct('X', ('foo', lltype.Signed), - ('bar', lltype.Signed)) - x1 = lltype.malloc(X, immortal=True) - x1.foo = 6 - x1.bar = 7 - x2 = lltype.malloc(X, immortal=True) - x2.foo = 81 - x2.bar = -1 - - def f1(n): - if n > 1: - return x2.foo * x2.bar - else: - return x1.foo * x1.bar - - res = self.interpret(f1, [4]) - assert res == -81 - assert self.read_barriers == [x2] - - def test_malloc(self): - X = lltype.GcStruct('X', ('foo', lltype.Signed)) - def f1(n): - p = lltype.malloc(X) - p.foo = n - - self.interpret(f1, [4]) - assert self.read_barriers == [] - - def test_repeat_read_barrier_after_malloc(self): - X = lltype.GcStruct('X', ('foo', lltype.Signed)) - x1 = lltype.malloc(X, immortal=True) - x1.foo 
= 6 - def f1(n): - i = x1.foo - lltype.malloc(X) - i = x1.foo + i - return i - - self.interpret(f1, [4]) - assert self.read_barriers == [x1] - - def test_call_external_release_gil(self): - X = lltype.GcStruct('X', ('foo', lltype.Signed)) - def f1(p): - register_invoke_around_extcall() - x1 = p.foo - external_release_gil() - x2 = p.foo - return x1 * x2 - - x = lltype.malloc(X, immortal=True); x.foo = 6 - res = self.interpret(f1, [x]) - assert res == 36 - assert self.read_barriers == [x, x] - - def test_call_external_any_gcobj(self): - X = lltype.GcStruct('X', ('foo', lltype.Signed)) - def f1(p): - register_invoke_around_extcall() - x1 = p.foo - external_any_gcobj() - x2 = p.foo - return x1 * x2 - - x = lltype.malloc(X, immortal=True); x.foo = 6 - res = self.interpret(f1, [x]) - assert res == 36 - assert self.read_barriers == [x] - - def test_call_external_safest(self): - X = lltype.GcStruct('X', ('foo', lltype.Signed)) - def f1(p): - register_invoke_around_extcall() - x1 = p.foo - external_safest() - x2 = p.foo - return x1 * x2 - - x = lltype.malloc(X, immortal=True); x.foo = 6 - res = self.interpret(f1, [x]) - assert res == 36 - assert self.read_barriers == [x] - def test_stm_ignored_read(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) x1 = lltype.malloc(X, immortal=True) @@ -170,19 +48,3 @@ res = self.interpret(f1, [2]) assert res == 42 assert self.read_barriers == [x1] - - - - -external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, - _callable=lambda: None, - random_effects_on_gcobjs=True, - releasegil=True) # GIL is released -external_any_gcobj = rffi.llexternal('external_any_gcobj', [], lltype.Void, - _callable=lambda: None, - random_effects_on_gcobjs=True, - releasegil=False) # GIL is not released -external_safest = rffi.llexternal('external_safest', [], lltype.Void, - _callable=lambda: None, - random_effects_on_gcobjs=False, - releasegil=False) # GIL is not released diff --git a/rpython/translator/stm/test/transform_support.py 
b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -103,6 +103,12 @@ def op_setfield(self, obj, fieldname, fieldvalue): if obj._TYPE.TO._gckind == 'gc': + T = lltype.typeOf(fieldvalue) + if isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc': + self.check_category(obj, 'W') + else: + self.check_category(obj, 'V') + # convert R -> Q all other pointers to the same object we can find for p in self.all_stm_ptrs(): if p._category == 'R' and p._T == obj._T and p == obj: _stmptr._category.__set__(p, 'Q') @@ -132,6 +138,7 @@ _stmptr._category.__set__(p, 'V') p = LLFrame.op_malloc(self, obj, flags) ptr2 = _stmptr(p, 'W') + self.llinterpreter.tester.writemode.add(ptr2._obj) return ptr2 def transaction_break(self): @@ -140,13 +147,6 @@ if p._category > 'I': _stmptr._category.__set__(p, 'I') - - def op_stm_commit_if_not_atomic(self): - self.transaction_break() - - def op_stm_start_inevitable_if_not_atomic(self): - self.transaction_break() - def op_stm_commit_transaction(self): self.transaction_break() diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -2,7 +2,6 @@ from rpython.translator.stm.readbarrier import insert_stm_read_barrier from rpython.translator.stm.jitdriver import reorganize_around_jit_driver from rpython.translator.stm.threadlocalref import transform_tlref -from rpython.translator.stm.breakfinder import TransactionBreakAnalyzer from rpython.translator.c.support import log @@ -30,12 +29,10 @@ self.print_logs(3) def transform_read_barrier(self): - self.break_analyzer = TransactionBreakAnalyzer(self.translator) self.read_barrier_counts = 0 for graph in self.translator.graphs: insert_stm_read_barrier(self, graph) log.info("%d read barriers inserted" % (self.read_barrier_counts,)) - del self.break_analyzer def 
transform_turn_inevitable(self): for graph in self.translator.graphs: From noreply at buildbot.pypy.org Thu Apr 17 17:28:28 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 17 Apr 2014 17:28:28 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: delay stm_read until it needs to be done in stmrewrite. limited because we Message-ID: <20140417152828.606FA1C01F4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70705:10a78b65a5a9 Date: 2014-04-17 17:28 +0200 http://bitbucket.org/pypy/pypy/changeset/10a78b65a5a9/ Log: delay stm_read until it needs to be done in stmrewrite. limited because we have to do it before every guard diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -81,11 +81,15 @@ continue # ---------- fallback case (overwritten in stmrewrite) ----------- self.other_operation(op) + self.flush_caches() return self.newops def other_operation(self, op): self.newops.append(op) + def flush_caches(self): + pass + # ---------- def handle_malloc_operation(self, op): diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -4,10 +4,8 @@ from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.metainterp.history import BoxPtr, ConstPtr, ConstInt from rpython.rlib.objectmodel import specialize -from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import (have_debug_prints, debug_start, debug_stop, debug_print) -from rpython.jit.codewriter.effectinfo import EffectInfo class GcStmRewriterAssembler(GcRewriterAssembler): @@ -18,6 +16,7 @@ GcRewriterAssembler.__init__(self, *args) self.always_inevitable = False self.read_barrier_applied = {} + self.read_barrier_cache = [] def other_operation(self, op): 
opnum = op.getopnum() @@ -35,6 +34,10 @@ return # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): + if op.is_guard(): + # XXX: store this info on the guard descr + # to be able to do the stm_reads in resume + self.flush_read_barrier_cache() self.newops.append(op) return # ---------- non-pure getfields ---------- @@ -83,7 +86,11 @@ self.newops.append(op) return # ---------- jumps, finish, other ignored ops ---------- - if opnum in (rop.JUMP, rop.FINISH, rop.FORCE_TOKEN, + if opnum in (rop.JUMP, rop.FINISH): + self.flush_read_barrier_cache() + self.newops.append(op) + return + if opnum in (rop.FORCE_TOKEN, rop.READ_TIMESTAMP, rop.MARK_OPAQUE_PTR, rop.JIT_DEBUG, rop.KEEPALIVE, rop.QUASIIMMUT_FIELD, rop.RECORD_KNOWN_CLASS, @@ -94,6 +101,7 @@ # Check that none of the ops handled here can collect. # This is not done by the fallback here assert not op.is_call() and not op.is_malloc() + self.flush_caches() self.fallback_inevitable(op) def handle_call_assembler(self, op): @@ -102,10 +110,28 @@ self.next_op_may_be_in_new_transaction() GcRewriterAssembler.handle_call_assembler(self, op) + def emitting_an_operation_that_can_collect(self): + self.flush_read_barrier_cache() + GcRewriterAssembler.emitting_an_operation_that_can_collect(self) + def next_op_may_be_in_new_transaction(self): + self.flush_read_barrier_cache() self.always_inevitable = False self.read_barrier_applied.clear() + def flush_caches(self): + self.flush_read_barrier_cache() + + def flush_read_barrier_cache(self): + for v_ptr in self.read_barrier_cache: + if (v_ptr not in self.read_barrier_applied # if multiple times in this list + and v_ptr not in self.write_barrier_applied): + op1 = ResOperation(rop.STM_READ, [v_ptr], None) + self.newops.append(op1) + self.read_barrier_applied[v_ptr] = None + + del self.read_barrier_cache[:] + def handle_getfields(self, op): # XXX missing optimitations: the placement of stm_read should # ideally be delayed for a bit longer 
after the getfields; if we @@ -116,9 +142,7 @@ v_ptr = op.getarg(0) if (v_ptr not in self.read_barrier_applied and v_ptr not in self.write_barrier_applied): - op1 = ResOperation(rop.STM_READ, [v_ptr], None) - self.newops.append(op1) - self.read_barrier_applied[v_ptr] = None + self.read_barrier_cache.append(v_ptr) def must_apply_write_barrier(self, val, v=None): @@ -142,7 +166,6 @@ debug_print("fallback for", op.repr()) def maybe_handle_raw_accesses(self, op): - from rpython.jit.backend.llsupport.descr import FieldDescr descr = op.getdescr() assert isinstance(descr, FieldDescr) if descr.stm_dont_track_raw_accesses: diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -157,11 +157,11 @@ """, """ [p1, p2] p3 = getfield_gc(p1, descr=tzdescr) - stm_read(p1) p4 = getfield_gc(p1, descr=tzdescr) p5 = getfield_gc(p2, descr=tzdescr) + p6 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) stm_read(p2) - p6 = getfield_gc(p1, descr=tzdescr) jump() """) @@ -191,10 +191,10 @@ """, """ [p0, p1, p2] p3 = getfield_gc(p1, descr=tzdescr) - stm_read(p1) cond_call_gc_wb(p2, descr=wbdescr) setfield_gc(p2, p0, descr=tzdescr) p4 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) jump() """, t=NULL) @@ -388,8 +388,8 @@ """, """ [p1] p2 = getfield_gc(p1, descr=tzdescr) + i2 = getfield_gc(p2, descr=tydescr) stm_read(p1) - i2 = getfield_gc(p2, descr=tydescr) stm_read(p2) jump(p2, i2) """) @@ -406,7 +406,6 @@ """, """ [p1] i1 = getfield_gc(p1, descr=tydescr) - stm_read(p1) i2 = int_add(i1, 1) cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, i2, descr=tydescr) @@ -767,7 +766,7 @@ """, """ [i0, f0, p1] p2 = getfield_gc(p1, descr=tzdescr) - stm_read(p1) + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p2, descr=tzdescr) @@ -782,7 +781,6 @@ guard_not_forced() [] p3 = getfield_gc(p1, descr=tzdescr) - 
stm_read(p1) cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p3, descr=tzdescr) """) @@ -1182,8 +1180,8 @@ """ % d, """ [p1, p3, i1, p4] p2 = getfield_gc%(pure)s(p1, descr=uxdescr) + i4 = getarrayitem_gc%(pure)s(p4, i1, descr=vdescr) %(comment)s stm_read(p1) - i4 = getarrayitem_gc%(pure)s(p4, i1, descr=vdescr) %(comment)s stm_read(p4) jump(p2) """ % d, uxdescr=uxdescr, vdescr=vdescr) From noreply at buildbot.pypy.org Thu Apr 17 17:28:29 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 17 Apr 2014 17:28:29 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Backed out changeset: 10a78b65a5a9 (no measurable performance improvements) Message-ID: <20140417152829.7D23A1C01F4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70706:11a2f6c88ca6 Date: 2014-04-17 17:28 +0200 http://bitbucket.org/pypy/pypy/changeset/11a2f6c88ca6/ Log: Backed out changeset: 10a78b65a5a9 (no measurable performance improvements) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -81,15 +81,11 @@ continue # ---------- fallback case (overwritten in stmrewrite) ----------- self.other_operation(op) - self.flush_caches() return self.newops def other_operation(self, op): self.newops.append(op) - def flush_caches(self): - pass - # ---------- def handle_malloc_operation(self, op): diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -4,8 +4,10 @@ from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.metainterp.history import BoxPtr, ConstPtr, ConstInt from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import (have_debug_prints, debug_start, debug_stop, debug_print) 
+from rpython.jit.codewriter.effectinfo import EffectInfo class GcStmRewriterAssembler(GcRewriterAssembler): @@ -16,7 +18,6 @@ GcRewriterAssembler.__init__(self, *args) self.always_inevitable = False self.read_barrier_applied = {} - self.read_barrier_cache = [] def other_operation(self, op): opnum = op.getopnum() @@ -34,10 +35,6 @@ return # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): - if op.is_guard(): - # XXX: store this info on the guard descr - # to be able to do the stm_reads in resume - self.flush_read_barrier_cache() self.newops.append(op) return # ---------- non-pure getfields ---------- @@ -86,11 +83,7 @@ self.newops.append(op) return # ---------- jumps, finish, other ignored ops ---------- - if opnum in (rop.JUMP, rop.FINISH): - self.flush_read_barrier_cache() - self.newops.append(op) - return - if opnum in (rop.FORCE_TOKEN, + if opnum in (rop.JUMP, rop.FINISH, rop.FORCE_TOKEN, rop.READ_TIMESTAMP, rop.MARK_OPAQUE_PTR, rop.JIT_DEBUG, rop.KEEPALIVE, rop.QUASIIMMUT_FIELD, rop.RECORD_KNOWN_CLASS, @@ -101,7 +94,6 @@ # Check that none of the ops handled here can collect. 
# This is not done by the fallback here assert not op.is_call() and not op.is_malloc() - self.flush_caches() self.fallback_inevitable(op) def handle_call_assembler(self, op): @@ -110,28 +102,10 @@ self.next_op_may_be_in_new_transaction() GcRewriterAssembler.handle_call_assembler(self, op) - def emitting_an_operation_that_can_collect(self): - self.flush_read_barrier_cache() - GcRewriterAssembler.emitting_an_operation_that_can_collect(self) - def next_op_may_be_in_new_transaction(self): - self.flush_read_barrier_cache() self.always_inevitable = False self.read_barrier_applied.clear() - def flush_caches(self): - self.flush_read_barrier_cache() - - def flush_read_barrier_cache(self): - for v_ptr in self.read_barrier_cache: - if (v_ptr not in self.read_barrier_applied # if multiple times in this list - and v_ptr not in self.write_barrier_applied): - op1 = ResOperation(rop.STM_READ, [v_ptr], None) - self.newops.append(op1) - self.read_barrier_applied[v_ptr] = None - - del self.read_barrier_cache[:] - def handle_getfields(self, op): # XXX missing optimitations: the placement of stm_read should # ideally be delayed for a bit longer after the getfields; if we @@ -142,7 +116,9 @@ v_ptr = op.getarg(0) if (v_ptr not in self.read_barrier_applied and v_ptr not in self.write_barrier_applied): - self.read_barrier_cache.append(v_ptr) + op1 = ResOperation(rop.STM_READ, [v_ptr], None) + self.newops.append(op1) + self.read_barrier_applied[v_ptr] = None def must_apply_write_barrier(self, val, v=None): @@ -166,6 +142,7 @@ debug_print("fallback for", op.repr()) def maybe_handle_raw_accesses(self, op): + from rpython.jit.backend.llsupport.descr import FieldDescr descr = op.getdescr() assert isinstance(descr, FieldDescr) if descr.stm_dont_track_raw_accesses: diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ 
b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -157,11 +157,11 @@ """, """ [p1, p2] p3 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) p4 = getfield_gc(p1, descr=tzdescr) p5 = getfield_gc(p2, descr=tzdescr) + stm_read(p2) p6 = getfield_gc(p1, descr=tzdescr) - stm_read(p1) - stm_read(p2) jump() """) @@ -191,10 +191,10 @@ """, """ [p0, p1, p2] p3 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) cond_call_gc_wb(p2, descr=wbdescr) setfield_gc(p2, p0, descr=tzdescr) p4 = getfield_gc(p1, descr=tzdescr) - stm_read(p1) jump() """, t=NULL) @@ -388,8 +388,8 @@ """, """ [p1] p2 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) i2 = getfield_gc(p2, descr=tydescr) - stm_read(p1) stm_read(p2) jump(p2, i2) """) @@ -406,6 +406,7 @@ """, """ [p1] i1 = getfield_gc(p1, descr=tydescr) + stm_read(p1) i2 = int_add(i1, 1) cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, i2, descr=tydescr) @@ -766,7 +767,7 @@ """, """ [i0, f0, p1] p2 = getfield_gc(p1, descr=tzdescr) - + stm_read(p1) cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p2, descr=tzdescr) @@ -781,6 +782,7 @@ guard_not_forced() [] p3 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p3, descr=tzdescr) """) @@ -1180,8 +1182,8 @@ """ % d, """ [p1, p3, i1, p4] p2 = getfield_gc%(pure)s(p1, descr=uxdescr) + %(comment)s stm_read(p1) i4 = getarrayitem_gc%(pure)s(p4, i1, descr=vdescr) - %(comment)s stm_read(p1) %(comment)s stm_read(p4) jump(p2) """ % d, uxdescr=uxdescr, vdescr=vdescr) From noreply at buildbot.pypy.org Thu Apr 17 18:28:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 18:28:42 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: fix merge, re-export name into module namespace Message-ID: <20140417162842.AE82E1C048F@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70707:963c427d19ab Date: 2014-04-17 19:27 +0300 http://bitbucket.org/pypy/pypy/changeset/963c427d19ab/ Log: fix merge, re-export name 
into module namespace diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,6 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', + 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c From noreply at buildbot.pypy.org Thu Apr 17 19:04:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 17 Apr 2014 19:04:00 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: virtualize the array iter in call2 Message-ID: <20140417170400.632531C2448@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70708:958f25449c81 Date: 2014-04-17 02:48 -0400 http://bitbucket.org/pypy/pypy/changeset/958f25449c81/ Log: virtualize the array iter in call2 diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -79,8 +79,9 @@ class ArrayIter(object): + _virtualizable_ = ['index', 'indices[*]', 'offset'] _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', - 'strides[*]', 'backstrides[*]'] + 'strides[*]', 'backstrides[*]', 'indices'] def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) @@ -90,11 +91,16 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides - self.reset() + self.index = 0 + self.indices = [0] * len(shape) + self.offset = array.start + + @jit.unroll_safe def reset(self): self.index = 0 - self.indices = [0] * len(self.shape_m1) + for i in xrange(self.ndim_m1, -1, -1): + self.indices[i] = 0 self.offset = self.array.start @jit.unroll_safe diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py 
+++ b/pypy/module/micronumpy/loop.py @@ -16,7 +16,8 @@ greens = ['shapelen', 'func', 'calc_dtype', 'res_dtype'], reds = ['shape', 'w_lhs', 'w_rhs', 'out', - 'left_iter', 'right_iter', 'out_iter']) + 'left_iter', 'right_iter', 'out_iter'], + virtualizables=['out_iter']) def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -99,9 +99,11 @@ assert result == 3 + 3 self.check_trace_count(1) self.check_simple_loop({ + 'arraylen_gc': 2, + 'cond_call': 2, 'float_add': 1, - 'getarrayitem_gc': 3, - 'getfield_gc': 7, + 'getarrayitem_gc': 2, + 'getfield_gc': 4, 'guard_false': 1, 'guard_not_invalidated': 1, 'guard_true': 3, @@ -111,8 +113,8 @@ 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 3, - 'setfield_gc': 6, + 'setarrayitem_gc': 2, + 'setfield_gc': 4, }) def define_pow(): @@ -126,13 +128,15 @@ assert result == 3 ** 2 self.check_trace_count(1) self.check_simple_loop({ + 'arraylen_gc': 2, 'call': 3, + 'cond_call': 2, 'float_add': 1, 'float_eq': 3, 'float_mul': 2, 'float_ne': 1, - 'getarrayitem_gc': 3, - 'getfield_gc': 7, + 'getarrayitem_gc': 2, + 'getfield_gc': 4, 'guard_false': 4, 'guard_not_invalidated': 1, 'guard_true': 5, @@ -143,8 +147,8 @@ 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 3, - 'setfield_gc': 6, + 'setarrayitem_gc': 2, + 'setfield_gc': 4, }) def define_sum(): @@ -526,6 +530,7 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ + 'cond_call': 2, 'getarrayitem_gc': 2, 'getfield_gc': 4, 'guard_not_invalidated': 1, @@ -564,12 +569,12 @@ 'raw_load': 2, }) self.check_resops({ - 'arraylen_gc': 1, + 'cond_call': 6, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 7, 'getarrayitem_gc_pure': 15, - 'getfield_gc': 35, + 'getfield_gc': 41, 'getfield_gc_pure': 39, 'guard_class': 4, 
'guard_false': 14, @@ -584,11 +589,11 @@ 'int_lt': 11, 'int_sub': 4, 'jump': 3, - 'new_array': 1, + 'ptr_ne': 6, 'raw_load': 6, 'raw_store': 1, - 'setarrayitem_gc': 8, - 'setfield_gc': 15, + 'setarrayitem_gc': 10, + 'setfield_gc': 14, }) def define_argsort(): From noreply at buildbot.pypy.org Thu Apr 17 19:04:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 17 Apr 2014 19:04:01 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: virtualize the array iter in dot Message-ID: <20140417170401.8E9961C2448@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70709:cc857b7e9422 Date: 2014-04-17 03:30 -0400 http://bitbucket.org/pypy/pypy/changeset/cc857b7e9422/ Log: virtualize the array iter in dot diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -270,7 +270,10 @@ dot_driver = jit.JitDriver(name = 'numpy_dot', greens = ['dtype'], - reds = 'auto') + reds=['n', 's1', 's2', 'i1', 'i2', 'i', + 'left_impl', 'right_impl', 'result', + 'outi', 'lefti', 'righti', 'oval'], + virtualizables=['outi']) def multidim_dot(space, left, right, result, dtype, right_critical_dim): ''' assumes left, right are concrete arrays @@ -306,7 +309,11 @@ i = 0 while i < n: i += 1 - dot_driver.jit_merge_point(dtype=dtype) + dot_driver.jit_merge_point(dtype=dtype, + n=n, s1=s1, s2=s2, i1=i1, i2=i2, i=i, + left_impl=left_impl, right_impl=right_impl, result=result, + outi=outi, lefti=lefti, righti=righti, oval=oval, + ) lval = left_impl.getitem(i1).convert_to(space, dtype) rval = right_impl.getitem(i2).convert_to(space, dtype) oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -569,31 +569,32 @@ 'raw_load': 2, }) self.check_resops({ - 'cond_call': 6, + 
'cond_call': 4, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 7, 'getarrayitem_gc_pure': 15, - 'getfield_gc': 41, + 'getfield_gc': 35, 'getfield_gc_pure': 39, 'guard_class': 4, - 'guard_false': 14, + 'guard_false': 18, 'guard_nonnull': 12, 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, 'guard_true': 13, - 'guard_value': 4, + 'guard_value': 6, 'int_add': 25, 'int_ge': 4, 'int_le': 8, 'int_lt': 11, 'int_sub': 4, 'jump': 3, - 'ptr_ne': 6, + 'ptr_eq': 4, + 'ptr_ne': 4, 'raw_load': 6, 'raw_store': 1, - 'setarrayitem_gc': 10, - 'setfield_gc': 14, + 'setarrayitem_gc': 7, + 'setfield_gc': 10, }) def define_argsort(): From noreply at buildbot.pypy.org Thu Apr 17 19:11:41 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 19:11:41 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: fix merge, redo changes to shape_agreement_multiple Message-ID: <20140417171141.574B21C3381@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70710:502dcbfd997e Date: 2014-04-17 20:02 +0300 http://bitbucket.org/pypy/pypy/changeset/502dcbfd997e/ Log: fix merge, redo changes to shape_agreement_multiple diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,14 +282,16 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list): +def shape_agreement_multiple(space, array_list, shape=None): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - shape = array_list[0].get_shape() - for arr in array_list[1:]: + for arr in array_list: if not space.is_none(arr): - shape = shape_agreement(space, shape, arr) + if shape is None: + shape = arr.get_shape() + else: + shape = shape_agreement(space, shape, arr) return shape From noreply at buildbot.pypy.org Thu Apr 17 19:11:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 19:11:42 +0200 (CEST) Subject: 
[pypy-commit] pypy numpypy-nditer: pep8 Message-ID: <20140417171142.873991C3381@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70711:2ff1d988d4a2 Date: 2014-04-17 20:03 +0300 http://bitbucket.org/pypy/pypy/changeset/2ff1d988d4a2/ Log: pep8 diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -290,7 +290,7 @@ if not space.is_none(arr): if shape is None: shape = arr.get_shape() - else: + else: shape = shape_agreement(space, shape, arr) return shape From noreply at buildbot.pypy.org Thu Apr 17 22:58:01 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 22:58:01 +0200 (CEST) Subject: [pypy-commit] pypy default: fix includes Message-ID: <20140417205801.866511C02FC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70712:5097ad65585c Date: 2014-04-17 23:43 +0200 http://bitbucket.org/pypy/pypy/changeset/5097ad65585c/ Log: fix includes diff --git a/rpython/translator/c/src/instrument.c b/rpython/translator/c/src/instrument.c --- a/rpython/translator/c/src/instrument.c +++ b/rpython/translator/c/src/instrument.c @@ -6,10 +6,10 @@ #include #include #include +#include +#include #ifndef _WIN32 #include -#include -#include #include #else #include diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -2,6 +2,7 @@ #ifdef _WIN32 +#include #include #define __thread __declspec(thread) typedef DWORD RPyThreadTLS; From noreply at buildbot.pypy.org Thu Apr 17 23:45:27 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 23:45:27 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: test, implement itersize Message-ID: <20140417214527.3BB951C01F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70713:66513118faad 
Date: 2014-04-18 00:13 +0300 http://bitbucket.org/pypy/pypy/changeset/66513118faad/ Log: test, implement itersize diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -8,7 +8,7 @@ from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator from pypy.module.micronumpy.concrete import SliceArray from pypy.module.micronumpy.descriptor import decode_w_dtype -from pypy.module.micronumpy import ufuncs +from pypy.module.micronumpy import ufuncs, support class AbstractIterator(object): @@ -528,8 +528,7 @@ 'not implemented yet')) def descr_get_itersize(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + return space.wrap(support.product(self.shape)) def descr_get_itviews(self, space): raise OperationError(space.w_NotImplementedError, space.wrap( diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -190,7 +190,9 @@ a = arange(3) b = arange(6).reshape(2,3) r = [] - for x,y in nditer([a, b]): + it = nditer([a, b]) + assert it.itersize == 6 + for x,y in it: r.append((x, y)) assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] a = arange(2) From noreply at buildbot.pypy.org Thu Apr 17 23:51:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 23:51:13 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: merge default into branch Message-ID: <20140417215113.D052A1C01F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70714:7db30f61132b Date: 2014-04-18 00:47 +0300 http://bitbucket.org/pypy/pypy/changeset/7db30f61132b/ Log: merge default into branch diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ 
b/pypy/interpreter/baseobjspace.py @@ -478,6 +478,8 @@ return w_mod2 self.setitem(w_modules, w_name, w_mod) w_mod.init(self) + else: + self.setitem(w_modules, w_name, w_mod) return w_mod def get_builtinmodule_to_install(self): diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -365,7 +365,7 @@ # we're returning with a guard_not_forced_2, and # additionally we need to say that r0 contains # a reference too: - self._finish_gcmap[0] |= r_uint(0) + self._finish_gcmap[0] |= r_uint(1) gcmap = self._finish_gcmap else: gcmap = self.gcmap_for_finish diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -220,6 +220,28 @@ call_release_gil_target = func._call_aroundstate_target_ call_release_gil_target = llmemory.cast_ptr_to_adr( call_release_gil_target) + elif op.opname == 'indirect_call': + # check that we're not trying to call indirectly some + # function with the special flags + graphs = op.args[-1].value + for graph in (graphs or ()): + if not hasattr(graph, 'func'): + continue + error = None + if hasattr(graph.func, '_elidable_function_'): + error = '@jit.elidable' + if hasattr(graph.func, '_jit_loop_invariant_'): + error = '@jit.loop_invariant' + if hasattr(graph.func, '_call_aroundstate_target_'): + error = '_call_aroundstate_target_' + if not error: + continue + raise Exception( + "%r is an indirect call to a family of functions " + "(or methods) that includes %r. However, the latter " + "is marked %r. You need to use an indirection: replace " + "it with a non-marked function/method which calls the " + "marked function." 
% (op, graph, error)) # build the extraeffect random_effects = self.randomeffects_analyzer.analyze(op) if random_effects: diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -167,6 +167,12 @@ def is_call_release_gil(self): return bool(self.call_release_gil_target) + def __repr__(self): + more = '' + if self.oopspecindex: + more = ' OS=%r' % (self.oopspecindex,) + return '' % (id(self), self.extraeffect, more) + def frozenset_or_none(x): if x is None: diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3418,6 +3418,26 @@ 'int_sub': 2, 'jump': 1, 'call': 2, 'guard_no_exception': 2, 'int_add': 4}) + def test_elidable_method(self): + py.test.skip("not supported so far: @elidable methods") + class A(object): + @elidable + def meth(self): + return 41 + class B(A): + @elidable + def meth(self): + return 42 + x = B() + def callme(x): + return x.meth() + def f(): + callme(A()) + return callme(x) + res = self.interp_operations(f, []) + assert res == 42 + self.check_operations_history({'finish': 1}) + def test_look_inside_iff_const_getarrayitem_gc_pure(self): driver = JitDriver(greens=['unroll'], reds=['s', 'n']) diff --git a/rpython/translator/c/src/instrument.c b/rpython/translator/c/src/instrument.c --- a/rpython/translator/c/src/instrument.c +++ b/rpython/translator/c/src/instrument.c @@ -6,10 +6,10 @@ #include #include #include +#include +#include #ifndef _WIN32 #include -#include -#include #include #else #include diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -1,3 +1,6 @@ +#ifndef _THREAD_NT_H +#define _THREAD_NT_H +#include #include /* @@ -19,4 +22,4 @@ void 
RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock); long RPyThreadGetStackSize(void); long RPyThreadSetStackSize(long); - +#endif diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -2,6 +2,7 @@ #ifdef _WIN32 +#include #include #define __thread __declspec(thread) typedef DWORD RPyThreadTLS; From noreply at buildbot.pypy.org Thu Apr 17 23:51:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 23:51:15 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: document branch before merge Message-ID: <20140417215115.0CCBC1C01F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70715:bcb1b1c4e9d4 Date: 2014-04-18 00:49 +0300 http://bitbucket.org/pypy/pypy/changeset/bcb1b1c4e9d4/ Log: document branch before merge diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -139,3 +139,4 @@ Fix issues with reimporting builtin modules .. branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) From noreply at buildbot.pypy.org Thu Apr 17 23:51:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 23:51:16 +0200 (CEST) Subject: [pypy-commit] pypy default: merge numpypy-nditer which implements most of the core of nditer Message-ID: <20140417215116.3A4871C01F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70716:8faf1baf09f3 Date: 2014-04-18 00:50 +0300 http://bitbucket.org/pypy/pypy/changeset/8faf1baf09f3/ Log: merge numpypy-nditer which implements most of the core of nditer diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -139,3 +139,4 @@ Fix issues with reimporting builtin modules .. 
branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,6 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', + 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,6 +42,7 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support +from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -143,6 +144,39 @@ self.array.setitem(self.offset, elem) +class SliceIterator(ArrayIter): + def __init__(self, arr, strides, backstrides, shape, order="C", + backward=False, dtype=None): + if dtype is None: + dtype = arr.implementation.dtype + self.dtype = dtype + self.arr = arr + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.elsize] + strides = strides[1:] + backstrides = backstrides[1:] + shape = shape[1:] + strides.reverse() + backstrides.reverse() + shape.reverse() + size = support.product(shape) + else: + shape = [support.product(shape)] + strides, backstrides = calc_strides(shape, dtype, order) + size = 1 + self.slicesize = support.product(shape) + self.gap = strides + + ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) + + def getslice(self): + from pypy.module.micronumpy.concrete import SliceArray + retVal = SliceArray(self.offset, self.gap, self.backstrides, + [self.slicesize], self.arr.implementation, self.arr, self.dtype) + return retVal + + def AxisIter(array, shape, axis, 
cumulative): strides = array.get_strides() backstrides = array.get_backstrides() diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/nditer.py @@ -0,0 +1,595 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.error import OperationError +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.strides import (calculate_broadcast_strides, + shape_agreement, shape_agreement_multiple) +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.concrete import SliceArray +from pypy.module.micronumpy.descriptor import decode_w_dtype +from pypy.module.micronumpy import ufuncs, support + + +class AbstractIterator(object): + def done(self): + raise NotImplementedError("Abstract Class") + + def next(self): + raise NotImplementedError("Abstract Class") + + def getitem(self, space, array): + raise NotImplementedError("Abstract Class") + +class IteratorMixin(object): + _mixin_ = True + def __init__(self, it, op_flags): + self.it = it + self.op_flags = op_flags + + def done(self): + return self.it.done() + + def next(self): + self.it.next() + + def getitem(self, space, array): + return self.op_flags.get_it_item[self.index](space, array, self.it) + + def setitem(self, space, array, val): + xxx + +class BoxIterator(IteratorMixin, AbstractIterator): + index = 0 + +class ExternalLoopIterator(IteratorMixin, AbstractIterator): + index = 1 + +def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): + ret = [] + if space.is_w(w_op_flags, space.w_None): + for i in range(n): + ret.append(OpFlag()) + elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise 
OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + else: + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) + for i in range(n): + ret.append(op_flag) + return ret + +class OpFlag(object): + def __init__(self): + self.rw = 'r' + self.broadcast = True + self.force_contig = False + self.force_align = False + self.native_byte_order = False + self.tmp_copy = '' + self.allocate = False + self.get_it_item = (get_readonly_item, get_readonly_slice) + +def get_readonly_item(space, array, it): + return space.wrap(it.getitem()) + +def get_readwrite_item(space, array, it): + #create a single-value view (since scalars are not views) + res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) + #it.dtype.setitem(res, 0, it.getitem()) + return W_NDimArray(res) + +def get_readonly_slice(space, array, it): + return W_NDimArray(it.getslice().readonly()) + +def get_readwrite_slice(space, array, it): + return W_NDimArray(it.getslice()) + +def parse_op_flag(space, lst): + op_flag = OpFlag() + for w_item in lst: + item = space.str_w(w_item) + if item == 'readonly': + op_flag.rw = 'r' + elif item == 'readwrite': + op_flag.rw = 'rw' + elif item == 'writeonly': + op_flag.rw = 'w' + elif item == 'no_broadcast': + op_flag.broadcast = False + elif item == 'contig': + op_flag.force_contig = True + elif item == 'aligned': + op_flag.force_align = True + elif item == 'nbo': + op_flag.native_byte_order = True + elif item == 'copy': + op_flag.tmp_copy = 'r' + elif item == 'updateifcopy': + op_flag.tmp_copy = 'rw' + elif item == 'allocate': + op_flag.allocate = True + elif 
item == 'no_subtype': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"no_subtype" op_flag not implemented yet')) + elif item == 'arraymask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"arraymask" op_flag not implemented yet')) + elif item == 'writemask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"writemask" op_flag not implemented yet')) + else: + raise OperationError(space.w_ValueError, space.wrap( + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == 'r': + op_flag.get_it_item = (get_readonly_item, get_readonly_slice) + elif op_flag.rw == 'rw': + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + elif op_flag.rw == 'w': + # XXX Extra logic needed to make sure writeonly + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + return op_flag + +def parse_func_flags(space, nditer, w_flags): + if space.is_w(w_flags, space.w_None): + return + elif not space.isinstance_w(w_flags, space.w_tuple) and not \ + space.isinstance_w(w_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + 'Iter global flags must be a list or tuple of strings')) + lst = space.listview(w_flags) + for w_item in lst: + if not space.isinstance_w(w_item, space.w_str) and not \ + space.isinstance_w(w_item, space.w_unicode): + typename = space.type(w_item).getname(space) + raise OperationError(space.w_TypeError, space.wrap( + 'expected string or Unicode object, %s found' % typename)) + item = space.str_w(w_item) + if item == 'external_loop': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer external_loop not implemented yet')) + nditer.external_loop = True + elif item == 'buffered': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer buffered not implemented yet')) + # For numpy compatability + nditer.buffered = True + elif item == 'c_index': + nditer.tracked_index = 'C' + elif item == 'f_index': + 
nditer.tracked_index = 'F' + elif item == 'multi_index': + nditer.tracked_index = 'multi' + elif item == 'common_dtype': + nditer.common_dtype = True + elif item == 'delay_bufalloc': + nditer.delay_bufalloc = True + elif item == 'grow_inner': + nditer.grow_inner = True + elif item == 'ranged': + nditer.ranged = True + elif item == 'refs_ok': + nditer.refs_ok = True + elif item == 'reduce_ok': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer reduce_ok not implemented yet')) + nditer.reduce_ok = True + elif item == 'zerosize_ok': + nditer.zerosize_ok = True + else: + raise OperationError(space.w_ValueError, space.wrap( + 'Unexpected iterator global flag "%s"' % item)) + if nditer.tracked_index and nditer.external_loop: + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + +def is_backward(imp, order): + if order == 'K' or (order == 'C' and imp.order == 'C'): + return False + elif order =='F' and imp.order == 'C': + return True + else: + raise NotImplementedError('not implemented yet') + +def get_iter(space, order, arr, shape, dtype): + imp = arr.implementation + backward = is_backward(imp, order) + if arr.is_scalar(): + return ArrayIter(imp, 1, [], [], []) + if (imp.strides[0] < imp.strides[-1] and not backward) or \ + (imp.strides[0] > imp.strides[-1] and backward): + # flip the strides. Is this always true for multidimension? 
+ strides = imp.strides[:] + backstrides = imp.backstrides[:] + shape = imp.shape[:] + strides.reverse() + backstrides.reverse() + shape.reverse() + else: + strides = imp.strides + backstrides = imp.backstrides + r = calculate_broadcast_strides(strides, backstrides, imp.shape, + shape, backward) + return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) + +def get_external_loop_iter(space, order, arr, shape): + imp = arr.implementation + backward = is_backward(imp, order) + return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + +def convert_to_array_or_none(space, w_elem): + ''' + None will be passed through, all others will be converted + ''' + if space.is_none(w_elem): + return None + return convert_to_array(space, w_elem) + + +class IndexIterator(object): + def __init__(self, shape, backward=False): + self.shape = shape + self.index = [0] * len(shape) + self.backward = backward + + def next(self): + # TODO It's probably possible to refactor all the "next" method from each iterator + for i in range(len(self.shape) - 1, -1, -1): + if self.index[i] < self.shape[i] - 1: + self.index[i] += 1 + break + else: + self.index[i] = 0 + + def getvalue(self): + if not self.backward: + ret = self.index[-1] + for i in range(len(self.shape) - 2, -1, -1): + ret += self.index[i] * self.shape[i - 1] + else: + ret = self.index[0] + for i in range(1, len(self.shape)): + ret += self.index[i] * self.shape[i - 1] + return ret + +class W_NDIter(W_Root): + + def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, + w_op_axes, w_itershape, w_buffersize, order): + self.order = order + self.external_loop = False + self.buffered = False + self.tracked_index = '' + self.common_dtype = False + self.delay_bufalloc = False + self.grow_inner = False + self.ranged = False + self.refs_ok = False + self.reduce_ok = False + self.zerosize_ok = False + self.index_iter = None + self.done = False + self.first_next = True + self.op_axes = [] + 
# convert w_seq operands to a list of W_NDimArray + if space.isinstance_w(w_seq, space.w_tuple) or \ + space.isinstance_w(w_seq, space.w_list): + w_seq_as_list = space.listview(w_seq) + self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + else: + self.seq =[convert_to_array(space, w_seq)] + + parse_func_flags(space, self, w_flags) + self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, + len(self.seq), parse_op_flag) + # handle w_op_axes + if not space.is_none(w_op_axes): + self.set_op_axes(space, w_op_axes) + + # handle w_op_dtypes part 1: creating self.dtypes list from input + if not space.is_none(w_op_dtypes): + w_seq_as_list = space.listview(w_op_dtypes) + self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] + if len(self.dtypes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap( + "op_dtypes must be a tuple/list matching the number of ops")) + else: + self.dtypes = [] + + # handle None or writable operands, calculate my shape + self.iters=[] + outargs = [i for i in range(len(self.seq)) \ + if self.seq[i] is None or self.op_flags[i].rw == 'w'] + if len(outargs) > 0: + out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) + else: + out_shape = None + self.shape = iter_shape = shape_agreement_multiple(space, self.seq, + shape=out_shape) + if len(outargs) > 0: + # Make None operands writeonly and flagged for allocation + if len(self.dtypes) > 0: + out_dtype = self.dtypes[outargs[0]] + else: + out_dtype = None + for i in range(len(self.seq)): + if self.seq[i] is None: + self.op_flags[i].get_it_item = (get_readwrite_item, + get_readwrite_slice) + self.op_flags[i].allocate = True + continue + if self.op_flags[i].rw == 'w': + continue + out_dtype = ufuncs.find_binop_result_dtype(space, + self.seq[i].get_dtype(), out_dtype) + for i in outargs: + if self.seq[i] is None: + # XXX can we postpone allocation to later? 
+ self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) + else: + if not self.op_flags[i].broadcast: + # Raises if ooutput cannot be broadcast + shape_agreement(space, iter_shape, self.seq[i], False) + + if self.tracked_index != "": + if self.order == "K": + self.order = self.seq[0].implementation.order + if self.tracked_index == "multi": + backward = False + else: + backward = self.order != self.tracked_index + self.index_iter = IndexIterator(iter_shape, backward=backward) + + # handle w_op_dtypes part 2: copy where needed if possible + if len(self.dtypes) > 0: + for i in range(len(self.seq)): + selfd = self.dtypes[i] + seq_d = self.seq[i].get_dtype() + if not selfd: + self.dtypes[i] = seq_d + elif selfd != seq_d: + if not 'r' in self.op_flags[i].tmp_copy: + raise OperationError(space.w_TypeError, space.wrap( + "Iterator operand required copying or buffering for operand %d" % i)) + impl = self.seq[i].implementation + new_impl = impl.astype(space, selfd) + self.seq[i] = W_NDimArray(new_impl) + else: + #copy them from seq + self.dtypes = [s.get_dtype() for s in self.seq] + + # create an iterator for each operand + if self.external_loop: + for i in range(len(self.seq)): + self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) + else: + for i in range(len(self.seq)): + self.iters.append(BoxIterator(get_iter(space, self.order, + self.seq[i], iter_shape, self.dtypes[i]), + self.op_flags[i])) + def set_op_axes(self, space, w_op_axes): + if space.len_w(w_op_axes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + op_axes = space.listview(w_op_axes) + l = -1 + for w_axis in op_axes: + if not space.is_none(w_axis): + axis_len = space.len_w(w_axis) + if l == -1: + l = axis_len + elif axis_len != l: + raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) + 
self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + if l == -1: + raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise Exception('xxx TODO') + # Check that values make sense: + # - in bounds for each operand + # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions + # - no repeat axis + # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0 + + def descr_iter(self, space): + return space.wrap(self) + + def descr_getitem(self, space, w_idx): + idx = space.int_w(w_idx) + try: + ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + except IndexError: + raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) + return ret + + def descr_setitem(self, space, w_idx, w_value): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_len(self, space): + space.wrap(len(self.iters)) + + def descr_next(self, space): + for it in self.iters: + if not it.done(): + break + else: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + res = [] + if self.index_iter: + if not self.first_next: + self.index_iter.next() + else: + self.first_next = False + for i in range(len(self.iters)): + res.append(self.iters[i].getitem(space, self.seq[i])) + self.iters[i].next() + if len(res) <2: + return res[0] + return space.newtuple(res) + + def iternext(self): + if self.index_iter: + self.index_iter.next() + for i in range(len(self.iters)): + self.iters[i].next() + for it in self.iters: + if not it.done(): + break + else: + self.done = True + return self.done + return self.done + + def descr_iternext(self, space): + return space.wrap(self.iternext()) + + def descr_copy(self, space): + raise 
OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_debug_print(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_enable_external_loop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + @unwrap_spec(axis=int) + def descr_remove_axis(self, space, axis): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_remove_multi_index(self, space, w_multi_index): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_reset(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_operands(self, space): + l_w = [] + for op in self.seq: + l_w.append(op.descr_view(space)) + return space.newlist(l_w) + + def descr_get_dtypes(self, space): + res = [None] * len(self.seq) + for i in range(len(self.seq)): + res[i] = self.seq[i].descr_get_dtype(space) + return space.newtuple(res) + + def descr_get_finished(self, space): + return space.wrap(self.done) + + def descr_get_has_delayed_bufalloc(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_has_index(self, space): + return space.wrap(self.tracked_index in ["C", "F"]) + + def descr_get_index(self, space): + if not self.tracked_index in ["C", "F"]: + raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.wrap(self.index_iter.getvalue()) + + def descr_get_has_multi_index(self, space): + return space.wrap(self.tracked_index == "multi") + + def descr_get_multi_index(self, space): + if not self.tracked_index == "multi": + raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a 
multi-index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.newtuple([space.wrap(x) for x in self.index_iter.index]) + + def descr_get_iterationneedsapi(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_iterindex(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_itersize(self, space): + return space.wrap(support.product(self.shape)) + + def descr_get_itviews(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_ndim(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_nop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_shape(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_value(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + + at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes = WrappedDefault(None), order=str, + w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), + w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) +def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order='K'): + return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order) + +W_NDIter.typedef = TypeDef( + 'nditer', + __iter__ = interp2app(W_NDIter.descr_iter), + __getitem__ = interp2app(W_NDIter.descr_getitem), + __setitem__ = interp2app(W_NDIter.descr_setitem), + __len__ = interp2app(W_NDIter.descr_len), + + next = interp2app(W_NDIter.descr_next), + iternext = 
interp2app(W_NDIter.descr_iternext), + copy = interp2app(W_NDIter.descr_copy), + debug_print = interp2app(W_NDIter.descr_debug_print), + enable_external_loop = interp2app(W_NDIter.descr_enable_external_loop), + remove_axis = interp2app(W_NDIter.descr_remove_axis), + remove_multi_index = interp2app(W_NDIter.descr_remove_multi_index), + reset = interp2app(W_NDIter.descr_reset), + + operands = GetSetProperty(W_NDIter.descr_get_operands), + dtypes = GetSetProperty(W_NDIter.descr_get_dtypes), + finished = GetSetProperty(W_NDIter.descr_get_finished), + has_delayed_bufalloc = GetSetProperty(W_NDIter.descr_get_has_delayed_bufalloc), + has_index = GetSetProperty(W_NDIter.descr_get_has_index), + index = GetSetProperty(W_NDIter.descr_get_index), + has_multi_index = GetSetProperty(W_NDIter.descr_get_has_multi_index), + multi_index = GetSetProperty(W_NDIter.descr_get_multi_index), + iterationneedsapi = GetSetProperty(W_NDIter.descr_get_iterationneedsapi), + iterindex = GetSetProperty(W_NDIter.descr_get_iterindex), + itersize = GetSetProperty(W_NDIter.descr_get_itersize), + itviews = GetSetProperty(W_NDIter.descr_get_itviews), + ndim = GetSetProperty(W_NDIter.descr_get_ndim), + nop = GetSetProperty(W_NDIter.descr_get_nop), + shape = GetSetProperty(W_NDIter.descr_get_shape), + value = GetSetProperty(W_NDIter.descr_get_value), +) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,14 +282,16 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list): +def shape_agreement_multiple(space, array_list, shape=None): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - shape = array_list[0].get_shape() - for arr in array_list[1:]: + for arr in array_list: if not space.is_none(arr): - shape = shape_agreement(space, shape, arr) + if shape is None: + shape = arr.get_shape() + else: + shape = shape_agreement(space, 
shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -0,0 +1,302 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestNDIter(BaseNumpyAppTest): + def test_basic(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + + for x in nditer(a.T): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + + def test_order(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a, order='C'): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + for x in nditer(a, order='F'): + r.append(x) + assert r == [0, 3, 1, 4, 2, 5] + + def test_readwrite(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + for x in nditer(a, op_flags=['readwrite']): + x[...] 
= 2 * x + assert (a == [[0, 2, 4], [6, 8, 10]]).all() + + def test_external_loop(self): + from numpy import arange, nditer, array + a = arange(24).reshape(2, 3, 4) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + r = [] + n = 0 + for x in nditer(a, flags=['external_loop']): + r.append(x) + n += 1 + assert n == 1 + assert (array(r) == range(24)).all() + r = [] + n = 0 + for x in nditer(a, flags=['external_loop'], order='F'): + r.append(x) + n += 1 + assert n == 12 + assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() + e = raises(ValueError, 'r[0][0] = 0') + assert str(e.value) == 'assignment destination is read-only' + r = [] + for x in nditer(a.T, flags=['external_loop'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1,24) + assert (array(r) == arange(24)).all() + + def test_index(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + + r = [] + it = nditer(a, flags=['c_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] + exc = None + try: + it.index + except ValueError, e: + exc = e + assert exc + + r = [] + it = nditer(a, flags=['f_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + + @py.test.mark.xfail(reason="Fortran order not implemented") + def test_iters_with_different_order(self): + from numpy import nditer, array + + a = array([[1, 2], [3, 4]], order="C") + b = array([[1, 2], [3, 4]], order="F") + + it = nditer([a, b]) + + assert list(it) == zip(range(1, 5), range(1, 5)) + + def test_interface(self): + from numpy import arange, nditer, zeros + import sys + a = 
arange(6).reshape(2,3) + r = [] + it = nditer(a, flags=['f_index']) + while not it.finished: + r.append((it[0], it.index)) + it.iternext() + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, 'it[0] = 3') + skip('nditer.__setitem__ not implmented') + while not it.finished: + it[0] = it.multi_index[1] - it.multi_index[0] + it.iternext() + assert (a == [[0, 1, 2], [-1, 0, 1]]).all() + # b = zeros((2, 3)) + # exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) + # assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") + + def test_buffered(self): + from numpy import arange, nditer, array + a = arange(6).reshape(2,3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered']) + skip('nditer buffered not implmented') + r = [] + for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1, 6) + assert (array_r == [0, 3, 1, 4, 2, 5]).all() + + def test_op_dtype(self): + from numpy import arange, nditer, sqrt, array + a = arange(6).reshape(2,3) - 3 + exc = raises(TypeError, nditer, a, op_dtypes=['complex']) + assert str(exc.value).startswith("Iterator operand required copying or buffering") + r = [] + for x in nditer(a, op_flags=['readonly','copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + r = [] + for x in nditer(a, op_flags=['copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + multi = nditer([None, array([2, 3], dtype='int64'), array(2., dtype='double')], + op_dtypes = ['int64', 'int64', 'float64'], + 
op_flags = [['writeonly', 'allocate'], ['readonly'], ['readonly']]) + for a, b, c in multi: + a[...] = b * c + assert (multi.operands[0] == [4, 6]).all() + + def test_casting(self): + from numpy import arange, nditer + import sys + a = arange(6.) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + skip('nditer casting not implemented yet') + exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], + casting='same_kind'): + r.append(x) + assert r == [0., 1., 2., 3., 4., 5.] + exc = raises(TypeError, nditer, a, flags=['buffered'], + op_dtypes=['int32'], casting='same_kind') + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + b = arange(6) + exc = raises(TypeError, nditer, b, flags=['buffered'], op_dtypes=['float64'], + op_flags=['readwrite'], casting='same_kind') + assert str(exc.value).startswith("Iterator requested dtype could not be cast") + + def test_broadcast(self): + from numpy import arange, nditer + a = arange(3) + b = arange(6).reshape(2,3) + r = [] + it = nditer([a, b]) + assert it.itersize == 6 + for x,y in it: + r.append((x, y)) + assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] + a = arange(2) + exc = raises(ValueError, nditer, [a, b]) + assert str(exc.value).find('shapes (2) (2,3)') > 0 + + def test_outarg(self): + from numpy import nditer, zeros, arange + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [1, 2], flags=['external_loop']) + skip('nditer external_loop not implmented') + + def square1(a): + it = nditer([a, None]) + for x,y in it: + y[...] 
= x*x + return it.operands[1] + assert (square1([1, 2, 3]) == [1, 4, 9]).all() + + def square2(a, out=None): + it = nditer([a, out], flags=['external_loop', 'buffered'], + op_flags=[['readonly'], + ['writeonly', 'allocate', 'no_broadcast']]) + for x,y in it: + y[...] = x*x + return it.operands[1] + assert (square2([1, 2, 3]) == [1, 4, 9]).all() + b = zeros((3, )) + c = square2([1, 2, 3], out=b) + assert (c == [1., 4., 9.]).all() + assert (b == c).all() + exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) + assert str(exc.value).find('cannot be broadcasted') > 0 + + def test_outer_product(self): + from numpy import nditer, arange + a = arange(3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + b = arange(8).reshape(2,4) + it = nditer([a, b, None], flags=['external_loop'], + op_axes=[[0, -1, -1], [-1, 0, 1], None]) + for x, y, z in it: + z[...] = x*y + assert it.operands[2].shape == (3, 2, 4) + for i in range(a.size): + assert (it.operands[2][i] == a[i]*b).all() + + def test_reduction(self): + from numpy import nditer, arange, array + import sys + a = arange(24).reshape(2, 3, 4) + b = array(0) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) + skip('nditer reduce_ok not implemented yet') + #reduction operands must be readwrite + for x, y in nditer([a, b], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite']]): + y[...] += x + assert b == 276 + assert b == a.sum() + + # reduction and allocation requires op_axes and initialization + it = nditer([a, None], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + for x, y in it: + y[...] 
+= x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + # previous example with buffering, requires more flags and reset + it = nditer([a, None], flags=['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + it.reset() + for x, y in it: + y[...] += x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + def test_get_dtypes(self): + from numpy import array, nditer + x = array([1, 2]) + y = array([1.0, 2.0]) + assert nditer([x, y]).dtypes == (x.dtype, y.dtype) + + def test_multi_index(self): + import numpy as np + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + res = [] + while not it.finished: + res.append((it[0], it.multi_index)) + it.iternext() + assert res == [(0, (0, 0)), (1, (0, 1)), + (2, (0, 2)), (3, (1, 0)), + (4, (1, 1)), (5, (1, 2))] From noreply at buildbot.pypy.org Thu Apr 17 23:53:09 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 17 Apr 2014 23:53:09 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: close merged branch Message-ID: <20140417215309.5C96B1C01F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70717:6247a9f002ec Date: 2014-04-18 00:52 +0300 http://bitbucket.org/pypy/pypy/changeset/6247a9f002ec/ Log: close merged branch From noreply at buildbot.pypy.org Fri Apr 18 00:08:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:08:45 +0200 (CEST) Subject: [pypy-commit] pypy default: avoid new array in iter reset Message-ID: <20140417220846.04DE21C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70718:5276d89f1cca Date: 2014-04-17 15:08 -0400 http://bitbucket.org/pypy/pypy/changeset/5276d89f1cca/ Log: avoid new array in iter reset diff --git 
a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -90,11 +90,16 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides - self.reset() + self.index = 0 + self.indices = [0] * len(shape) + self.offset = array.start + + @jit.unroll_safe def reset(self): self.index = 0 - self.indices = [0] * len(self.shape_m1) + for i in xrange(self.ndim_m1, -1, -1): + self.indices[i] = 0 self.offset = self.array.start @jit.unroll_safe diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -564,7 +564,6 @@ 'raw_load': 2, }) self.check_resops({ - 'arraylen_gc': 1, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 7, @@ -584,11 +583,10 @@ 'int_lt': 11, 'int_sub': 4, 'jump': 3, - 'new_array': 1, 'raw_load': 6, 'raw_store': 1, - 'setarrayitem_gc': 8, - 'setfield_gc': 15, + 'setarrayitem_gc': 10, + 'setfield_gc': 14, }) def define_argsort(): From noreply at buildbot.pypy.org Fri Apr 18 00:08:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:08:47 +0200 (CEST) Subject: [pypy-commit] pypy default: make sure argmax/argmin don't swallow exceptions Message-ID: <20140417220847.3C6A41C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70719:ac337a3c7780 Date: 2014-04-17 16:38 -0400 http://bitbucket.org/pypy/pypy/changeset/ac337a3c7780/ Log: make sure argmax/argmin don't swallow exceptions diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -996,7 +996,8 @@ descr_cumsum = _reduce_ufunc_impl('add', cumulative=True) descr_cumprod = _reduce_ufunc_impl('multiply', cumulative=True) - def _reduce_argmax_argmin_impl(op_name): + def 
_reduce_argmax_argmin_impl(raw_name): + op_name = "arg%s" % raw_name def impl(self, space, w_axis=None, w_out=None): if not space.is_none(w_axis): raise oefmt(space.w_NotImplementedError, @@ -1007,18 +1008,17 @@ if self.get_size() == 0: raise oefmt(space.w_ValueError, "Can't call %s on zero-size arrays", op_name) - op = getattr(loop, op_name) try: - res = op(self) + getattr(self.get_dtype().itemtype, raw_name) except AttributeError: raise oefmt(space.w_NotImplementedError, '%s not implemented for %s', op_name, self.get_dtype().get_name()) - return space.wrap(res) - return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) + return space.wrap(getattr(loop, op_name)(self)) + return func_with_new_name(impl, "reduce_%s_impl" % op_name) - descr_argmax = _reduce_argmax_argmin_impl("argmax") - descr_argmin = _reduce_argmax_argmin_impl("argmin") + descr_argmax = _reduce_argmax_argmin_impl("max") + descr_argmin = _reduce_argmax_argmin_impl("min") def descr_int(self, space): if self.get_size() != 1: From noreply at buildbot.pypy.org Fri Apr 18 00:08:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:08:48 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140417220848.ACFB71C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70720:9d5e3ed6389f Date: 2014-04-17 18:07 -0400 http://bitbucket.org/pypy/pypy/changeset/9d5e3ed6389f/ Log: merge heads diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -139,3 +139,4 @@ Fix issues with reimporting builtin modules .. 
branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,6 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', + 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,6 +42,7 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support +from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -148,6 +149,39 @@ self.array.setitem(self.offset, elem) +class SliceIterator(ArrayIter): + def __init__(self, arr, strides, backstrides, shape, order="C", + backward=False, dtype=None): + if dtype is None: + dtype = arr.implementation.dtype + self.dtype = dtype + self.arr = arr + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.elsize] + strides = strides[1:] + backstrides = backstrides[1:] + shape = shape[1:] + strides.reverse() + backstrides.reverse() + shape.reverse() + size = support.product(shape) + else: + shape = [support.product(shape)] + strides, backstrides = calc_strides(shape, dtype, order) + size = 1 + self.slicesize = support.product(shape) + self.gap = strides + + ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) + + def getslice(self): + from pypy.module.micronumpy.concrete import SliceArray + retVal = SliceArray(self.offset, self.gap, self.backstrides, + [self.slicesize], self.arr.implementation, self.arr, self.dtype) + return retVal + + def AxisIter(array, shape, axis, 
cumulative): strides = array.get_strides() backstrides = array.get_backstrides() diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/nditer.py @@ -0,0 +1,595 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.error import OperationError +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.strides import (calculate_broadcast_strides, + shape_agreement, shape_agreement_multiple) +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.concrete import SliceArray +from pypy.module.micronumpy.descriptor import decode_w_dtype +from pypy.module.micronumpy import ufuncs, support + + +class AbstractIterator(object): + def done(self): + raise NotImplementedError("Abstract Class") + + def next(self): + raise NotImplementedError("Abstract Class") + + def getitem(self, space, array): + raise NotImplementedError("Abstract Class") + +class IteratorMixin(object): + _mixin_ = True + def __init__(self, it, op_flags): + self.it = it + self.op_flags = op_flags + + def done(self): + return self.it.done() + + def next(self): + self.it.next() + + def getitem(self, space, array): + return self.op_flags.get_it_item[self.index](space, array, self.it) + + def setitem(self, space, array, val): + xxx + +class BoxIterator(IteratorMixin, AbstractIterator): + index = 0 + +class ExternalLoopIterator(IteratorMixin, AbstractIterator): + index = 1 + +def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): + ret = [] + if space.is_w(w_op_flags, space.w_None): + for i in range(n): + ret.append(OpFlag()) + elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise 
OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + else: + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) + for i in range(n): + ret.append(op_flag) + return ret + +class OpFlag(object): + def __init__(self): + self.rw = 'r' + self.broadcast = True + self.force_contig = False + self.force_align = False + self.native_byte_order = False + self.tmp_copy = '' + self.allocate = False + self.get_it_item = (get_readonly_item, get_readonly_slice) + +def get_readonly_item(space, array, it): + return space.wrap(it.getitem()) + +def get_readwrite_item(space, array, it): + #create a single-value view (since scalars are not views) + res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) + #it.dtype.setitem(res, 0, it.getitem()) + return W_NDimArray(res) + +def get_readonly_slice(space, array, it): + return W_NDimArray(it.getslice().readonly()) + +def get_readwrite_slice(space, array, it): + return W_NDimArray(it.getslice()) + +def parse_op_flag(space, lst): + op_flag = OpFlag() + for w_item in lst: + item = space.str_w(w_item) + if item == 'readonly': + op_flag.rw = 'r' + elif item == 'readwrite': + op_flag.rw = 'rw' + elif item == 'writeonly': + op_flag.rw = 'w' + elif item == 'no_broadcast': + op_flag.broadcast = False + elif item == 'contig': + op_flag.force_contig = True + elif item == 'aligned': + op_flag.force_align = True + elif item == 'nbo': + op_flag.native_byte_order = True + elif item == 'copy': + op_flag.tmp_copy = 'r' + elif item == 'updateifcopy': + op_flag.tmp_copy = 'rw' + elif item == 'allocate': + op_flag.allocate = True + elif 
item == 'no_subtype': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"no_subtype" op_flag not implemented yet')) + elif item == 'arraymask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"arraymask" op_flag not implemented yet')) + elif item == 'writemask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"writemask" op_flag not implemented yet')) + else: + raise OperationError(space.w_ValueError, space.wrap( + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == 'r': + op_flag.get_it_item = (get_readonly_item, get_readonly_slice) + elif op_flag.rw == 'rw': + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + elif op_flag.rw == 'w': + # XXX Extra logic needed to make sure writeonly + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + return op_flag + +def parse_func_flags(space, nditer, w_flags): + if space.is_w(w_flags, space.w_None): + return + elif not space.isinstance_w(w_flags, space.w_tuple) and not \ + space.isinstance_w(w_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + 'Iter global flags must be a list or tuple of strings')) + lst = space.listview(w_flags) + for w_item in lst: + if not space.isinstance_w(w_item, space.w_str) and not \ + space.isinstance_w(w_item, space.w_unicode): + typename = space.type(w_item).getname(space) + raise OperationError(space.w_TypeError, space.wrap( + 'expected string or Unicode object, %s found' % typename)) + item = space.str_w(w_item) + if item == 'external_loop': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer external_loop not implemented yet')) + nditer.external_loop = True + elif item == 'buffered': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer buffered not implemented yet')) + # For numpy compatability + nditer.buffered = True + elif item == 'c_index': + nditer.tracked_index = 'C' + elif item == 'f_index': + 
nditer.tracked_index = 'F' + elif item == 'multi_index': + nditer.tracked_index = 'multi' + elif item == 'common_dtype': + nditer.common_dtype = True + elif item == 'delay_bufalloc': + nditer.delay_bufalloc = True + elif item == 'grow_inner': + nditer.grow_inner = True + elif item == 'ranged': + nditer.ranged = True + elif item == 'refs_ok': + nditer.refs_ok = True + elif item == 'reduce_ok': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer reduce_ok not implemented yet')) + nditer.reduce_ok = True + elif item == 'zerosize_ok': + nditer.zerosize_ok = True + else: + raise OperationError(space.w_ValueError, space.wrap( + 'Unexpected iterator global flag "%s"' % item)) + if nditer.tracked_index and nditer.external_loop: + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + +def is_backward(imp, order): + if order == 'K' or (order == 'C' and imp.order == 'C'): + return False + elif order =='F' and imp.order == 'C': + return True + else: + raise NotImplementedError('not implemented yet') + +def get_iter(space, order, arr, shape, dtype): + imp = arr.implementation + backward = is_backward(imp, order) + if arr.is_scalar(): + return ArrayIter(imp, 1, [], [], []) + if (imp.strides[0] < imp.strides[-1] and not backward) or \ + (imp.strides[0] > imp.strides[-1] and backward): + # flip the strides. Is this always true for multidimension? 
+ strides = imp.strides[:] + backstrides = imp.backstrides[:] + shape = imp.shape[:] + strides.reverse() + backstrides.reverse() + shape.reverse() + else: + strides = imp.strides + backstrides = imp.backstrides + r = calculate_broadcast_strides(strides, backstrides, imp.shape, + shape, backward) + return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) + +def get_external_loop_iter(space, order, arr, shape): + imp = arr.implementation + backward = is_backward(imp, order) + return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + +def convert_to_array_or_none(space, w_elem): + ''' + None will be passed through, all others will be converted + ''' + if space.is_none(w_elem): + return None + return convert_to_array(space, w_elem) + + +class IndexIterator(object): + def __init__(self, shape, backward=False): + self.shape = shape + self.index = [0] * len(shape) + self.backward = backward + + def next(self): + # TODO It's probably possible to refactor all the "next" method from each iterator + for i in range(len(self.shape) - 1, -1, -1): + if self.index[i] < self.shape[i] - 1: + self.index[i] += 1 + break + else: + self.index[i] = 0 + + def getvalue(self): + if not self.backward: + ret = self.index[-1] + for i in range(len(self.shape) - 2, -1, -1): + ret += self.index[i] * self.shape[i - 1] + else: + ret = self.index[0] + for i in range(1, len(self.shape)): + ret += self.index[i] * self.shape[i - 1] + return ret + +class W_NDIter(W_Root): + + def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, + w_op_axes, w_itershape, w_buffersize, order): + self.order = order + self.external_loop = False + self.buffered = False + self.tracked_index = '' + self.common_dtype = False + self.delay_bufalloc = False + self.grow_inner = False + self.ranged = False + self.refs_ok = False + self.reduce_ok = False + self.zerosize_ok = False + self.index_iter = None + self.done = False + self.first_next = True + self.op_axes = [] + 
# convert w_seq operands to a list of W_NDimArray + if space.isinstance_w(w_seq, space.w_tuple) or \ + space.isinstance_w(w_seq, space.w_list): + w_seq_as_list = space.listview(w_seq) + self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + else: + self.seq =[convert_to_array(space, w_seq)] + + parse_func_flags(space, self, w_flags) + self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, + len(self.seq), parse_op_flag) + # handle w_op_axes + if not space.is_none(w_op_axes): + self.set_op_axes(space, w_op_axes) + + # handle w_op_dtypes part 1: creating self.dtypes list from input + if not space.is_none(w_op_dtypes): + w_seq_as_list = space.listview(w_op_dtypes) + self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] + if len(self.dtypes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap( + "op_dtypes must be a tuple/list matching the number of ops")) + else: + self.dtypes = [] + + # handle None or writable operands, calculate my shape + self.iters=[] + outargs = [i for i in range(len(self.seq)) \ + if self.seq[i] is None or self.op_flags[i].rw == 'w'] + if len(outargs) > 0: + out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) + else: + out_shape = None + self.shape = iter_shape = shape_agreement_multiple(space, self.seq, + shape=out_shape) + if len(outargs) > 0: + # Make None operands writeonly and flagged for allocation + if len(self.dtypes) > 0: + out_dtype = self.dtypes[outargs[0]] + else: + out_dtype = None + for i in range(len(self.seq)): + if self.seq[i] is None: + self.op_flags[i].get_it_item = (get_readwrite_item, + get_readwrite_slice) + self.op_flags[i].allocate = True + continue + if self.op_flags[i].rw == 'w': + continue + out_dtype = ufuncs.find_binop_result_dtype(space, + self.seq[i].get_dtype(), out_dtype) + for i in outargs: + if self.seq[i] is None: + # XXX can we postpone allocation to later? 
+ self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) + else: + if not self.op_flags[i].broadcast: + # Raises if ooutput cannot be broadcast + shape_agreement(space, iter_shape, self.seq[i], False) + + if self.tracked_index != "": + if self.order == "K": + self.order = self.seq[0].implementation.order + if self.tracked_index == "multi": + backward = False + else: + backward = self.order != self.tracked_index + self.index_iter = IndexIterator(iter_shape, backward=backward) + + # handle w_op_dtypes part 2: copy where needed if possible + if len(self.dtypes) > 0: + for i in range(len(self.seq)): + selfd = self.dtypes[i] + seq_d = self.seq[i].get_dtype() + if not selfd: + self.dtypes[i] = seq_d + elif selfd != seq_d: + if not 'r' in self.op_flags[i].tmp_copy: + raise OperationError(space.w_TypeError, space.wrap( + "Iterator operand required copying or buffering for operand %d" % i)) + impl = self.seq[i].implementation + new_impl = impl.astype(space, selfd) + self.seq[i] = W_NDimArray(new_impl) + else: + #copy them from seq + self.dtypes = [s.get_dtype() for s in self.seq] + + # create an iterator for each operand + if self.external_loop: + for i in range(len(self.seq)): + self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) + else: + for i in range(len(self.seq)): + self.iters.append(BoxIterator(get_iter(space, self.order, + self.seq[i], iter_shape, self.dtypes[i]), + self.op_flags[i])) + def set_op_axes(self, space, w_op_axes): + if space.len_w(w_op_axes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + op_axes = space.listview(w_op_axes) + l = -1 + for w_axis in op_axes: + if not space.is_none(w_axis): + axis_len = space.len_w(w_axis) + if l == -1: + l = axis_len + elif axis_len != l: + raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) + 
self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + if l == -1: + raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise Exception('xxx TODO') + # Check that values make sense: + # - in bounds for each operand + # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions + # - no repeat axis + # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0 + + def descr_iter(self, space): + return space.wrap(self) + + def descr_getitem(self, space, w_idx): + idx = space.int_w(w_idx) + try: + ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + except IndexError: + raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) + return ret + + def descr_setitem(self, space, w_idx, w_value): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_len(self, space): + space.wrap(len(self.iters)) + + def descr_next(self, space): + for it in self.iters: + if not it.done(): + break + else: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + res = [] + if self.index_iter: + if not self.first_next: + self.index_iter.next() + else: + self.first_next = False + for i in range(len(self.iters)): + res.append(self.iters[i].getitem(space, self.seq[i])) + self.iters[i].next() + if len(res) <2: + return res[0] + return space.newtuple(res) + + def iternext(self): + if self.index_iter: + self.index_iter.next() + for i in range(len(self.iters)): + self.iters[i].next() + for it in self.iters: + if not it.done(): + break + else: + self.done = True + return self.done + return self.done + + def descr_iternext(self, space): + return space.wrap(self.iternext()) + + def descr_copy(self, space): + raise 
OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_debug_print(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_enable_external_loop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + @unwrap_spec(axis=int) + def descr_remove_axis(self, space, axis): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_remove_multi_index(self, space, w_multi_index): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_reset(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_operands(self, space): + l_w = [] + for op in self.seq: + l_w.append(op.descr_view(space)) + return space.newlist(l_w) + + def descr_get_dtypes(self, space): + res = [None] * len(self.seq) + for i in range(len(self.seq)): + res[i] = self.seq[i].descr_get_dtype(space) + return space.newtuple(res) + + def descr_get_finished(self, space): + return space.wrap(self.done) + + def descr_get_has_delayed_bufalloc(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_has_index(self, space): + return space.wrap(self.tracked_index in ["C", "F"]) + + def descr_get_index(self, space): + if not self.tracked_index in ["C", "F"]: + raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.wrap(self.index_iter.getvalue()) + + def descr_get_has_multi_index(self, space): + return space.wrap(self.tracked_index == "multi") + + def descr_get_multi_index(self, space): + if not self.tracked_index == "multi": + raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a 
multi-index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.newtuple([space.wrap(x) for x in self.index_iter.index]) + + def descr_get_iterationneedsapi(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_iterindex(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_itersize(self, space): + return space.wrap(support.product(self.shape)) + + def descr_get_itviews(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_ndim(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_nop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_shape(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_value(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + + at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes = WrappedDefault(None), order=str, + w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), + w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) +def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order='K'): + return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order) + +W_NDIter.typedef = TypeDef( + 'nditer', + __iter__ = interp2app(W_NDIter.descr_iter), + __getitem__ = interp2app(W_NDIter.descr_getitem), + __setitem__ = interp2app(W_NDIter.descr_setitem), + __len__ = interp2app(W_NDIter.descr_len), + + next = interp2app(W_NDIter.descr_next), + iternext = 
interp2app(W_NDIter.descr_iternext), + copy = interp2app(W_NDIter.descr_copy), + debug_print = interp2app(W_NDIter.descr_debug_print), + enable_external_loop = interp2app(W_NDIter.descr_enable_external_loop), + remove_axis = interp2app(W_NDIter.descr_remove_axis), + remove_multi_index = interp2app(W_NDIter.descr_remove_multi_index), + reset = interp2app(W_NDIter.descr_reset), + + operands = GetSetProperty(W_NDIter.descr_get_operands), + dtypes = GetSetProperty(W_NDIter.descr_get_dtypes), + finished = GetSetProperty(W_NDIter.descr_get_finished), + has_delayed_bufalloc = GetSetProperty(W_NDIter.descr_get_has_delayed_bufalloc), + has_index = GetSetProperty(W_NDIter.descr_get_has_index), + index = GetSetProperty(W_NDIter.descr_get_index), + has_multi_index = GetSetProperty(W_NDIter.descr_get_has_multi_index), + multi_index = GetSetProperty(W_NDIter.descr_get_multi_index), + iterationneedsapi = GetSetProperty(W_NDIter.descr_get_iterationneedsapi), + iterindex = GetSetProperty(W_NDIter.descr_get_iterindex), + itersize = GetSetProperty(W_NDIter.descr_get_itersize), + itviews = GetSetProperty(W_NDIter.descr_get_itviews), + ndim = GetSetProperty(W_NDIter.descr_get_ndim), + nop = GetSetProperty(W_NDIter.descr_get_nop), + shape = GetSetProperty(W_NDIter.descr_get_shape), + value = GetSetProperty(W_NDIter.descr_get_value), +) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,14 +282,16 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list): +def shape_agreement_multiple(space, array_list, shape=None): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - shape = array_list[0].get_shape() - for arr in array_list[1:]: + for arr in array_list: if not space.is_none(arr): - shape = shape_agreement(space, shape, arr) + if shape is None: + shape = arr.get_shape() + else: + shape = shape_agreement(space, 
shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -0,0 +1,302 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestNDIter(BaseNumpyAppTest): + def test_basic(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + + for x in nditer(a.T): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + + def test_order(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a, order='C'): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + for x in nditer(a, order='F'): + r.append(x) + assert r == [0, 3, 1, 4, 2, 5] + + def test_readwrite(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + for x in nditer(a, op_flags=['readwrite']): + x[...] 
= 2 * x + assert (a == [[0, 2, 4], [6, 8, 10]]).all() + + def test_external_loop(self): + from numpy import arange, nditer, array + a = arange(24).reshape(2, 3, 4) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + r = [] + n = 0 + for x in nditer(a, flags=['external_loop']): + r.append(x) + n += 1 + assert n == 1 + assert (array(r) == range(24)).all() + r = [] + n = 0 + for x in nditer(a, flags=['external_loop'], order='F'): + r.append(x) + n += 1 + assert n == 12 + assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() + e = raises(ValueError, 'r[0][0] = 0') + assert str(e.value) == 'assignment destination is read-only' + r = [] + for x in nditer(a.T, flags=['external_loop'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1,24) + assert (array(r) == arange(24)).all() + + def test_index(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + + r = [] + it = nditer(a, flags=['c_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] + exc = None + try: + it.index + except ValueError, e: + exc = e + assert exc + + r = [] + it = nditer(a, flags=['f_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + + @py.test.mark.xfail(reason="Fortran order not implemented") + def test_iters_with_different_order(self): + from numpy import nditer, array + + a = array([[1, 2], [3, 4]], order="C") + b = array([[1, 2], [3, 4]], order="F") + + it = nditer([a, b]) + + assert list(it) == zip(range(1, 5), range(1, 5)) + + def test_interface(self): + from numpy import arange, nditer, zeros + import sys + a = 
arange(6).reshape(2,3) + r = [] + it = nditer(a, flags=['f_index']) + while not it.finished: + r.append((it[0], it.index)) + it.iternext() + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, 'it[0] = 3') + skip('nditer.__setitem__ not implmented') + while not it.finished: + it[0] = it.multi_index[1] - it.multi_index[0] + it.iternext() + assert (a == [[0, 1, 2], [-1, 0, 1]]).all() + # b = zeros((2, 3)) + # exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) + # assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") + + def test_buffered(self): + from numpy import arange, nditer, array + a = arange(6).reshape(2,3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered']) + skip('nditer buffered not implmented') + r = [] + for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1, 6) + assert (array_r == [0, 3, 1, 4, 2, 5]).all() + + def test_op_dtype(self): + from numpy import arange, nditer, sqrt, array + a = arange(6).reshape(2,3) - 3 + exc = raises(TypeError, nditer, a, op_dtypes=['complex']) + assert str(exc.value).startswith("Iterator operand required copying or buffering") + r = [] + for x in nditer(a, op_flags=['readonly','copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + r = [] + for x in nditer(a, op_flags=['copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + multi = nditer([None, array([2, 3], dtype='int64'), array(2., dtype='double')], + op_dtypes = ['int64', 'int64', 'float64'], + 
op_flags = [['writeonly', 'allocate'], ['readonly'], ['readonly']]) + for a, b, c in multi: + a[...] = b * c + assert (multi.operands[0] == [4, 6]).all() + + def test_casting(self): + from numpy import arange, nditer + import sys + a = arange(6.) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + skip('nditer casting not implemented yet') + exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], + casting='same_kind'): + r.append(x) + assert r == [0., 1., 2., 3., 4., 5.] + exc = raises(TypeError, nditer, a, flags=['buffered'], + op_dtypes=['int32'], casting='same_kind') + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + b = arange(6) + exc = raises(TypeError, nditer, b, flags=['buffered'], op_dtypes=['float64'], + op_flags=['readwrite'], casting='same_kind') + assert str(exc.value).startswith("Iterator requested dtype could not be cast") + + def test_broadcast(self): + from numpy import arange, nditer + a = arange(3) + b = arange(6).reshape(2,3) + r = [] + it = nditer([a, b]) + assert it.itersize == 6 + for x,y in it: + r.append((x, y)) + assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] + a = arange(2) + exc = raises(ValueError, nditer, [a, b]) + assert str(exc.value).find('shapes (2) (2,3)') > 0 + + def test_outarg(self): + from numpy import nditer, zeros, arange + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [1, 2], flags=['external_loop']) + skip('nditer external_loop not implmented') + + def square1(a): + it = nditer([a, None]) + for x,y in it: + y[...] 
= x*x + return it.operands[1] + assert (square1([1, 2, 3]) == [1, 4, 9]).all() + + def square2(a, out=None): + it = nditer([a, out], flags=['external_loop', 'buffered'], + op_flags=[['readonly'], + ['writeonly', 'allocate', 'no_broadcast']]) + for x,y in it: + y[...] = x*x + return it.operands[1] + assert (square2([1, 2, 3]) == [1, 4, 9]).all() + b = zeros((3, )) + c = square2([1, 2, 3], out=b) + assert (c == [1., 4., 9.]).all() + assert (b == c).all() + exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) + assert str(exc.value).find('cannot be broadcasted') > 0 + + def test_outer_product(self): + from numpy import nditer, arange + a = arange(3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + b = arange(8).reshape(2,4) + it = nditer([a, b, None], flags=['external_loop'], + op_axes=[[0, -1, -1], [-1, 0, 1], None]) + for x, y, z in it: + z[...] = x*y + assert it.operands[2].shape == (3, 2, 4) + for i in range(a.size): + assert (it.operands[2][i] == a[i]*b).all() + + def test_reduction(self): + from numpy import nditer, arange, array + import sys + a = arange(24).reshape(2, 3, 4) + b = array(0) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) + skip('nditer reduce_ok not implemented yet') + #reduction operands must be readwrite + for x, y in nditer([a, b], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite']]): + y[...] += x + assert b == 276 + assert b == a.sum() + + # reduction and allocation requires op_axes and initialization + it = nditer([a, None], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + for x, y in it: + y[...] 
+= x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + # previous example with buffering, requires more flags and reset + it = nditer([a, None], flags=['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + it.reset() + for x, y in it: + y[...] += x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + def test_get_dtypes(self): + from numpy import array, nditer + x = array([1, 2]) + y = array([1.0, 2.0]) + assert nditer([x, y]).dtypes == (x.dtype, y.dtype) + + def test_multi_index(self): + import numpy as np + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + res = [] + while not it.finished: + res.append((it[0], it.multi_index)) + it.iternext() + assert res == [(0, (0, 0)), (1, (0, 1)), + (2, (0, 2)), (3, (1, 0)), + (4, (1, 1)), (5, (1, 2))] diff --git a/rpython/translator/c/src/instrument.c b/rpython/translator/c/src/instrument.c --- a/rpython/translator/c/src/instrument.c +++ b/rpython/translator/c/src/instrument.c @@ -6,10 +6,10 @@ #include #include #include +#include +#include #ifndef _WIN32 #include -#include -#include #include #else #include diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -2,6 +2,7 @@ #ifdef _WIN32 +#include #include #define __thread __declspec(thread) typedef DWORD RPyThreadTLS; From noreply at buildbot.pypy.org Fri Apr 18 00:09:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:09:04 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: reset to try new approach Message-ID: <20140417220904.EF6761C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70721:ae7367976397 
Date: 2014-04-17 15:16 -0400 http://bitbucket.org/pypy/pypy/changeset/ae7367976397/ Log: reset to try new approach diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -79,9 +79,8 @@ class ArrayIter(object): - _virtualizable_ = ['index', 'indices[*]', 'offset'] _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', - 'strides[*]', 'backstrides[*]', 'indices'] + 'strides[*]', 'backstrides[*]'] def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) @@ -91,16 +90,11 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides + self.reset() - self.index = 0 - self.indices = [0] * len(shape) - self.offset = array.start - - @jit.unroll_safe def reset(self): self.index = 0 - for i in xrange(self.ndim_m1, -1, -1): - self.indices[i] = 0 + self.indices = [0] * len(self.shape_m1) self.offset = self.array.start @jit.unroll_safe diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -16,8 +16,7 @@ greens = ['shapelen', 'func', 'calc_dtype', 'res_dtype'], reds = ['shape', 'w_lhs', 'w_rhs', 'out', - 'left_iter', 'right_iter', 'out_iter'], - virtualizables=['out_iter']) + 'left_iter', 'right_iter', 'out_iter']) def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority @@ -270,10 +269,7 @@ dot_driver = jit.JitDriver(name = 'numpy_dot', greens = ['dtype'], - reds=['n', 's1', 's2', 'i1', 'i2', 'i', - 'left_impl', 'right_impl', 'result', - 'outi', 'lefti', 'righti', 'oval'], - virtualizables=['outi']) + reds = 'auto') def multidim_dot(space, left, right, result, dtype, right_critical_dim): ''' assumes left, right are concrete arrays @@ -309,11 +305,7 @@ i = 0 while i < n: i += 1 - dot_driver.jit_merge_point(dtype=dtype, - 
n=n, s1=s1, s2=s2, i1=i1, i2=i2, i=i, - left_impl=left_impl, right_impl=right_impl, result=result, - outi=outi, lefti=lefti, righti=righti, oval=oval, - ) + dot_driver.jit_merge_point(dtype=dtype) lval = left_impl.getitem(i1).convert_to(space, dtype) rval = right_impl.getitem(i2).convert_to(space, dtype) oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -99,11 +99,9 @@ assert result == 3 + 3 self.check_trace_count(1) self.check_simple_loop({ - 'arraylen_gc': 2, - 'cond_call': 2, 'float_add': 1, - 'getarrayitem_gc': 2, - 'getfield_gc': 4, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, 'guard_false': 1, 'guard_not_invalidated': 1, 'guard_true': 3, @@ -113,8 +111,8 @@ 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 2, - 'setfield_gc': 4, + 'setarrayitem_gc': 3, + 'setfield_gc': 6, }) def define_pow(): @@ -128,15 +126,13 @@ assert result == 3 ** 2 self.check_trace_count(1) self.check_simple_loop({ - 'arraylen_gc': 2, 'call': 3, - 'cond_call': 2, 'float_add': 1, 'float_eq': 3, 'float_mul': 2, 'float_ne': 1, - 'getarrayitem_gc': 2, - 'getfield_gc': 4, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, 'guard_false': 4, 'guard_not_invalidated': 1, 'guard_true': 5, @@ -147,8 +143,8 @@ 'jump': 1, 'raw_load': 2, 'raw_store': 1, - 'setarrayitem_gc': 2, - 'setfield_gc': 4, + 'setarrayitem_gc': 3, + 'setfield_gc': 6, }) def define_sum(): @@ -530,7 +526,6 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ - 'cond_call': 2, 'getarrayitem_gc': 2, 'getfield_gc': 4, 'guard_not_invalidated': 1, @@ -569,7 +564,7 @@ 'raw_load': 2, }) self.check_resops({ - 'cond_call': 4, + 'arraylen_gc': 1, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 7, @@ -577,24 +572,23 @@ 'getfield_gc': 35, 'getfield_gc_pure': 39, 'guard_class': 4, - 'guard_false': 18, + 
'guard_false': 14, 'guard_nonnull': 12, 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, 'guard_true': 13, - 'guard_value': 6, + 'guard_value': 4, 'int_add': 25, 'int_ge': 4, 'int_le': 8, 'int_lt': 11, 'int_sub': 4, 'jump': 3, - 'ptr_eq': 4, - 'ptr_ne': 4, + 'new_array': 1, 'raw_load': 6, 'raw_store': 1, - 'setarrayitem_gc': 7, - 'setfield_gc': 10, + 'setarrayitem_gc': 8, + 'setfield_gc': 15, }) def define_argsort(): From noreply at buildbot.pypy.org Fri Apr 18 00:09:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:09:06 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: merge default Message-ID: <20140417220906.411971C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70722:644c8a2d7eee Date: 2014-04-17 15:17 -0400 http://bitbucket.org/pypy/pypy/changeset/644c8a2d7eee/ Log: merge default diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -90,11 +90,16 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides - self.reset() + self.index = 0 + self.indices = [0] * len(shape) + self.offset = array.start + + @jit.unroll_safe def reset(self): self.index = 0 - self.indices = [0] * len(self.shape_m1) + for i in xrange(self.ndim_m1, -1, -1): + self.indices[i] = 0 self.offset = self.array.start @jit.unroll_safe diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -564,7 +564,6 @@ 'raw_load': 2, }) self.check_resops({ - 'arraylen_gc': 1, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 7, @@ -584,11 +583,10 @@ 'int_lt': 11, 'int_sub': 4, 'jump': 3, - 'new_array': 1, 'raw_load': 6, 'raw_store': 1, - 'setarrayitem_gc': 8, - 'setfield_gc': 15, + 'setarrayitem_gc': 10, + 'setfield_gc': 14, }) def 
define_argsort(): From noreply at buildbot.pypy.org Fri Apr 18 00:09:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:09:07 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: create iter state object to help jit in loops Message-ID: <20140417220907.670AC1C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70723:04699e4b9dd0 Date: 2014-04-17 17:40 -0400 http://bitbucket.org/pypy/pypy/changeset/04699e4b9dd0/ Log: create iter state object to help jit in loops diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -78,6 +78,13 @@ return [space.wrap(self.indexes[i]) for i in range(shapelen)] +class IterState(object): + def __init__(self, index, indices, offset): + self.index = index + self.indices = indices + self.offset = offset + + class ArrayIter(object): _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', 'strides[*]', 'backstrides[*]'] @@ -91,61 +98,59 @@ self.strides = strides self.backstrides = backstrides - self.index = 0 - self.indices = [0] * len(shape) - self.offset = array.start + def reset(self): + return IterState(0, [0] * len(self.shape_m1), self.array.start) @jit.unroll_safe - def reset(self): - self.index = 0 + def next(self, state): + index = state.index + 1 + indices = state.indices + offset = state.offset for i in xrange(self.ndim_m1, -1, -1): - self.indices[i] = 0 - self.offset = self.array.start + idx = indices[i] + if idx < self.shape_m1[i]: + indices[i] = idx + 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + return IterState(index, indices, offset) @jit.unroll_safe - def next(self): - self.index += 1 + def next_skip_x(self, state, step): + assert step >= 0 + if step == 0: + return state + index = state.index + step + indices = state.indices + offset = state.offset for i in xrange(self.ndim_m1, -1, 
-1): - idx = self.indices[i] - if idx < self.shape_m1[i]: - self.indices[i] = idx + 1 - self.offset += self.strides[i] + idx = indices[i] + if idx < (self.shape_m1[i] + 1) - step: + indices[i] = idx + step + offset += self.strides[i] * step break else: - self.indices[i] = 0 - self.offset -= self.backstrides[i] - - @jit.unroll_safe - def next_skip_x(self, step): - assert step >= 0 - if step == 0: - return - self.index += step - for i in xrange(self.ndim_m1, -1, -1): - idx = self.indices[i] - if idx < (self.shape_m1[i] + 1) - step: - self.indices[i] = idx + step - self.offset += self.strides[i] * step - break - else: - rem_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) + rem_step = (idx + step) // (self.shape_m1[i] + 1) cur_step = step - rem_step * (self.shape_m1[i] + 1) - self.indices[i] += cur_step - self.offset += self.strides[i] * cur_step + indices[i] = idx + cur_step + offset += self.strides[i] * cur_step step = rem_step assert step > 0 + return IterState(index, indices, offset) - def done(self): - return self.index >= self.size + def done(self, state): + return state.index >= self.size - def getitem(self): - return self.array.getitem(self.offset) + def getitem(self, state): + return self.array.getitem(state.offset) - def getitem_bool(self): - return self.array.getitem_bool(self.offset) + def getitem_bool(self, state): + return self.array.getitem_bool(state.offset) - def setitem(self, elem): - self.array.setitem(self.offset, elem) + def setitem(self, state, elem): + self.array.setitem(state.offset, elem) def AxisIter(array, shape, axis, cumulative): diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -16,17 +16,18 @@ assert backstrides == [10, 4] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() - i.next() - assert i.offset == 3 - assert not 
i.done() - assert i.indices == [0,3] + s = i.reset() + s = i.next(s) + s = i.next(s) + s = i.next(s) + assert s.offset == 3 + assert not i.done(s) + assert s.indices == [0,3] #cause a dimension overflow - i.next() - i.next() - assert i.offset == 5 - assert i.indices == [1,0] + s = i.next(s) + s = i.next(s) + assert s.offset == 5 + assert s.indices == [1,0] #Now what happens if the array is transposed? strides[-1] != 1 # therefore layout is non-contiguous @@ -35,17 +36,18 @@ assert backstrides == [2, 12] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() - i.next() - assert i.offset == 9 - assert not i.done() - assert i.indices == [0,3] + s = i.reset() + s = i.next(s) + s = i.next(s) + s = i.next(s) + assert s.offset == 9 + assert not i.done(s) + assert s.indices == [0,3] #cause a dimension overflow - i.next() - i.next() - assert i.offset == 1 - assert i.indices == [1,0] + s = i.next(s) + s = i.next(s) + assert s.offset == 1 + assert s.indices == [1,0] def test_iterator_step(self): #iteration in C order with #contiguous layout => strides[-1] is 1 @@ -56,22 +58,23 @@ assert backstrides == [10, 4] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 6 - assert not i.done() - assert i.indices == [1,1] + s = i.reset() + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + assert s.offset == 6 + assert not i.done(s) + assert s.indices == [1,1] #And for some big skips - i.next_skip_x(5) - assert i.offset == 11 - assert i.indices == [2,1] - i.next_skip_x(5) + s = i.next_skip_x(s, 5) + assert s.offset == 11 + assert s.indices == [2,1] + s = i.next_skip_x(s, 5) # Note: the offset does not overflow but recycles, # this is good for broadcast - assert i.offset == 1 - assert i.indices == [0,1] - assert i.done() + assert s.offset == 1 + assert s.indices == [0,1] + assert i.done(s) #Now what happens if the 
array is transposed? strides[-1] != 1 # therefore layout is non-contiguous @@ -80,17 +83,18 @@ assert backstrides == [2, 12] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 4 - assert i.indices == [1,1] - assert not i.done() - i.next_skip_x(5) - assert i.offset == 5 - assert i.indices == [2,1] - assert not i.done() - i.next_skip_x(5) - assert i.indices == [0,1] - assert i.offset == 3 - assert i.done() + s = i.reset() + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + assert s.offset == 4 + assert s.indices == [1,1] + assert not i.done(s) + s = i.next_skip_x(s, 5) + assert s.offset == 5 + assert s.indices == [2,1] + assert not i.done(s) + s = i.next_skip_x(s, 5) + assert s.indices == [0,1] + assert s.offset == 3 + assert i.done(s) From noreply at buildbot.pypy.org Fri Apr 18 00:09:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:09:08 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: change usages of iters to use state Message-ID: <20140417220908.9F68D1C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70724:71b86d3efc92 Date: 2014-04-17 16:31 -0400 http://bitbucket.org/pypy/pypy/changeset/71b86d3efc92/ Log: change usages of iters to use state diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -284,9 +284,11 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return ArrayIter(self, support.product(shape), shape, r[0], r[1]) - return ArrayIter(self, self.get_size(), self.shape, - self.strides, self.backstrides) + i = ArrayIter(self, support.product(shape), shape, r[0], r[1]) + else: + i = ArrayIter(self, self.get_size(), self.shape, + self.strides, self.backstrides) + return i, i.reset() def swapaxes(self, space, orig_arr, 
axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -156,10 +156,10 @@ "string is smaller than requested size")) a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) - ai = a.create_iter() + ai, state = a.create_iter() for val in items: - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) return space.wrap(a) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -32,23 +32,23 @@ self.reset() def reset(self): - self.iter = self.base.create_iter() + self.iter, self.state = self.base.create_iter() def descr_len(self, space): return space.wrap(self.base.get_size()) def descr_next(self, space): - if self.iter.done(): + if self.iter.done(self.state): raise OperationError(space.w_StopIteration, space.w_None) - w_res = self.iter.getitem() - self.iter.next() + w_res = self.iter.getitem(self.state) + self.state = self.iter.next(self.state) return w_res def descr_index(self, space): - return space.wrap(self.iter.index) + return space.wrap(self.state.index) def descr_coords(self, space): - coords = self.base.to_coords(space, space.wrap(self.iter.index)) + coords = self.base.to_coords(space, space.wrap(self.state.index)) return space.newtuple([space.wrap(c) for c in coords]) def descr_getitem(self, space, w_idx): @@ -58,13 +58,13 @@ self.reset() base = self.base start, stop, step, length = space.decode_index4(w_idx, base.get_size()) - base_iter = base.create_iter() - base_iter.next_skip_x(start) + base_iter, base_state = base.create_iter() + base_state = base_iter.next_skip_x(base_state, start) if length == 1: - return base_iter.getitem() + return base_iter.getitem(base_state) res = W_NDimArray.from_shape(space, [length], base.get_dtype(), base.get_order(), w_instance=base) - return 
loop.flatiter_getitem(res, base_iter, step) + return loop.flatiter_getitem(res, base_iter, base_state, step) def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -51,19 +51,20 @@ self.shapelen = len(shape) self.indexes = [0] * len(shape) self._done = False - self.idx_w = [None] * len(idx_w) + self.idx_w_i = [None] * len(idx_w) + self.idx_w_s = [None] * len(idx_w) for i, w_idx in enumerate(idx_w): if isinstance(w_idx, W_NDimArray): - self.idx_w[i] = w_idx.create_iter(shape) + self.idx_w_i[i], self.idx_w_s[i] = w_idx.create_iter(shape) def done(self): return self._done @jit.unroll_safe def next(self): - for w_idx in self.idx_w: - if w_idx is not None: - w_idx.next() + for i, idx_w_i in enumerate(self.idx_w_i): + if idx_w_i is not None: + self.idx_w_s[i] = idx_w_i.next(self.idx_w_s[i]) for i in range(self.shapelen - 1, -1, -1): if self.indexes[i] < self.shape[i] - 1: self.indexes[i] += 1 diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -12,11 +12,10 @@ AllButAxisIter -call2_driver = jit.JitDriver(name='numpy_call2', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_lhs', 'w_rhs', 'out', - 'left_iter', 'right_iter', 'out_iter']) +call2_driver = jit.JitDriver( + name='numpy_call2', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority @@ -46,47 +45,40 @@ if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=lhs_for_subtype) - left_iter = w_lhs.create_iter(shape) - right_iter = w_rhs.create_iter(shape) - out_iter = out.create_iter(shape) + left_iter, left_state = 
w_lhs.create_iter(shape) + right_iter, right_state = w_rhs.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): call2_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_lhs=w_lhs, w_rhs=w_rhs, - out=out, - left_iter=left_iter, right_iter=right_iter, - out_iter=out_iter) - w_left = left_iter.getitem().convert_to(space, calc_dtype) - w_right = right_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(func(calc_dtype, w_left, w_right).convert_to( + calc_dtype=calc_dtype, res_dtype=res_dtype) + w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) + w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( space, res_dtype)) - left_iter.next() - right_iter.next() - out_iter.next() + left_state = left_iter.next(left_state) + right_state = right_iter.next(right_state) + out_state = out_iter.next(out_state) return out -call1_driver = jit.JitDriver(name='numpy_call1', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_obj', 'out', 'obj_iter', - 'out_iter']) +call1_driver = jit.JitDriver( + name='numpy_call1', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) - obj_iter = w_obj.create_iter(shape) - out_iter = out.create_iter(shape) + obj_iter, obj_state = w_obj.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_obj=w_obj, out=out, - obj_iter=obj_iter, 
out_iter=out_iter) - elem = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(func(calc_dtype, elem).convert_to(space, res_dtype)) - out_iter.next() - obj_iter.next() + calc_dtype=calc_dtype, res_dtype=res_dtype) + elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype)) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) return out setslice_driver = jit.JitDriver(name='numpy_setslice', @@ -96,18 +88,20 @@ def setslice(space, shape, target, source): # note that unlike everything else, target and source here are # array implementations, not arrays - target_iter = target.create_iter(shape) - source_iter = source.create_iter(shape) + target_iter, target_state = target.create_iter(shape) + source_iter, source_state = source.create_iter(shape) dtype = target.dtype shapelen = len(shape) - while not target_iter.done(): + while not target_iter.done(target_state): setslice_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + val = source_iter.getitem(source_state) if dtype.is_str_or_unicode(): - target_iter.setitem(dtype.coerce(space, source_iter.getitem())) + val = dtype.coerce(space, val) else: - target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) - target_iter.next() - source_iter.next() + val = val.convert_to(space, dtype) + target_iter.setitem(target_state, val) + target_state = target_iter.next(target_state) + source_state = source_iter.next(source_state) return target reduce_driver = jit.JitDriver(name='numpy_reduce', @@ -116,22 +110,22 @@ reds = 'auto') def compute_reduce(space, obj, calc_dtype, func, done_func, identity): - obj_iter = obj.create_iter() + obj_iter, obj_state = obj.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(space, calc_dtype) - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + obj_state = obj_iter.next(obj_state) else: 
cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, calc_dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval cur_value = func(calc_dtype, cur_value, rval) - obj_iter.next() + obj_state = obj_iter.next(obj_state) return cur_value reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', @@ -139,69 +133,76 @@ reds = 'auto') def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity): - obj_iter = obj.create_iter() - out_iter = out.create_iter() + obj_iter, obj_state = obj.create_iter() + out_iter, out_state = out.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) else: cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) def fill(arr, box): - arr_iter = arr.create_iter() - while not arr_iter.done(): - arr_iter.setitem(box) - arr_iter.next() + arr_iter, arr_state = arr.create_iter() + 
while not arr_iter.done(arr_state): + arr_iter.setitem(arr_state, box) + arr_state = arr_iter.next(arr_state) def assign(space, arr, seq): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() for item in seq: - arr_iter.setitem(arr_dtype.coerce(space, item)) - arr_iter.next() + arr_iter.setitem(arr_state, arr_dtype.coerce(space, item)) + arr_state = arr_iter.next(arr_state) where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') def where(space, out, shape, arr, x, y, dtype): - out_iter = out.create_iter(shape) - arr_iter = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) arr_dtype = arr.get_dtype() - x_iter = x.create_iter(shape) - y_iter = y.create_iter(shape) + x_iter, x_state = x.create_iter(shape) + y_iter, y_state = y.create_iter(shape) if x.is_scalar(): if y.is_scalar(): - iter = arr_iter + iter, state = arr_iter, arr_state else: - iter = y_iter + iter, state = y_iter, y_state else: - iter = x_iter + iter, state = x_iter, x_state shapelen = len(shape) - while not iter.done(): + while not iter.done(state): where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, arr_dtype=arr_dtype) - w_cond = arr_iter.getitem() + w_cond = arr_iter.getitem(arr_state) if arr_dtype.itemtype.bool(w_cond): - w_val = x_iter.getitem().convert_to(space, dtype) + w_val = x_iter.getitem(x_state).convert_to(space, dtype) else: - w_val = y_iter.getitem().convert_to(space, dtype) - out_iter.setitem(w_val) - out_iter.next() - arr_iter.next() - x_iter.next() - y_iter.next() + w_val = y_iter.getitem(y_state).convert_to(space, dtype) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) + arr_state = arr_iter.next(arr_state) + x_state = x_iter.next(x_state) + y_state = y_iter.next(y_state) + if x.is_scalar(): + if y.is_scalar(): + state = arr_state + else: + state = y_state + else: + state = x_state 
return out axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', @@ -212,31 +213,36 @@ def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): out_iter = AxisIter(out.implementation, arr.get_shape(), axis, cumulative) + out_state = out_iter.reset() if cumulative: temp_iter = AxisIter(temp.implementation, arr.get_shape(), axis, False) + temp_state = temp_iter.reset() else: - temp_iter = out_iter # hack - arr_iter = arr.create_iter() + temp_iter = out_iter # hack + temp_state = out_state + arr_iter, arr_state = arr.create_iter() if identity is not None: identity = identity.convert_to(space, dtype) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - assert not arr_iter.done() - w_val = arr_iter.getitem().convert_to(space, dtype) - if out_iter.indices[axis] == 0: + assert not arr_iter.done(arr_state) + w_val = arr_iter.getitem(arr_state).convert_to(space, dtype) + if out_state.indices[axis] == 0: if identity is not None: w_val = func(dtype, identity, w_val) else: - cur = temp_iter.getitem() + cur = temp_iter.getitem(temp_state) w_val = func(dtype, cur, w_val) - out_iter.setitem(w_val) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) if cumulative: - temp_iter.setitem(w_val) - temp_iter.next() - arr_iter.next() - out_iter.next() + temp_iter.setitem(temp_state, w_val) + temp_state = temp_iter.next(temp_state) + else: + temp_state = out_state + arr_state = arr_iter.next(arr_state) return out @@ -249,18 +255,18 @@ result = 0 idx = 1 dtype = arr.get_dtype() - iter = arr.create_iter() - cur_best = iter.getitem() - iter.next() + iter, state = arr.create_iter() + cur_best = iter.getitem(state) + state = iter.next(state) shapelen = len(arr.get_shape()) - while not iter.done(): + while not iter.done(state): arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_val = iter.getitem() + 
w_val = iter.getitem(state) new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - iter.next() + state = iter.next(state) idx += 1 return result return argmin_argmax @@ -291,17 +297,19 @@ right_impl = right.implementation assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype - outi = result.create_iter() + outi, outs = result.create_iter() lefti = AllButAxisIter(left_impl, len(left_shape) - 1) righti = AllButAxisIter(right_impl, right_critical_dim) + lefts = lefti.reset() + rights = righti.reset() n = left_impl.shape[-1] s1 = left_impl.strides[-1] s2 = right_impl.strides[right_critical_dim] - while not lefti.done(): - while not righti.done(): - oval = outi.getitem() - i1 = lefti.offset - i2 = righti.offset + while not lefti.done(lefts): + while not righti.done(rights): + oval = outi.getitem(outs) + i1 = lefts.offset + i2 = rights.offset i = 0 while i < n: i += 1 @@ -311,11 +319,11 @@ oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) i1 += s1 i2 += s2 - outi.setitem(oval) - outi.next() - righti.next() - righti.reset() - lefti.next() + outi.setitem(outs, oval) + outs = outi.next(outs) + rights = righti.next(rights) + rights = righti.reset() + lefts = lefti.next(lefts) return result count_all_true_driver = jit.JitDriver(name = 'numpy_count', @@ -324,13 +332,13 @@ def count_all_true_concrete(impl): s = 0 - iter = impl.create_iter() + iter, state = impl.create_iter() shapelen = len(impl.shape) dtype = impl.dtype - while not iter.done(): + while not iter.done(state): count_all_true_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - s += iter.getitem_bool() - iter.next() + s += iter.getitem_bool(state) + state = iter.next(state) return s def count_all_true(arr): @@ -344,18 +352,18 @@ reds = 'auto') def nonzero(res, arr, box): - res_iter = res.create_iter() - arr_iter = arr.create_iter() + res_iter, res_state = res.create_iter() + 
arr_iter, arr_state = arr.create_iter() shapelen = len(arr.shape) dtype = arr.dtype dims = range(shapelen) - while not arr_iter.done(): + while not arr_iter.done(arr_state): nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) - if arr_iter.getitem_bool(): + if arr_iter.getitem_bool(arr_state): for d in dims: - res_iter.setitem(box(arr_iter.indices[d])) - res_iter.next() - arr_iter.next() + res_iter.setitem(res_state, box(arr_state.indices[d])) + res_state = res_iter.next(res_state) + arr_state = arr_iter.next(arr_state) return res @@ -365,26 +373,26 @@ reds = 'auto') def getitem_filter(res, arr, index): - res_iter = res.create_iter() + res_iter, res_state = res.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() - arr_iter = arr.create_iter() + index_iter, index_state = index.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() index_dtype = index.get_dtype() # XXX length of shape of index as well? 
- while not index_iter.done(): + while not index_iter.done(index_state): getitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - res_iter.setitem(arr_iter.getitem()) - res_iter.next() - index_iter.next() - arr_iter.next() + if index_iter.getitem_bool(index_state): + res_iter.setitem(res_state, arr_iter.getitem(arr_state)) + res_state = res_iter.next(res_state) + index_state = index_iter.next(index_state) + arr_state = arr_iter.next(arr_state) return res setitem_filter_driver = jit.JitDriver(name = 'numpy_setitem_bool', @@ -393,41 +401,42 @@ reds = 'auto') def setitem_filter(space, arr, index, value): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() + index_iter, index_state = index.create_iter() if value.get_size() == 1: - value_iter = value.create_iter(arr.get_shape()) + value_iter, value_state = value.create_iter(arr.get_shape()) else: - value_iter = value.create_iter() + value_iter, value_state = value.create_iter() index_dtype = index.get_dtype() arr_dtype = arr.get_dtype() - while not index_iter.done(): + while not index_iter.done(index_state): setitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - arr_iter.setitem(arr_dtype.coerce(space, value_iter.getitem())) - value_iter.next() - arr_iter.next() - index_iter.next() + if index_iter.getitem_bool(index_state): + val = arr_dtype.coerce(space, value_iter.getitem(value_state)) + value_state = value_iter.next(value_state) + arr_iter.setitem(arr_state, val) + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) 
flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem', greens = ['dtype'], reds = 'auto') -def flatiter_getitem(res, base_iter, step): - ri = res.create_iter() +def flatiter_getitem(res, base_iter, base_state, step): + ri, rs = res.create_iter() dtype = res.get_dtype() - while not ri.done(): + while not ri.done(rs): flatiter_getitem_driver.jit_merge_point(dtype=dtype) - ri.setitem(base_iter.getitem()) - base_iter.next_skip_x(step) - ri.next() + ri.setitem(rs, base_iter.getitem(base_state)) + base_state = base_iter.next_skip_x(base_state, step) + rs = ri.next(rs) return res flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', @@ -436,19 +445,21 @@ def flatiter_setitem(space, arr, val, start, step, length): dtype = arr.get_dtype() - arr_iter = arr.create_iter() - val_iter = val.create_iter() - arr_iter.next_skip_x(start) + arr_iter, arr_state = arr.create_iter() + val_iter, val_state = val.create_iter() + arr_state = arr_iter.next_skip_x(arr_state, start) while length > 0: flatiter_setitem_driver.jit_merge_point(dtype=dtype) + val = val_iter.getitem(val_state) if dtype.is_str_or_unicode(): - arr_iter.setitem(dtype.coerce(space, val_iter.getitem())) + val = dtype.coerce(space, val) else: - arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) + val = val.convert_to(space, dtype) + arr_iter.setitem(arr_state, val) # need to repeat i_nput values until all assignments are done - arr_iter.next_skip_x(step) + arr_state = arr_iter.next_skip_x(arr_state, step) + val_state = val_iter.next(val_state) length -= 1 - val_iter.next() fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', greens = ['itemsize', 'dtype'], @@ -456,30 +467,30 @@ def fromstring_loop(space, a, dtype, itemsize, s): i = 0 - ai = a.create_iter() - while not ai.done(): + ai, state = a.create_iter() + while not ai.done(state): fromstring_driver.jit_merge_point(dtype=dtype, itemsize=itemsize) sub = s[i*itemsize:i*itemsize + itemsize] if 
dtype.is_str_or_unicode(): val = dtype.coerce(space, space.wrap(sub)) else: val = dtype.itemtype.runpack_str(space, sub) - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) i += 1 def tostring(space, arr): builder = StringBuilder() - iter = arr.create_iter() + iter, state = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().elsize res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), w_res_str.implementation.get_storage_as_int(space)) - while not iter.done(): - w_res_str.implementation.setitem(0, iter.getitem()) + while not iter.done(state): + w_res_str.implementation.setitem(0, iter.getitem(state)) for i in range(itemsize): builder.append(res_str_casted[i]) - iter.next() + state = iter.next(state) return builder.build() getitem_int_driver = jit.JitDriver(name = 'numpy_getitem_int', @@ -500,8 +511,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] res.descr_setitem(space, space.newtuple(prefix_w[:prefixlen] + @@ -528,8 +539,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] w_idx = space.newtuple(prefix_w[:prefixlen] + iter.get_index(space, @@ -547,13 +558,14 @@ def byteswap(from_, to): dtype = from_.dtype - from_iter = from_.create_iter() - to_iter = to.create_iter() - while not from_iter.done(): + from_iter, from_state = from_.create_iter() + to_iter, to_state = to.create_iter() + while not from_iter.done(from_state): byteswap_driver.jit_merge_point(dtype=dtype) - to_iter.setitem(dtype.itemtype.byteswap(from_iter.getitem())) - 
to_iter.next() - from_iter.next() + val = dtype.itemtype.byteswap(from_iter.getitem(from_state)) + to_iter.setitem(to_state, val) + to_state = to_iter.next(to_state) + from_state = from_iter.next(from_state) choose_driver = jit.JitDriver(name='numpy_choose_driver', greens = ['shapelen', 'mode', 'dtype'], @@ -561,13 +573,15 @@ def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) - iterators = [a.create_iter(shape) for a in choices] - arr_iter = arr.create_iter(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + pairs = [a.create_iter(shape) for a in choices] + iterators = [i[0] for i in pairs] + states = [i[1] for i in pairs] + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): choose_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, mode=mode) - index = support.index_w(space, arr_iter.getitem()) + index = support.index_w(space, arr_iter.getitem(arr_state)) if index < 0 or index >= len(iterators): if mode == NPY.RAISE: raise OperationError(space.w_ValueError, space.wrap( @@ -580,72 +594,73 @@ index = 0 else: index = len(iterators) - 1 - out_iter.setitem(iterators[index].getitem().convert_to(space, dtype)) - for iter in iterators: - iter.next() - out_iter.next() - arr_iter.next() + val = iterators[index].getitem(states[index]).convert_to(space, dtype) + out_iter.setitem(out_state, val) + for i in range(len(iterators)): + states[i] = iterators[i].next(states[i]) + out_state = out_iter.next(out_state) + arr_state = arr_iter.next(arr_state) clip_driver = jit.JitDriver(name='numpy_clip_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def clip(space, arr, shape, min, max, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) dtype = out.get_dtype() shapelen = len(shape) - min_iter = min.create_iter(shape) - max_iter = max.create_iter(shape) - out_iter = out.create_iter(shape) - while not 
arr_iter.done(): + min_iter, min_state = min.create_iter(shape) + max_iter, max_state = max.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(space, dtype) - w_min = min_iter.getitem().convert_to(space, dtype) - w_max = max_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) + w_min = min_iter.getitem(min_state).convert_to(space, dtype) + w_max = max_iter.getitem(max_state).convert_to(space, dtype) if dtype.itemtype.lt(w_v, w_min): w_v = w_min elif dtype.itemtype.gt(w_v, w_max): w_v = w_max - out_iter.setitem(w_v) - arr_iter.next() - max_iter.next() - out_iter.next() - min_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + min_state = min_iter.next(min_state) + max_state = max_iter.next(max_state) + out_state = out_iter.next(out_state) round_driver = jit.JitDriver(name='numpy_round_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def round(space, arr, dtype, shape, decimals, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + while not arr_iter.done(arr_state): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) w_v = dtype.itemtype.round(w_v, decimals) - out_iter.setitem(w_v) - arr_iter.next() - out_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + out_state = out_iter.next(out_state) diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): - out_iter = out.create_iter() 
+ out_iter, out_state = out.create_iter() i = 0 index = [0] * 2 while i < size: diagonal_simple_driver.jit_merge_point(axis1=axis1, axis2=axis2) index[axis1] = i index[axis2] = i + offset - out_iter.setitem(arr.getitem_index(space, index)) + out_iter.setitem(out_state, arr.getitem_index(space, index)) i += 1 - out_iter.next() + out_state = out_iter.next(out_state) def diagonal_array(space, arr, out, offset, axis1, axis2, shape): - out_iter = out.create_iter() + out_iter, out_state = out.create_iter() iter = PureShapeIter(shape, []) shapelen_minus_1 = len(shape) - 1 assert shapelen_minus_1 >= 0 @@ -667,6 +682,6 @@ indexes = (iter.indexes[:a] + [last_index + offset] + iter.indexes[a:b] + [last_index] + iter.indexes[b:shapelen_minus_1]) - out_iter.setitem(arr.getitem_index(space, indexes)) + out_iter.setitem(out_state, arr.getitem_index(space, indexes)) iter.next() - out_iter.next() + out_state = out_iter.next(out_state) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -260,24 +260,24 @@ return space.call_function(cache.w_array_str, self) def dump_data(self, prefix='array(', separator=',', suffix=')'): - i = self.create_iter() + i, state = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() s.append(prefix) if not self.is_scalar(): s.append('[') - while not i.done(): + while not i.done(state): if first: first = False else: s.append(separator) s.append(' ') if self.is_scalar() and dtype.is_str(): - s.append(dtype.itemtype.to_str(i.getitem())) + s.append(dtype.itemtype.to_str(i.getitem(state))) else: - s.append(dtype.itemtype.str_format(i.getitem())) - i.next() + s.append(dtype.itemtype.str_format(i.getitem(state))) + state = i.next(state) if not self.is_scalar(): s.append(']') s.append(suffix) @@ -818,8 +818,8 @@ if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more 
than one element is ambiguous. Use a.any() or a.all()")) - iter = self.create_iter() - return space.wrap(space.is_true(iter.getitem())) + iter, state = self.create_iter() + return space.wrap(space.is_true(iter.getitem(state))) def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): @@ -1095,11 +1095,11 @@ builder = StringBuilder() if isinstance(self.implementation, SliceArray): - iter = self.implementation.create_iter() - while not iter.done(): - box = iter.getitem() + iter, state = self.implementation.create_iter() + while not iter.done(state): + box = iter.getitem(state) builder.append(box.raw_str()) - iter.next() + state = iter.next(state) else: builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -148,20 +148,22 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() index_impl = index_arr.implementation index_iter = AllButAxisIter(index_impl, axis) + index_state = index_iter.reset() stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): + while not arr_iter.done(arr_state): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + - index_iter.offset, i) + index_state.offset, i) r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_iter.offset, arr_iter.offset) + arr.get_storage(), storage, index_state.offset, arr_state.offset) ArgSort(r).sort() - arr_iter.next() - index_iter.next() + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) return index_arr return argsort @@ -292,12 +294,13 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) 
arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() stride_size = arr.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): - r = Repr(stride_size, axis_size, arr.get_storage(), arr_iter.offset) + while not arr_iter.done(arr_state): + r = Repr(stride_size, axis_size, arr.get_storage(), arr_state.offset) ArgSort(r).sort() - arr_iter.next() + arr_state = arr_iter.next(arr_state) return sort diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -47,7 +47,8 @@ raise Exception("need results") w_res = interp.results[-1] if isinstance(w_res, W_NDimArray): - w_res = w_res.create_iter().getitem() + i, s = w_res.create_iter() + w_res = i.getitem(s) if isinstance(w_res, boxes.W_Float64Box): return w_res.value if isinstance(w_res, boxes.W_Int64Box): From noreply at buildbot.pypy.org Fri Apr 18 00:09:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:09:09 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: update test_zjit Message-ID: <20140417220909.D1FBE1C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70725:1a421d4ff1cd Date: 2014-04-17 17:32 -0400 http://bitbucket.org/pypy/pypy/changeset/1a421d4ff1cd/ Log: update test_zjit diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -102,7 +102,6 @@ self.check_simple_loop({ 'float_add': 1, 'getarrayitem_gc': 3, - 'getfield_gc': 7, 'guard_false': 1, 'guard_not_invalidated': 1, 'guard_true': 3, @@ -113,7 +112,6 @@ 'raw_load': 2, 'raw_store': 1, 'setarrayitem_gc': 3, - 'setfield_gc': 6, }) def define_pow(): @@ -133,7 +131,6 @@ 'float_mul': 2, 'float_ne': 1, 'getarrayitem_gc': 3, - 'getfield_gc': 7, 'guard_false': 4, 'guard_not_invalidated': 1, 'guard_true': 
5, @@ -145,7 +142,6 @@ 'raw_load': 2, 'raw_store': 1, 'setarrayitem_gc': 3, - 'setfield_gc': 6, }) def define_sum(): @@ -528,7 +524,6 @@ self.check_trace_count(1) self.check_simple_loop({ 'getarrayitem_gc': 2, - 'getfield_gc': 4, 'guard_not_invalidated': 1, 'guard_true': 3, 'int_add': 6, @@ -539,7 +534,6 @@ 'raw_load': 1, 'raw_store': 1, 'setarrayitem_gc': 2, - 'setfield_gc': 4, }) def define_dot(): @@ -565,29 +559,30 @@ 'raw_load': 2, }) self.check_resops({ + 'arraylen_gc': 1, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 7, 'getarrayitem_gc_pure': 15, - 'getfield_gc': 35, - 'getfield_gc_pure': 39, + 'getfield_gc': 21, + 'getfield_gc_pure': 31, 'guard_class': 4, 'guard_false': 14, - 'guard_nonnull': 12, - 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, 'guard_true': 13, - 'guard_value': 4, 'int_add': 25, 'int_ge': 4, 'int_le': 8, 'int_lt': 11, 'int_sub': 4, 'jump': 3, + 'new_array': 1, + 'new_with_vtable': 7, 'raw_load': 6, 'raw_store': 1, - 'setarrayitem_gc': 10, - 'setfield_gc': 14, + 'same_as': 2, + 'setarrayitem_gc': 8, + 'setfield_gc': 16, }) def define_argsort(): From noreply at buildbot.pypy.org Fri Apr 18 00:09:11 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:09:11 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: merge default Message-ID: <20140417220911.0F1971C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70726:0f3c4c2936ca Date: 2014-04-17 17:41 -0400 http://bitbucket.org/pypy/pypy/changeset/0f3c4c2936ca/ Log: merge default diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -996,7 +996,8 @@ descr_cumsum = _reduce_ufunc_impl('add', cumulative=True) descr_cumprod = _reduce_ufunc_impl('multiply', cumulative=True) - def _reduce_argmax_argmin_impl(op_name): + def _reduce_argmax_argmin_impl(raw_name): + op_name = "arg%s" % raw_name def impl(self, space, 
w_axis=None, w_out=None): if not space.is_none(w_axis): raise oefmt(space.w_NotImplementedError, @@ -1007,18 +1008,17 @@ if self.get_size() == 0: raise oefmt(space.w_ValueError, "Can't call %s on zero-size arrays", op_name) - op = getattr(loop, op_name) try: - res = op(self) + getattr(self.get_dtype().itemtype, raw_name) except AttributeError: raise oefmt(space.w_NotImplementedError, '%s not implemented for %s', op_name, self.get_dtype().get_name()) - return space.wrap(res) - return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) + return space.wrap(getattr(loop, op_name)(self)) + return func_with_new_name(impl, "reduce_%s_impl" % op_name) - descr_argmax = _reduce_argmax_argmin_impl("argmax") - descr_argmin = _reduce_argmax_argmin_impl("argmin") + descr_argmax = _reduce_argmax_argmin_impl("max") + descr_argmin = _reduce_argmax_argmin_impl("min") def descr_int(self, space): if self.get_size() != 1: From noreply at buildbot.pypy.org Fri Apr 18 00:09:12 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 00:09:12 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: mark iter state fields immutable Message-ID: <20140417220912.393971C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70727:5edc38cf52ad Date: 2014-04-17 17:44 -0400 http://bitbucket.org/pypy/pypy/changeset/5edc38cf52ad/ Log: mark iter state fields immutable diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -80,6 +80,8 @@ class IterState(object): + _immutable_fields_ = ['index', 'indices[*]', 'offset'] + def __init__(self, index, indices, offset): self.index = index self.indices = indices diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -564,8 +564,8 @@ 'float_mul': 2, 
'getarrayitem_gc': 7, 'getarrayitem_gc_pure': 15, - 'getfield_gc': 21, - 'getfield_gc_pure': 31, + 'getfield_gc': 8, + 'getfield_gc_pure': 44, 'guard_class': 4, 'guard_false': 14, 'guard_not_invalidated': 2, From noreply at buildbot.pypy.org Fri Apr 18 00:33:18 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 18 Apr 2014 00:33:18 +0200 (CEST) Subject: [pypy-commit] pypy numpy-searchsorted: add tests Message-ID: <20140417223318.1428E1C048F@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-searchsorted Changeset: r70728:11986490eee7 Date: 2014-04-18 01:28 +0300 http://bitbucket.org/pypy/pypy/changeset/11986490eee7/ Log: add tests diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -341,7 +341,7 @@ assert (x == y).all() def test_string_mergesort(self): - import numpypy as np + import numpy as np import sys x = np.array(['a'] * 32) if '__pypy__' in sys.builtin_module_names: @@ -349,3 +349,17 @@ assert 'non-numeric types' in exc.value.message else: assert (x.argsort(kind='m') == np.arange(32)).all() + + def test_searchsort(self): + from numpy import arange + import sys + a = arange(1, 6) + ret = a.searchsorted(3) + assert ret == 2 + ret = a.searchsorted(3, side='right') + assert ret == 3 + ret = a.searchsorted([-10, 10, 2, 3]) + assert (ret == [0, 5, 1, 2]).all() + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, "a.searchsorted(3, sorter=range(6)") + From noreply at buildbot.pypy.org Fri Apr 18 00:44:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 00:44:44 +0200 (CEST) Subject: [pypy-commit] pypy py3k: 2to3 the relative import Message-ID: <20140417224444.452611C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70729:527abf1156de Date: 2014-04-17 12:16 -0700 http://bitbucket.org/pypy/pypy/changeset/527abf1156de/ 
Log: 2to3 the relative import diff --git a/pypy/module/__pypy__/app_signal.py b/pypy/module/__pypy__/app_signal.py --- a/pypy/module/__pypy__/app_signal.py +++ b/pypy/module/__pypy__/app_signal.py @@ -1,4 +1,4 @@ -import thread +from . import thread # ^^ relative import of __pypy__.thread. Note that some tests depend on # this (test_enable_signals in test_signal.py) to work properly, # otherwise they get caught in some deadlock waiting for the import From noreply at buildbot.pypy.org Fri Apr 18 01:30:55 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 01:30:55 +0200 (CEST) Subject: [pypy-commit] pypy default: minor cleanup/refactoring for the sake of py3k Message-ID: <20140417233055.35BC71D282B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70730:281b05c23a4b Date: 2014-04-17 16:27 -0700 http://bitbucket.org/pypy/pypy/changeset/281b05c23a4b/ Log: minor cleanup/refactoring for the sake of py3k diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -3,15 +3,18 @@ indirection is introduced to make the version tag change less often. 
""" +from rpython.rlib import jit, rerased + from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.dictmultiobject import create_iterator_classes -from pypy.objspace.std.dictmultiobject import DictStrategy, _never_equal_to_string -from pypy.objspace.std.dictmultiobject import ObjectDictStrategy -from rpython.rlib import jit, rerased +from pypy.objspace.std.dictmultiobject import ( + DictStrategy, ObjectDictStrategy, _never_equal_to_string, + create_iterator_classes) + class VersionTag(object): pass + class ModuleCell(W_Root): def __init__(self, w_value=None): self.w_value = w_value @@ -19,11 +22,17 @@ def __repr__(self): return "" % (self.w_value, ) + def unwrap_cell(w_value): if isinstance(w_value, ModuleCell): return w_value.w_value return w_value + +def _wrapkey(space, key): + return space.wrap(key) + + class ModuleDictStrategy(DictStrategy): erase, unerase = rerased.new_erasing_pair("modulecell") @@ -55,7 +64,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space if space.is_w(space.type(w_key), space.w_str): - self.setitem_str(w_dict, self.space.str_w(w_key), w_value) + self.setitem_str(w_dict, space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) w_dict.setitem(w_key, w_value) @@ -66,8 +75,8 @@ cell.w_value = w_value return if cell is not None: - # If the new value and the current value are the same, don't create a - # level of indirection, or mutate the version. + # If the new value and the current value are the same, don't + # create a level of indirection, or mutate the version. 
if self.space.is_w(w_value, cell): return w_value = ModuleCell(w_value) @@ -121,8 +130,8 @@ return w_dict.getitem(w_key) def getitem_str(self, w_dict, key): - w_res = self.getdictvalue_no_unwrapping(w_dict, key) - return unwrap_cell(w_res) + cell = self.getdictvalue_no_unwrapping(w_dict, key) + return unwrap_cell(cell) def w_keys(self, w_dict): space = self.space @@ -136,37 +145,43 @@ def items(self, w_dict): space = self.space iterator = self.unerase(w_dict.dstorage).iteritems - return [space.newtuple([space.wrap(key), unwrap_cell(cell)]) - for key, cell in iterator()] + return [space.newtuple([_wrapkey(space, key), unwrap_cell(cell)]) + for key, cell in iterator()] def clear(self, w_dict): self.unerase(w_dict.dstorage).clear() self.mutated() def popitem(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - key, w_value = d.popitem() + key, cell = d.popitem() self.mutated() - return self.space.wrap(key), unwrap_cell(w_value) + return _wrapkey(space, key), unwrap_cell(cell) def switch_to_object_strategy(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - strategy = self.space.fromcache(ObjectDictStrategy) + strategy = space.fromcache(ObjectDictStrategy) d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): - d_new[self.space.wrap(key)] = unwrap_cell(cell) + d_new[_wrapkey(space, key)] = unwrap_cell(cell) w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): return self.unerase(w_dict.dstorage).iterkeys() + def getitervalues(self, w_dict): return self.unerase(w_dict.dstorage).itervalues() + def getiteritems(self, w_dict): return self.unerase(w_dict.dstorage).iteritems() - def wrapkey(space, key): - return space.wrap(key) + + wrapkey = _wrapkey + def wrapvalue(space, value): return unwrap_cell(value) + create_iterator_classes(ModuleDictStrategy) diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- 
a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -1,12 +1,19 @@ -## ---------------------------------------------------------------------------- -## dict strategy (see dictmultiobject.py) +"""dict implementation specialized for keyword argument dicts. -from rpython.rlib import rerased, jit +Based on two lists containing unwrapped key value pairs. +""" + +from rpython.rlib import jit, rerased + from pypy.objspace.std.dictmultiobject import ( BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, create_iterator_classes) +def _wrapkey(space, key): + return space.wrap(key) + + class EmptyKwargsDictStrategy(EmptyDictStrategy): def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) @@ -21,7 +28,7 @@ unerase = staticmethod(unerase) def wrap(self, key): - return self.space.wrap(key) + return _wrapkey(self.space, key) def unwrap(self, wrapped): return self.space.str_w(wrapped) @@ -117,16 +124,14 @@ def items(self, w_dict): space = self.space keys, values_w = self.unerase(w_dict.dstorage) - result = [] - for i in range(len(keys)): - result.append(space.newtuple([self.wrap(keys[i]), values_w[i]])) - return result + return [space.newtuple([self.wrap(keys[i]), values_w[i]]) + for i in range(len(keys))] def popitem(self, w_dict): keys, values_w = self.unerase(w_dict.dstorage) key = keys.pop() w_value = values_w.pop() - return (self.wrap(key), w_value) + return self.wrap(key), w_value def clear(self, w_dict): w_dict.dstorage = self.get_empty_storage() @@ -164,17 +169,15 @@ keys = self.unerase(w_dict.dstorage)[0] return iter(range(len(keys))) - def wrapkey(space, key): - return space.wrap(key) + wrapkey = _wrapkey def next_item(self): strategy = self.strategy assert isinstance(strategy, KwargsDictStrategy) for i in self.iterator: - keys, values_w = strategy.unerase( - self.dictimplementation.dstorage) - return self.space.wrap(keys[i]), values_w[i] + keys, values_w = 
strategy.unerase(self.dictimplementation.dstorage) + return _wrapkey(self.space, keys[i]), values_w[i] else: return None, None From noreply at buildbot.pypy.org Fri Apr 18 02:43:02 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:02 +0200 (CEST) Subject: [pypy-commit] pypy default: use FakeString everywhere to be more consistent Message-ID: <20140418004302.05AD21C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70731:c797491b7fc4 Date: 2014-04-17 16:52 -0700 http://bitbucket.org/pypy/pypy/changeset/c797491b7fc4/ Log: use FakeString everywhere to be more consistent diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,42 +1,47 @@ import py + +from pypy.objspace.std.celldict import ModuleDictStrategy from pypy.objspace.std.dictmultiobject import W_DictMultiObject -from pypy.objspace.std.celldict import ModuleCell, ModuleDictStrategy -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, \ - BaseTestRDictImplementation, BaseTestDevolvedDictImplementation -from pypy.interpreter import gateway +from pypy.objspace.std.test.test_dictmultiobject import ( + BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, + FakeString) space = FakeSpace() class TestCellDict(object): + FakeString = FakeString + def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() d = W_DictMultiObject(space, strategy, storage) v1 = strategy.version - d.setitem("a", 1) + key = "a" + w_key = self.FakeString(key) + d.setitem(w_key, 1) v2 = strategy.version assert v1 is not v2 - assert d.getitem("a") == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, "a") == 1 + assert d.getitem(w_key) == 1 + assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 - d.setitem("a", 2) + d.setitem(w_key, 2) v3 = strategy.version 
assert v2 is not v3 - assert d.getitem("a") == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 2 + assert d.getitem(w_key) == 2 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 - d.setitem("a", 3) + d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 - assert d.getitem("a") == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 3 + assert d.getitem(w_key) == 3 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 - d.delitem("a") + d.delitem(w_key) v5 = strategy.version assert v5 is not v4 - assert d.getitem("a") is None - assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + assert d.getitem(w_key) is None + assert d.strategy.getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1254,12 +1254,13 @@ return other == "s" d = self.get_impl() - d.setitem("s", 12) - assert d.getitem("s") == 12 - assert d.getitem(F()) == d.getitem("s") + w_key = FakeString("s") + d.setitem(w_key, 12) + assert d.getitem(w_key) == 12 + assert d.getitem(F()) == d.getitem(w_key) d = self.get_impl() - x = d.setdefault("s", 12) + x = d.setdefault(w_key, 12) assert x == 12 x = d.setdefault(F(), 12) assert x == 12 @@ -1269,10 +1270,10 @@ assert x == 12 d = self.get_impl() - d.setitem("s", 12) + d.setitem(w_key, 12) d.delitem(F()) - assert "s" not in d.w_keys() + assert w_key not in d.w_keys() assert F() not in d.w_keys() class TestBytesDictImplementation(BaseTestRDictImplementation): From noreply at buildbot.pypy.org Fri Apr 18 02:43:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:07 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: 
<20140418004307.D14EA1C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70734:f10a4d4b1837 Date: 2014-04-17 16:53 -0700 http://bitbucket.org/pypy/pypy/changeset/f10a4d4b1837/ Log: merge default diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,42 +1,47 @@ import py + +from pypy.objspace.std.celldict import ModuleDictStrategy from pypy.objspace.std.dictmultiobject import W_DictMultiObject -from pypy.objspace.std.celldict import ModuleCell, ModuleDictStrategy -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, \ - BaseTestRDictImplementation, BaseTestDevolvedDictImplementation -from pypy.interpreter import gateway +from pypy.objspace.std.test.test_dictmultiobject import ( + BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, + FakeString) space = FakeSpace() class TestCellDict(object): + FakeString = FakeString + def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() d = W_DictMultiObject(space, strategy, storage) v1 = strategy.version - d.setitem("a", 1) + key = "a" + w_key = self.FakeString(key) + d.setitem(w_key, 1) v2 = strategy.version assert v1 is not v2 - assert d.getitem("a") == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, "a") == 1 + assert d.getitem(w_key) == 1 + assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 - d.setitem("a", 2) + d.setitem(w_key, 2) v3 = strategy.version assert v2 is not v3 - assert d.getitem("a") == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 2 + assert d.getitem(w_key) == 2 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 - d.setitem("a", 3) + d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 - assert d.getitem("a") == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 3 + assert 
d.getitem(w_key) == 3 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 - d.delitem("a") + d.delitem(w_key) v5 = strategy.version assert v5 is not v4 - assert d.getitem("a") is None - assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + assert d.getitem(w_key) is None + assert d.strategy.getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1274,12 +1274,13 @@ return other == "s" d = self.get_impl() - d.setitem("s", 12) - assert d.getitem("s") == 12 - assert d.getitem(F()) == d.getitem("s") + w_key = FakeString("s") + d.setitem(w_key, 12) + assert d.getitem(w_key) == 12 + assert d.getitem(F()) == d.getitem(w_key) d = self.get_impl() - x = d.setdefault("s", 12) + x = d.setdefault(w_key, 12) assert x == 12 x = d.setdefault(F(), 12) assert x == 12 @@ -1289,10 +1290,10 @@ assert x == 12 d = self.get_impl() - d.setitem("s", 12) + d.setitem(w_key, 12) d.delitem(F()) - assert "s" not in d.w_keys() + assert w_key not in d.w_keys() assert F() not in d.w_keys() class TestBytesDictImplementation(BaseTestRDictImplementation): From noreply at buildbot.pypy.org Fri Apr 18 02:43:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:09 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: merge py3k Message-ID: <20140418004309.01C231C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70735:a0becb7c56be Date: 2014-04-17 16:53 -0700 http://bitbucket.org/pypy/pypy/changeset/a0becb7c56be/ Log: merge py3k diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,42 +1,47 
@@ import py + +from pypy.objspace.std.celldict import ModuleDictStrategy from pypy.objspace.std.dictmultiobject import W_DictMultiObject -from pypy.objspace.std.celldict import ModuleCell, ModuleDictStrategy -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, \ - BaseTestRDictImplementation, BaseTestDevolvedDictImplementation -from pypy.interpreter import gateway +from pypy.objspace.std.test.test_dictmultiobject import ( + BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, + FakeString) space = FakeSpace() class TestCellDict(object): + FakeString = FakeString + def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() d = W_DictMultiObject(space, strategy, storage) v1 = strategy.version - d.setitem("a", 1) + key = "a" + w_key = self.FakeString(key) + d.setitem(w_key, 1) v2 = strategy.version assert v1 is not v2 - assert d.getitem("a") == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, "a") == 1 + assert d.getitem(w_key) == 1 + assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 - d.setitem("a", 2) + d.setitem(w_key, 2) v3 = strategy.version assert v2 is not v3 - assert d.getitem("a") == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 2 + assert d.getitem(w_key) == 2 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 - d.setitem("a", 3) + d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 - assert d.getitem("a") == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 3 + assert d.getitem(w_key) == 3 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 - d.delitem("a") + d.delitem(w_key) v5 = strategy.version assert v5 is not v4 - assert d.getitem("a") is None - assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + assert d.getitem(w_key) is None + assert d.strategy.getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) 
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1293,12 +1293,13 @@ return other == "s" d = self.get_impl() - d.setitem("s", 12) - assert d.getitem("s") == 12 - assert d.getitem(F()) == d.getitem("s") + w_key = FakeString("s") + d.setitem(w_key, 12) + assert d.getitem(w_key) == 12 + assert d.getitem(F()) == d.getitem(w_key) d = self.get_impl() - x = d.setdefault("s", 12) + x = d.setdefault(w_key, 12) assert x == 12 x = d.setdefault(F(), 12) assert x == 12 @@ -1308,10 +1309,10 @@ assert x == 12 d = self.get_impl() - d.setitem("s", 12) + d.setitem(w_key, 12) d.delitem(F()) - assert "s" not in d.w_keys() + assert w_key not in d.w_keys() assert F() not in d.w_keys() class TestUnicodeDictImplementation(BaseTestRDictImplementation): From noreply at buildbot.pypy.org Fri Apr 18 02:43:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140418004303.A595C1C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70732:a118df0911b3 Date: 2014-04-17 16:32 -0700 http://bitbucket.org/pypy/pypy/changeset/a118df0911b3/ Log: merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -137,3 +137,6 @@ .. branch: issue1514 Fix issues with reimporting builtin modules + +.. 
branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -480,6 +480,8 @@ return w_mod2 self.setitem(w_modules, w_name, w_mod) w_mod.init(self) + else: + self.setitem(w_modules, w_name, w_mod) return w_mod def get_builtinmodule_to_install(self): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -2,7 +2,7 @@ Implementation of the interpreter-level default import logic. """ -import sys, os, stat +import sys, os, stat, genericpath from pypy.interpreter.module import Module from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -528,7 +528,8 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) - if os.path.isdir(filepart) and case_ok(filepart): + # os.path.isdir on win32 is not rpython when pywin32 installed + if genericpath.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) if modtype in (PY_SOURCE, PY_COMPILED): diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -661,17 +661,17 @@ def test_reimport_builtin(self): import imp, sys, time oldpath = sys.path - time.tzset = "" + time.tzname = "" del sys.modules['time'] import time as time1 assert sys.modules['time'] is time1 - assert time.tzset == "" + assert time.tzname == "" - imp.reload(time1) # don't leave a broken time.tzset behind + imp.reload(time1) # don't leave a broken time.tzname behind import time - assert time.tzset != "" + assert time.tzname != "" def test_reload_infinite(self): import infinite_reload diff --git a/pypy/module/micronumpy/__init__.py 
b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,6 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', + 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -37,7 +37,7 @@ "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] -TWO_ARG_FUNCTIONS_OR_NONE = ['view'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -388,6 +388,8 @@ w_res = w_lhs.descr_mul(interp.space, w_rhs) elif self.name == '-': w_res = w_lhs.descr_sub(interp.space, w_rhs) + elif self.name == '**': + w_res = w_lhs.descr_pow(interp.space, w_rhs) elif self.name == '->': if isinstance(w_rhs, FloatObject): w_rhs = IntObject(int(w_rhs.floatval)) @@ -596,6 +598,8 @@ arg = self.args[1].execute(interp) if self.name == 'view': w_res = arr.descr_view(interp.space, arg) + elif self.name == 'astype': + w_res = arr.descr_astype(interp.space, arg) else: assert False else: @@ -620,7 +624,7 @@ (':', 'colon'), ('\w+', 'identifier'), ('\]', 'array_right'), - ('(->)|[\+\-\*\/]', 'operator'), + ('(->)|[\+\-\*\/]+', 'operator'), ('=', 'assign'), (',', 'comma'), ('\|', 'pipe'), diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,6 +42,7 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support +from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -90,11 +91,16 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides 
self.backstrides = backstrides - self.reset() + self.index = 0 + self.indices = [0] * len(shape) + self.offset = array.start + + @jit.unroll_safe def reset(self): self.index = 0 - self.indices = [0] * len(self.shape_m1) + for i in xrange(self.ndim_m1, -1, -1): + self.indices[i] = 0 self.offset = self.array.start @jit.unroll_safe @@ -143,6 +149,39 @@ self.array.setitem(self.offset, elem) +class SliceIterator(ArrayIter): + def __init__(self, arr, strides, backstrides, shape, order="C", + backward=False, dtype=None): + if dtype is None: + dtype = arr.implementation.dtype + self.dtype = dtype + self.arr = arr + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.elsize] + strides = strides[1:] + backstrides = backstrides[1:] + shape = shape[1:] + strides.reverse() + backstrides.reverse() + shape.reverse() + size = support.product(shape) + else: + shape = [support.product(shape)] + strides, backstrides = calc_strides(shape, dtype, order) + size = 1 + self.slicesize = support.product(shape) + self.gap = strides + + ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) + + def getslice(self): + from pypy.module.micronumpy.concrete import SliceArray + retVal = SliceArray(self.offset, self.gap, self.backstrides, + [self.slicesize], self.arr.implementation, self.arr, self.dtype) + return retVal + + def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() backstrides = array.get_backstrides() diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -996,7 +996,8 @@ descr_cumsum = _reduce_ufunc_impl('add', cumulative=True) descr_cumprod = _reduce_ufunc_impl('multiply', cumulative=True) - def _reduce_argmax_argmin_impl(op_name): + def _reduce_argmax_argmin_impl(raw_name): + op_name = "arg%s" % raw_name def impl(self, space, w_axis=None, w_out=None): if not space.is_none(w_axis): raise 
oefmt(space.w_NotImplementedError, @@ -1007,18 +1008,17 @@ if self.get_size() == 0: raise oefmt(space.w_ValueError, "Can't call %s on zero-size arrays", op_name) - op = getattr(loop, op_name) try: - res = op(self) + getattr(self.get_dtype().itemtype, raw_name) except AttributeError: raise oefmt(space.w_NotImplementedError, '%s not implemented for %s', op_name, self.get_dtype().get_name()) - return space.wrap(res) - return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) + return space.wrap(getattr(loop, op_name)(self)) + return func_with_new_name(impl, "reduce_%s_impl" % op_name) - descr_argmax = _reduce_argmax_argmin_impl("argmax") - descr_argmin = _reduce_argmax_argmin_impl("argmin") + descr_argmax = _reduce_argmax_argmin_impl("max") + descr_argmin = _reduce_argmax_argmin_impl("min") def descr_int(self, space): if self.get_size() != 1: diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/nditer.py @@ -0,0 +1,595 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.error import OperationError +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.strides import (calculate_broadcast_strides, + shape_agreement, shape_agreement_multiple) +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.concrete import SliceArray +from pypy.module.micronumpy.descriptor import decode_w_dtype +from pypy.module.micronumpy import ufuncs, support + + +class AbstractIterator(object): + def done(self): + raise NotImplementedError("Abstract Class") + + def next(self): + raise NotImplementedError("Abstract Class") + + def getitem(self, space, array): + raise NotImplementedError("Abstract Class") + +class IteratorMixin(object): + _mixin_ = 
True + def __init__(self, it, op_flags): + self.it = it + self.op_flags = op_flags + + def done(self): + return self.it.done() + + def next(self): + self.it.next() + + def getitem(self, space, array): + return self.op_flags.get_it_item[self.index](space, array, self.it) + + def setitem(self, space, array, val): + xxx + +class BoxIterator(IteratorMixin, AbstractIterator): + index = 0 + +class ExternalLoopIterator(IteratorMixin, AbstractIterator): + index = 1 + +def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): + ret = [] + if space.is_w(w_op_flags, space.w_None): + for i in range(n): + ret.append(OpFlag()) + elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + else: + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) + for i in range(n): + ret.append(op_flag) + return ret + +class OpFlag(object): + def __init__(self): + self.rw = 'r' + self.broadcast = True + self.force_contig = False + self.force_align = False + self.native_byte_order = False + self.tmp_copy = '' + self.allocate = False + self.get_it_item = (get_readonly_item, get_readonly_slice) + +def get_readonly_item(space, array, it): + return space.wrap(it.getitem()) + +def get_readwrite_item(space, array, it): + #create a single-value view (since scalars are not views) + res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) + #it.dtype.setitem(res, 0, it.getitem()) + return W_NDimArray(res) + +def get_readonly_slice(space, array, it): + return 
W_NDimArray(it.getslice().readonly()) + +def get_readwrite_slice(space, array, it): + return W_NDimArray(it.getslice()) + +def parse_op_flag(space, lst): + op_flag = OpFlag() + for w_item in lst: + item = space.str_w(w_item) + if item == 'readonly': + op_flag.rw = 'r' + elif item == 'readwrite': + op_flag.rw = 'rw' + elif item == 'writeonly': + op_flag.rw = 'w' + elif item == 'no_broadcast': + op_flag.broadcast = False + elif item == 'contig': + op_flag.force_contig = True + elif item == 'aligned': + op_flag.force_align = True + elif item == 'nbo': + op_flag.native_byte_order = True + elif item == 'copy': + op_flag.tmp_copy = 'r' + elif item == 'updateifcopy': + op_flag.tmp_copy = 'rw' + elif item == 'allocate': + op_flag.allocate = True + elif item == 'no_subtype': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"no_subtype" op_flag not implemented yet')) + elif item == 'arraymask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"arraymask" op_flag not implemented yet')) + elif item == 'writemask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"writemask" op_flag not implemented yet')) + else: + raise OperationError(space.w_ValueError, space.wrap( + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == 'r': + op_flag.get_it_item = (get_readonly_item, get_readonly_slice) + elif op_flag.rw == 'rw': + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + elif op_flag.rw == 'w': + # XXX Extra logic needed to make sure writeonly + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + return op_flag + +def parse_func_flags(space, nditer, w_flags): + if space.is_w(w_flags, space.w_None): + return + elif not space.isinstance_w(w_flags, space.w_tuple) and not \ + space.isinstance_w(w_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + 'Iter global flags must be a list or tuple of strings')) + lst = space.listview(w_flags) + for w_item in 
lst: + if not space.isinstance_w(w_item, space.w_str) and not \ + space.isinstance_w(w_item, space.w_unicode): + typename = space.type(w_item).getname(space) + raise OperationError(space.w_TypeError, space.wrap( + 'expected string or Unicode object, %s found' % typename)) + item = space.str_w(w_item) + if item == 'external_loop': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer external_loop not implemented yet')) + nditer.external_loop = True + elif item == 'buffered': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer buffered not implemented yet')) + # For numpy compatability + nditer.buffered = True + elif item == 'c_index': + nditer.tracked_index = 'C' + elif item == 'f_index': + nditer.tracked_index = 'F' + elif item == 'multi_index': + nditer.tracked_index = 'multi' + elif item == 'common_dtype': + nditer.common_dtype = True + elif item == 'delay_bufalloc': + nditer.delay_bufalloc = True + elif item == 'grow_inner': + nditer.grow_inner = True + elif item == 'ranged': + nditer.ranged = True + elif item == 'refs_ok': + nditer.refs_ok = True + elif item == 'reduce_ok': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer reduce_ok not implemented yet')) + nditer.reduce_ok = True + elif item == 'zerosize_ok': + nditer.zerosize_ok = True + else: + raise OperationError(space.w_ValueError, space.wrap( + 'Unexpected iterator global flag "%s"' % item)) + if nditer.tracked_index and nditer.external_loop: + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + +def is_backward(imp, order): + if order == 'K' or (order == 'C' and imp.order == 'C'): + return False + elif order =='F' and imp.order == 'C': + return True + else: + raise NotImplementedError('not implemented yet') + +def get_iter(space, order, arr, shape, dtype): + imp = arr.implementation + backward = is_backward(imp, order) + if 
arr.is_scalar(): + return ArrayIter(imp, 1, [], [], []) + if (imp.strides[0] < imp.strides[-1] and not backward) or \ + (imp.strides[0] > imp.strides[-1] and backward): + # flip the strides. Is this always true for multidimension? + strides = imp.strides[:] + backstrides = imp.backstrides[:] + shape = imp.shape[:] + strides.reverse() + backstrides.reverse() + shape.reverse() + else: + strides = imp.strides + backstrides = imp.backstrides + r = calculate_broadcast_strides(strides, backstrides, imp.shape, + shape, backward) + return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) + +def get_external_loop_iter(space, order, arr, shape): + imp = arr.implementation + backward = is_backward(imp, order) + return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + +def convert_to_array_or_none(space, w_elem): + ''' + None will be passed through, all others will be converted + ''' + if space.is_none(w_elem): + return None + return convert_to_array(space, w_elem) + + +class IndexIterator(object): + def __init__(self, shape, backward=False): + self.shape = shape + self.index = [0] * len(shape) + self.backward = backward + + def next(self): + # TODO It's probably possible to refactor all the "next" method from each iterator + for i in range(len(self.shape) - 1, -1, -1): + if self.index[i] < self.shape[i] - 1: + self.index[i] += 1 + break + else: + self.index[i] = 0 + + def getvalue(self): + if not self.backward: + ret = self.index[-1] + for i in range(len(self.shape) - 2, -1, -1): + ret += self.index[i] * self.shape[i - 1] + else: + ret = self.index[0] + for i in range(1, len(self.shape)): + ret += self.index[i] * self.shape[i - 1] + return ret + +class W_NDIter(W_Root): + + def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, + w_op_axes, w_itershape, w_buffersize, order): + self.order = order + self.external_loop = False + self.buffered = False + self.tracked_index = '' + self.common_dtype = False + 
self.delay_bufalloc = False + self.grow_inner = False + self.ranged = False + self.refs_ok = False + self.reduce_ok = False + self.zerosize_ok = False + self.index_iter = None + self.done = False + self.first_next = True + self.op_axes = [] + # convert w_seq operands to a list of W_NDimArray + if space.isinstance_w(w_seq, space.w_tuple) or \ + space.isinstance_w(w_seq, space.w_list): + w_seq_as_list = space.listview(w_seq) + self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + else: + self.seq =[convert_to_array(space, w_seq)] + + parse_func_flags(space, self, w_flags) + self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, + len(self.seq), parse_op_flag) + # handle w_op_axes + if not space.is_none(w_op_axes): + self.set_op_axes(space, w_op_axes) + + # handle w_op_dtypes part 1: creating self.dtypes list from input + if not space.is_none(w_op_dtypes): + w_seq_as_list = space.listview(w_op_dtypes) + self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] + if len(self.dtypes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap( + "op_dtypes must be a tuple/list matching the number of ops")) + else: + self.dtypes = [] + + # handle None or writable operands, calculate my shape + self.iters=[] + outargs = [i for i in range(len(self.seq)) \ + if self.seq[i] is None or self.op_flags[i].rw == 'w'] + if len(outargs) > 0: + out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) + else: + out_shape = None + self.shape = iter_shape = shape_agreement_multiple(space, self.seq, + shape=out_shape) + if len(outargs) > 0: + # Make None operands writeonly and flagged for allocation + if len(self.dtypes) > 0: + out_dtype = self.dtypes[outargs[0]] + else: + out_dtype = None + for i in range(len(self.seq)): + if self.seq[i] is None: + self.op_flags[i].get_it_item = (get_readwrite_item, + get_readwrite_slice) + self.op_flags[i].allocate = True + continue + if self.op_flags[i].rw == 'w': + 
continue + out_dtype = ufuncs.find_binop_result_dtype(space, + self.seq[i].get_dtype(), out_dtype) + for i in outargs: + if self.seq[i] is None: + # XXX can we postpone allocation to later? + self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) + else: + if not self.op_flags[i].broadcast: + # Raises if ooutput cannot be broadcast + shape_agreement(space, iter_shape, self.seq[i], False) + + if self.tracked_index != "": + if self.order == "K": + self.order = self.seq[0].implementation.order + if self.tracked_index == "multi": + backward = False + else: + backward = self.order != self.tracked_index + self.index_iter = IndexIterator(iter_shape, backward=backward) + + # handle w_op_dtypes part 2: copy where needed if possible + if len(self.dtypes) > 0: + for i in range(len(self.seq)): + selfd = self.dtypes[i] + seq_d = self.seq[i].get_dtype() + if not selfd: + self.dtypes[i] = seq_d + elif selfd != seq_d: + if not 'r' in self.op_flags[i].tmp_copy: + raise OperationError(space.w_TypeError, space.wrap( + "Iterator operand required copying or buffering for operand %d" % i)) + impl = self.seq[i].implementation + new_impl = impl.astype(space, selfd) + self.seq[i] = W_NDimArray(new_impl) + else: + #copy them from seq + self.dtypes = [s.get_dtype() for s in self.seq] + + # create an iterator for each operand + if self.external_loop: + for i in range(len(self.seq)): + self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) + else: + for i in range(len(self.seq)): + self.iters.append(BoxIterator(get_iter(space, self.order, + self.seq[i], iter_shape, self.dtypes[i]), + self.op_flags[i])) + def set_op_axes(self, space, w_op_axes): + if space.len_w(w_op_axes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + op_axes = space.listview(w_op_axes) + l = -1 + for w_axis in op_axes: + if not space.is_none(w_axis): + 
axis_len = space.len_w(w_axis) + if l == -1: + l = axis_len + elif axis_len != l: + raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) + self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + if l == -1: + raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise Exception('xxx TODO') + # Check that values make sense: + # - in bounds for each operand + # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions + # - no repeat axis + # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0 + + def descr_iter(self, space): + return space.wrap(self) + + def descr_getitem(self, space, w_idx): + idx = space.int_w(w_idx) + try: + ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + except IndexError: + raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) + return ret + + def descr_setitem(self, space, w_idx, w_value): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_len(self, space): + space.wrap(len(self.iters)) + + def descr_next(self, space): + for it in self.iters: + if not it.done(): + break + else: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + res = [] + if self.index_iter: + if not self.first_next: + self.index_iter.next() + else: + self.first_next = False + for i in range(len(self.iters)): + res.append(self.iters[i].getitem(space, self.seq[i])) + self.iters[i].next() + if len(res) <2: + return res[0] + return space.newtuple(res) + + def iternext(self): + if self.index_iter: + self.index_iter.next() + for i in range(len(self.iters)): + self.iters[i].next() + for it in self.iters: + if not it.done(): + break + else: + self.done = True + 
return self.done + return self.done + + def descr_iternext(self, space): + return space.wrap(self.iternext()) + + def descr_copy(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_debug_print(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_enable_external_loop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + @unwrap_spec(axis=int) + def descr_remove_axis(self, space, axis): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_remove_multi_index(self, space, w_multi_index): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_reset(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_operands(self, space): + l_w = [] + for op in self.seq: + l_w.append(op.descr_view(space)) + return space.newlist(l_w) + + def descr_get_dtypes(self, space): + res = [None] * len(self.seq) + for i in range(len(self.seq)): + res[i] = self.seq[i].descr_get_dtype(space) + return space.newtuple(res) + + def descr_get_finished(self, space): + return space.wrap(self.done) + + def descr_get_has_delayed_bufalloc(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_has_index(self, space): + return space.wrap(self.tracked_index in ["C", "F"]) + + def descr_get_index(self, space): + if not self.tracked_index in ["C", "F"]: + raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.wrap(self.index_iter.getvalue()) + + def descr_get_has_multi_index(self, space): + return space.wrap(self.tracked_index == "multi") + + def 
descr_get_multi_index(self, space): + if not self.tracked_index == "multi": + raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a multi-index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.newtuple([space.wrap(x) for x in self.index_iter.index]) + + def descr_get_iterationneedsapi(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_iterindex(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_itersize(self, space): + return space.wrap(support.product(self.shape)) + + def descr_get_itviews(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_ndim(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_nop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_shape(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_value(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + + at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes = WrappedDefault(None), order=str, + w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), + w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) +def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order='K'): + return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order) + +W_NDIter.typedef = TypeDef( + 'nditer', + __iter__ = interp2app(W_NDIter.descr_iter), + __getitem__ = 
interp2app(W_NDIter.descr_getitem), + __setitem__ = interp2app(W_NDIter.descr_setitem), + __len__ = interp2app(W_NDIter.descr_len), + + next = interp2app(W_NDIter.descr_next), + iternext = interp2app(W_NDIter.descr_iternext), + copy = interp2app(W_NDIter.descr_copy), + debug_print = interp2app(W_NDIter.descr_debug_print), + enable_external_loop = interp2app(W_NDIter.descr_enable_external_loop), + remove_axis = interp2app(W_NDIter.descr_remove_axis), + remove_multi_index = interp2app(W_NDIter.descr_remove_multi_index), + reset = interp2app(W_NDIter.descr_reset), + + operands = GetSetProperty(W_NDIter.descr_get_operands), + dtypes = GetSetProperty(W_NDIter.descr_get_dtypes), + finished = GetSetProperty(W_NDIter.descr_get_finished), + has_delayed_bufalloc = GetSetProperty(W_NDIter.descr_get_has_delayed_bufalloc), + has_index = GetSetProperty(W_NDIter.descr_get_has_index), + index = GetSetProperty(W_NDIter.descr_get_index), + has_multi_index = GetSetProperty(W_NDIter.descr_get_has_multi_index), + multi_index = GetSetProperty(W_NDIter.descr_get_multi_index), + iterationneedsapi = GetSetProperty(W_NDIter.descr_get_iterationneedsapi), + iterindex = GetSetProperty(W_NDIter.descr_get_iterindex), + itersize = GetSetProperty(W_NDIter.descr_get_itersize), + itviews = GetSetProperty(W_NDIter.descr_get_itviews), + ndim = GetSetProperty(W_NDIter.descr_get_ndim), + nop = GetSetProperty(W_NDIter.descr_get_nop), + shape = GetSetProperty(W_NDIter.descr_get_shape), + value = GetSetProperty(W_NDIter.descr_get_value), +) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,14 +282,16 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list): +def shape_agreement_multiple(space, array_list, shape=None): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - shape = array_list[0].get_shape() - for arr in 
array_list[1:]: + for arr in array_list: if not space.is_none(arr): - shape = shape_agreement(space, shape, arr) + if shape is None: + shape = arr.get_shape() + else: + shape = shape_agreement(space, shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -319,3 +319,14 @@ ''') results = interp.results[0] assert isinstance(results, W_NDimArray) + + def test_astype_dtype(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = int + c = astype(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) + assert results.get_dtype().is_int() diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -0,0 +1,302 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestNDIter(BaseNumpyAppTest): + def test_basic(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + + for x in nditer(a.T): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + + def test_order(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a, order='C'): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + for x in nditer(a, order='F'): + r.append(x) + assert r == [0, 3, 1, 4, 2, 5] + + def test_readwrite(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + for x in nditer(a, op_flags=['readwrite']): + x[...] 
= 2 * x + assert (a == [[0, 2, 4], [6, 8, 10]]).all() + + def test_external_loop(self): + from numpy import arange, nditer, array + a = arange(24).reshape(2, 3, 4) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + r = [] + n = 0 + for x in nditer(a, flags=['external_loop']): + r.append(x) + n += 1 + assert n == 1 + assert (array(r) == range(24)).all() + r = [] + n = 0 + for x in nditer(a, flags=['external_loop'], order='F'): + r.append(x) + n += 1 + assert n == 12 + assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() + e = raises(ValueError, 'r[0][0] = 0') + assert str(e.value) == 'assignment destination is read-only' + r = [] + for x in nditer(a.T, flags=['external_loop'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1,24) + assert (array(r) == arange(24)).all() + + def test_index(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + + r = [] + it = nditer(a, flags=['c_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] + exc = None + try: + it.index + except ValueError, e: + exc = e + assert exc + + r = [] + it = nditer(a, flags=['f_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + + @py.test.mark.xfail(reason="Fortran order not implemented") + def test_iters_with_different_order(self): + from numpy import nditer, array + + a = array([[1, 2], [3, 4]], order="C") + b = array([[1, 2], [3, 4]], order="F") + + it = nditer([a, b]) + + assert list(it) == zip(range(1, 5), range(1, 5)) + + def test_interface(self): + from numpy import arange, nditer, zeros + import sys + a = 
arange(6).reshape(2,3) + r = [] + it = nditer(a, flags=['f_index']) + while not it.finished: + r.append((it[0], it.index)) + it.iternext() + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, 'it[0] = 3') + skip('nditer.__setitem__ not implmented') + while not it.finished: + it[0] = it.multi_index[1] - it.multi_index[0] + it.iternext() + assert (a == [[0, 1, 2], [-1, 0, 1]]).all() + # b = zeros((2, 3)) + # exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) + # assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") + + def test_buffered(self): + from numpy import arange, nditer, array + a = arange(6).reshape(2,3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered']) + skip('nditer buffered not implmented') + r = [] + for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1, 6) + assert (array_r == [0, 3, 1, 4, 2, 5]).all() + + def test_op_dtype(self): + from numpy import arange, nditer, sqrt, array + a = arange(6).reshape(2,3) - 3 + exc = raises(TypeError, nditer, a, op_dtypes=['complex']) + assert str(exc.value).startswith("Iterator operand required copying or buffering") + r = [] + for x in nditer(a, op_flags=['readonly','copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + r = [] + for x in nditer(a, op_flags=['copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + multi = nditer([None, array([2, 3], dtype='int64'), array(2., dtype='double')], + op_dtypes = ['int64', 'int64', 'float64'], + 
op_flags = [['writeonly', 'allocate'], ['readonly'], ['readonly']]) + for a, b, c in multi: + a[...] = b * c + assert (multi.operands[0] == [4, 6]).all() + + def test_casting(self): + from numpy import arange, nditer + import sys + a = arange(6.) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + skip('nditer casting not implemented yet') + exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], + casting='same_kind'): + r.append(x) + assert r == [0., 1., 2., 3., 4., 5.] + exc = raises(TypeError, nditer, a, flags=['buffered'], + op_dtypes=['int32'], casting='same_kind') + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + b = arange(6) + exc = raises(TypeError, nditer, b, flags=['buffered'], op_dtypes=['float64'], + op_flags=['readwrite'], casting='same_kind') + assert str(exc.value).startswith("Iterator requested dtype could not be cast") + + def test_broadcast(self): + from numpy import arange, nditer + a = arange(3) + b = arange(6).reshape(2,3) + r = [] + it = nditer([a, b]) + assert it.itersize == 6 + for x,y in it: + r.append((x, y)) + assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] + a = arange(2) + exc = raises(ValueError, nditer, [a, b]) + assert str(exc.value).find('shapes (2) (2,3)') > 0 + + def test_outarg(self): + from numpy import nditer, zeros, arange + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [1, 2], flags=['external_loop']) + skip('nditer external_loop not implmented') + + def square1(a): + it = nditer([a, None]) + for x,y in it: + y[...] 
= x*x + return it.operands[1] + assert (square1([1, 2, 3]) == [1, 4, 9]).all() + + def square2(a, out=None): + it = nditer([a, out], flags=['external_loop', 'buffered'], + op_flags=[['readonly'], + ['writeonly', 'allocate', 'no_broadcast']]) + for x,y in it: + y[...] = x*x + return it.operands[1] + assert (square2([1, 2, 3]) == [1, 4, 9]).all() + b = zeros((3, )) + c = square2([1, 2, 3], out=b) + assert (c == [1., 4., 9.]).all() + assert (b == c).all() + exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) + assert str(exc.value).find('cannot be broadcasted') > 0 + + def test_outer_product(self): + from numpy import nditer, arange + a = arange(3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + b = arange(8).reshape(2,4) + it = nditer([a, b, None], flags=['external_loop'], + op_axes=[[0, -1, -1], [-1, 0, 1], None]) + for x, y, z in it: + z[...] = x*y + assert it.operands[2].shape == (3, 2, 4) + for i in range(a.size): + assert (it.operands[2][i] == a[i]*b).all() + + def test_reduction(self): + from numpy import nditer, arange, array + import sys + a = arange(24).reshape(2, 3, 4) + b = array(0) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) + skip('nditer reduce_ok not implemented yet') + #reduction operands must be readwrite + for x, y in nditer([a, b], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite']]): + y[...] += x + assert b == 276 + assert b == a.sum() + + # reduction and allocation requires op_axes and initialization + it = nditer([a, None], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + for x, y in it: + y[...] 
+= x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + # previous example with buffering, requires more flags and reset + it = nditer([a, None], flags=['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + it.reset() + for x, y in it: + y[...] += x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + def test_get_dtypes(self): + from numpy import array, nditer + x = array([1, 2]) + y = array([1.0, 2.0]) + assert nditer([x, y]).dtypes == (x.dtype, y.dtype) + + def test_multi_index(self): + import numpy as np + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + res = [] + while not it.finished: + res.append((it[0], it.multi_index)) + it.iternext() + assert res == [(0, (0, 0)), (1, (0, 1)), + (2, (0, 2)), (3, (1, 0)), + (4, (1, 1)), (5, (1, 2))] diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -94,14 +94,58 @@ a -> 3 """ - def test_floatadd(self): + def test_float_add(self): result = self.run("float_add") assert result == 3 + 3 - py.test.skip("don't run for now") - self.check_simple_loop({"raw_load": 1, "float_add": 1, - "raw_store": 1, "int_add": 1, - "int_ge": 1, "guard_false": 1, "jump": 1, - 'arraylen_gc': 1}) + self.check_trace_count(1) + self.check_simple_loop({ + 'float_add': 1, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, + 'guard_false': 1, + 'guard_not_invalidated': 1, + 'guard_true': 3, + 'int_add': 9, + 'int_ge': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + 'setfield_gc': 6, + }) + + def define_pow(): + return """ + a = |30| ** 2 + a -> 3 + """ + + def test_pow(self): + result = self.run("pow") + 
assert result == 3 ** 2 + self.check_trace_count(1) + self.check_simple_loop({ + 'call': 3, + 'float_add': 1, + 'float_eq': 3, + 'float_mul': 2, + 'float_ne': 1, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, + 'guard_false': 4, + 'guard_not_invalidated': 1, + 'guard_true': 5, + 'int_add': 9, + 'int_ge': 1, + 'int_is_true': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + 'setfield_gc': 6, + }) def define_sum(): return """ @@ -520,7 +564,6 @@ 'raw_load': 2, }) self.check_resops({ - 'arraylen_gc': 1, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 7, @@ -540,11 +583,10 @@ 'int_lt': 11, 'int_sub': 4, 'jump': 3, - 'new_array': 1, 'raw_load': 6, 'raw_store': 1, - 'setarrayitem_gc': 8, - 'setfield_gc': 15, + 'setarrayitem_gc': 10, + 'setfield_gc': 14, }) def define_argsort(): diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -3,15 +3,18 @@ indirection is introduced to make the version tag change less often. 
""" +from rpython.rlib import jit, rerased + from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.dictmultiobject import create_iterator_classes -from pypy.objspace.std.dictmultiobject import DictStrategy, _never_equal_to_string -from pypy.objspace.std.dictmultiobject import ObjectDictStrategy -from rpython.rlib import jit, rerased +from pypy.objspace.std.dictmultiobject import ( + DictStrategy, ObjectDictStrategy, _never_equal_to_string, + create_iterator_classes) + class VersionTag(object): pass + class ModuleCell(W_Root): def __init__(self, w_value=None): self.w_value = w_value @@ -19,11 +22,17 @@ def __repr__(self): return "" % (self.w_value, ) + def unwrap_cell(w_value): if isinstance(w_value, ModuleCell): return w_value.w_value return w_value + +def _wrapkey(space, key): + return space.wrap(key) + + class ModuleDictStrategy(DictStrategy): erase, unerase = rerased.new_erasing_pair("modulecell") @@ -55,7 +64,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space if space.is_w(space.type(w_key), space.w_str): - self.setitem_str(w_dict, self.space.str_w(w_key), w_value) + self.setitem_str(w_dict, space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) w_dict.setitem(w_key, w_value) @@ -66,8 +75,8 @@ cell.w_value = w_value return if cell is not None: - # If the new value and the current value are the same, don't create a - # level of indirection, or mutate the version. + # If the new value and the current value are the same, don't + # create a level of indirection, or mutate the version. 
if self.space.is_w(w_value, cell): return w_value = ModuleCell(w_value) @@ -121,8 +130,8 @@ return w_dict.getitem(w_key) def getitem_str(self, w_dict, key): - w_res = self.getdictvalue_no_unwrapping(w_dict, key) - return unwrap_cell(w_res) + cell = self.getdictvalue_no_unwrapping(w_dict, key) + return unwrap_cell(cell) def w_keys(self, w_dict): space = self.space @@ -136,37 +145,43 @@ def items(self, w_dict): space = self.space iterator = self.unerase(w_dict.dstorage).iteritems - return [space.newtuple([space.wrap(key), unwrap_cell(cell)]) - for key, cell in iterator()] + return [space.newtuple([_wrapkey(space, key), unwrap_cell(cell)]) + for key, cell in iterator()] def clear(self, w_dict): self.unerase(w_dict.dstorage).clear() self.mutated() def popitem(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - key, w_value = d.popitem() + key, cell = d.popitem() self.mutated() - return self.space.wrap(key), unwrap_cell(w_value) + return _wrapkey(space, key), unwrap_cell(cell) def switch_to_object_strategy(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - strategy = self.space.fromcache(ObjectDictStrategy) + strategy = space.fromcache(ObjectDictStrategy) d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): - d_new[self.space.wrap(key)] = unwrap_cell(cell) + d_new[_wrapkey(space, key)] = unwrap_cell(cell) w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): return self.unerase(w_dict.dstorage).iterkeys() + def getitervalues(self, w_dict): return self.unerase(w_dict.dstorage).itervalues() + def getiteritems(self, w_dict): return self.unerase(w_dict.dstorage).iteritems() - def wrapkey(space, key): - return space.wrap(key) + + wrapkey = _wrapkey + def wrapvalue(space, value): return unwrap_cell(value) + create_iterator_classes(ModuleDictStrategy) diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- 
a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -1,12 +1,19 @@ -## ---------------------------------------------------------------------------- -## dict strategy (see dictmultiobject.py) +"""dict implementation specialized for keyword argument dicts. -from rpython.rlib import rerased, jit +Based on two lists containing unwrapped key value pairs. +""" + +from rpython.rlib import jit, rerased + from pypy.objspace.std.dictmultiobject import ( BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, create_iterator_classes) +def _wrapkey(space, key): + return space.wrap(key) + + class EmptyKwargsDictStrategy(EmptyDictStrategy): def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) @@ -21,7 +28,7 @@ unerase = staticmethod(unerase) def wrap(self, key): - return self.space.wrap(key) + return _wrapkey(self.space, key) def unwrap(self, wrapped): return self.space.str_w(wrapped) @@ -117,16 +124,14 @@ def items(self, w_dict): space = self.space keys, values_w = self.unerase(w_dict.dstorage) - result = [] - for i in range(len(keys)): - result.append(space.newtuple([self.wrap(keys[i]), values_w[i]])) - return result + return [space.newtuple([self.wrap(keys[i]), values_w[i]]) + for i in range(len(keys))] def popitem(self, w_dict): keys, values_w = self.unerase(w_dict.dstorage) key = keys.pop() w_value = values_w.pop() - return (self.wrap(key), w_value) + return self.wrap(key), w_value def clear(self, w_dict): w_dict.dstorage = self.get_empty_storage() @@ -164,17 +169,15 @@ keys = self.unerase(w_dict.dstorage)[0] return iter(range(len(keys))) - def wrapkey(space, key): - return space.wrap(key) + wrapkey = _wrapkey def next_item(self): strategy = self.strategy assert isinstance(strategy, KwargsDictStrategy) for i in self.iterator: - keys, values_w = strategy.unerase( - self.dictimplementation.dstorage) - return self.space.wrap(keys[i]), values_w[i] + keys, values_w = 
strategy.unerase(self.dictimplementation.dstorage) + return _wrapkey(self.space, keys[i]), values_w[i] else: return None, None diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -365,7 +365,7 @@ # we're returning with a guard_not_forced_2, and # additionally we need to say that r0 contains # a reference too: - self._finish_gcmap[0] |= r_uint(0) + self._finish_gcmap[0] |= r_uint(1) gcmap = self._finish_gcmap else: gcmap = self.gcmap_for_finish diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -220,6 +220,28 @@ call_release_gil_target = func._call_aroundstate_target_ call_release_gil_target = llmemory.cast_ptr_to_adr( call_release_gil_target) + elif op.opname == 'indirect_call': + # check that we're not trying to call indirectly some + # function with the special flags + graphs = op.args[-1].value + for graph in (graphs or ()): + if not hasattr(graph, 'func'): + continue + error = None + if hasattr(graph.func, '_elidable_function_'): + error = '@jit.elidable' + if hasattr(graph.func, '_jit_loop_invariant_'): + error = '@jit.loop_invariant' + if hasattr(graph.func, '_call_aroundstate_target_'): + error = '_call_aroundstate_target_' + if not error: + continue + raise Exception( + "%r is an indirect call to a family of functions " + "(or methods) that includes %r. However, the latter " + "is marked %r. You need to use an indirection: replace " + "it with a non-marked function/method which calls the " + "marked function." 
% (op, graph, error)) # build the extraeffect random_effects = self.randomeffects_analyzer.analyze(op) if random_effects: diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -167,6 +167,12 @@ def is_call_release_gil(self): return bool(self.call_release_gil_target) + def __repr__(self): + more = '' + if self.oopspecindex: + more = ' OS=%r' % (self.oopspecindex,) + return '' % (id(self), self.extraeffect, more) + def frozenset_or_none(x): if x is None: diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3418,6 +3418,26 @@ 'int_sub': 2, 'jump': 1, 'call': 2, 'guard_no_exception': 2, 'int_add': 4}) + def test_elidable_method(self): + py.test.skip("not supported so far: @elidable methods") + class A(object): + @elidable + def meth(self): + return 41 + class B(A): + @elidable + def meth(self): + return 42 + x = B() + def callme(x): + return x.meth() + def f(): + callme(A()) + return callme(x) + res = self.interp_operations(f, []) + assert res == 42 + self.check_operations_history({'finish': 1}) + def test_look_inside_iff_const_getarrayitem_gc_pure(self): driver = JitDriver(greens=['unroll'], reds=['s', 'n']) diff --git a/rpython/translator/c/src/instrument.c b/rpython/translator/c/src/instrument.c --- a/rpython/translator/c/src/instrument.c +++ b/rpython/translator/c/src/instrument.c @@ -6,10 +6,10 @@ #include #include #include +#include +#include #ifndef _WIN32 #include -#include -#include #include #else #include diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -1,3 +1,6 @@ +#ifndef _THREAD_NT_H +#define _THREAD_NT_H +#include #include /* @@ -19,4 +22,4 @@ void 
RPyThreadReleaseLock(struct RPyOpaque_ThreadLock *lock); long RPyThreadGetStackSize(void); long RPyThreadSetStackSize(long); - +#endif diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -2,6 +2,7 @@ #ifdef _WIN32 +#include #include #define __thread __declspec(thread) typedef DWORD RPyThreadTLS; From noreply at buildbot.pypy.org Fri Apr 18 02:43:10 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:10 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: adapt tests, progress Message-ID: <20140418004310.51EE61C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70736:ec5e5a6abac5 Date: 2014-04-17 17:40 -0700 http://bitbucket.org/pypy/pypy/changeset/ec5e5a6abac5/ Log: adapt tests, progress diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -351,7 +351,7 @@ limit -= len(self.keyword_names_w) for i in range(len(self.keywords)): if i < limit: - w_key = space.wrap(self.keywords[i]) + w_key = space.wrap(self.keywords[i].decode('utf-8')) else: w_key = self.keyword_names_w[i - limit] space.setitem(w_kwds, w_key, self.keywords_w[i]) @@ -446,7 +446,7 @@ break else: if i < limit: - w_key = space.wrap(keywords[i]) + w_key = space.wrap(keywords[i].decode('utf-8')) else: w_key = keyword_names_w[i - limit] space.setitem(w_kwds, w_key, keywords_w[i]) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -55,9 +55,9 @@ elif space.config.objspace.std.withmapdict and instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) - #elif instance or strdict or module: - # assert w_type is None - # strategy = 
space.fromcache(BytesDictStrategy) + elif instance or strdict or module: + assert w_type is None + strategy = space.fromcache(UnicodeDictStrategy) elif kwargs: assert w_type is None from pypy.objspace.std.kwargsdict import EmptyKwargsDictStrategy @@ -119,6 +119,7 @@ byteslist = space.listview_bytes(w_keys) if byteslist is not None: for key in byteslist: + # XXX: bytes is tied to setitem_str here! w_dict.setitem_str(key, w_fill) else: for w_key in space.listview(w_keys): @@ -430,18 +431,18 @@ return self.erase(None) def switch_to_correct_strategy(self, w_dict, w_key): + from pypy.objspace.std.intobject import W_IntObject withidentitydict = self.space.config.objspace.std.withidentitydict - # if type(w_key) is self.space.StringObjectCls: - # self.switch_to_bytes_strategy(w_dict) - # return + if type(w_key) is self.space.StringObjectCls: + self.switch_to_bytes_strategy(w_dict) + return if type(w_key) is self.space.UnicodeObjectCls: self.switch_to_unicode_strategy(w_dict) return + if type(w_key) is W_IntObject: + self.switch_to_int_strategy(w_dict) + return w_type = self.space.type(w_key) - # XXX: disable IntDictStrategy for now, because in py3k ints are - # actually long - ## if self.space.is_w(w_type, self.space.w_int): - ## self.switch_to_int_strategy(w_dict) if withidentitydict and w_type.compares_by_identity(): self.switch_to_identity_strategy(w_dict) else: @@ -700,6 +701,8 @@ def setitem_str(self, w_dict, key, w_value): self.switch_to_object_strategy(w_dict) + # XXX: wrap(key) means we only allow ascii to + # setitem_str. should probaby allow utf-8 w_dict.setitem(self.space.wrap(key), w_value) def setdefault(self, w_dict, w_key, w_default): @@ -722,6 +725,8 @@ return len(self.unerase(w_dict.dstorage)) def getitem_str(self, w_dict, key): + # XXX: wrapping here caused some issues w/ + # ByteDictStrat.. 
double check return self.getitem(w_dict, self.space.wrap(key)) def getitem(self, w_dict, w_key): @@ -819,6 +824,7 @@ return self.space.wrap(unwrapped) def unwrap(self, wrapped): + # XXX: bytes_w return self.space.str_w(wrapped) def is_correct_type(self, w_obj): @@ -833,23 +839,23 @@ def _never_equal_to(self, w_lookup_type): return _never_equal_to_string(self.space, w_lookup_type) + ##def setitem_str(self, w_dict, key, w_value): + ## assert key is not None + ## self.unerase(w_dict.dstorage)[key] = w_value + """ - def setitem_str(self, w_dict, key, w_value): - assert key is not None - self.unerase(w_dict.dstorage)[key] = w_value - def getitem(self, w_dict, w_key): space = self.space # -- This is called extremely often. Hack for performance -- if type(w_key) is space.StringObjectCls: - return self.getitem_str(w_dict, w_key.unwrap(space)) + return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None) # -- End of performance hack -- return AbstractTypedStrategy.getitem(self, w_dict, w_key) + """ - def getitem_str(self, w_dict, key): - assert key is not None - return self.unerase(w_dict.dstorage).get(key, None) - """ + ##def getitem_str(self, w_dict, key): + ## assert key is not None + ## return self.unerase(w_dict.dstorage).get(key, None) def listview_bytes(self, w_dict): return self.unerase(w_dict.dstorage).keys() @@ -858,7 +864,7 @@ return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): - return space.wrap(key) + return space.wrapbytes(key) @jit.look_inside_iff(lambda self, w_dict: w_dict_unrolling_heuristic(w_dict)) @@ -909,11 +915,19 @@ def getitem(self, w_dict, w_key): space = self.space # -- This is called extremely often. 
Hack for performance -- - if type(w_key) is space.StringObjectCls: + if type(w_key) is space.UnicodeObjectCls: #return self.getitem_str(w_dict, w_key.unwrap(space)) # XXX: - key = w_key.unwrap(space) - return self.unerase(w_dict.dstorage).get(key, None) + #key = w_key.unwrap(space) # XXX: + #return self.unerase(w_dict.dstorage).get(key, None) + # XXX: triggers failure because s.unwrapped isn't set o_O + + #assert type(self) is UnicodeDictStrategy + #key = self.unwrap(w_key) + #assert key is not None + #return self.unerase(w_dict.dstorage).get(key, None) + #return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None) + return self.unerase(w_dict.dstorage).get(w_key.unwrap(space), None) # -- End of performance hack -- return AbstractTypedStrategy.getitem(self, w_dict, w_key) @@ -925,7 +939,7 @@ return self.unerase(w_dict.dstorage).keys() #def w_keys(self, w_dict): - # # XXX: I think we can completely kill w_keys... + # # XXX: we can completely kill w_keys on py3k # return self.space.newlist_str(self.listview_str(w_dict)) def wrapkey(space, key): @@ -962,8 +976,10 @@ return self.erase({}) def is_correct_type(self, w_obj): - space = self.space - return space.is_w(space.type(w_obj), space.w_int) + from pypy.objspace.std.intobject import W_IntObject + #space = self.space + #return space.is_w(space.type(w_obj), space.w_int) + return type(w_obj) is W_IntObject def _never_equal_to(self, w_lookup_type): space = self.space diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -11,7 +11,7 @@ def _wrapkey(space, key): - return space.wrap(key) + return space.wrap(key.decode('utf-8')) class EmptyKwargsDictStrategy(EmptyDictStrategy): @@ -138,7 +138,7 @@ w_dict.dstorage = self.get_empty_storage() def switch_to_object_strategy(self, w_dict): - strategy = self.space.fromcache(UnicodeDictStrategy) + strategy = self.space.fromcache(ObjectDictStrategy) keys, values_w = 
self.unerase(w_dict.dstorage) d_new = strategy.unerase(strategy.get_empty_storage()) for i in range(len(keys)): @@ -147,7 +147,7 @@ w_dict.dstorage = strategy.erase(d_new) def switch_to_unicode_strategy(self, w_dict): - strategy = self.space.fromcache(BytesDictStrategy) + strategy = self.space.fromcache(UnicodeDictStrategy) keys, values_w = self.unerase(w_dict.dstorage) storage = strategy.get_empty_storage() d_new = strategy.unerase(storage) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -125,7 +125,7 @@ assert self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) def test_fromkeys_fastpath(self): - py.test.py3k_skip("XXX: strategies are currently broken") + py.test.py3k_skip("XXX: list strategies are currently broken") space = self.space w = space.wrap wb = space.wrapbytes @@ -138,7 +138,6 @@ assert space.eq_w(w_d.getitem_str("b"), space.w_None) def test_listview_bytes_dict(self): - py.test.py3k_skip("XXX: strategies are currently broken") w = self.space.wrap wb = self.space.wrapbytes w_d = self.space.newdict() @@ -152,38 +151,40 @@ assert self.space.listview_unicode(w_d) == [u"a", u"b"] def test_listview_int_dict(self): - py.test.py3k_skip("IntDictStrategy not supported yet") w = self.space.wrap w_d = self.space.newdict() w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) assert self.space.listview_int(w_d) == [1, 2] def test_keys_on_string_unicode_int_dict(self, monkeypatch): - py.test.py3k_skip("XXX: strategies are currently broken") w = self.space.wrap wb = self.space.wrapbytes w_d = self.space.newdict() w_d.initialize_content([(w(1), wb("a")), (w(2), wb("b"))]) - w_l = self.space.call_method(w_d, "keys") + w_k = self.space.call_method(w_d, "keys") + w_l = self.space.call_function(self.space.w_list, w_k) assert sorted(self.space.listview_int(w_l)) == [1,2] - # make sure that 
.keys() calls newlist_bytes for string dicts + # make sure that list(d.keys()) calls newlist_bytes for byte dicts def not_allowed(*args): assert False, 'should not be called' monkeypatch.setattr(self.space, 'newlist', not_allowed) # w_d = self.space.newdict() - w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) - w_l = self.space.call_method(w_d, "keys") - assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] + w_d.initialize_content([(wb("a"), w(1)), (wb("b"), w(6))]) + w_k = self.space.call_method(w_d, "keys") + w_l = self.space.call_function(self.space.w_list, w_k) + #XXX: py.test.py3k_skip("XXX: list strategies are currently broken") + #assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] # XXX: it would be nice if the test passed without monkeypatch.undo(), # but we need space.newlist_unicode for it monkeypatch.undo() w_d = self.space.newdict() w_d.initialize_content([(w(u"a"), w(1)), (w(u"b"), w(6))]) - w_l = self.space.call_method(w_d, "keys") + w_k = self.space.call_method(w_d, "keys") + w_l = self.space.call_function(self.space.w_list, w_k) assert sorted(self.space.listview_unicode(w_l)) == [u"a", u"b"] class AppTest_DictObject: @@ -952,10 +953,9 @@ return r[r.find("(") + 1: r.find(")")] def test_empty_to_string(self): - py3k_skip("StringDictStrategy not supported yet") d = {} assert "EmptyDictStrategy" in self.get_strategy(d) - d["a"] = 1 + d[b"a"] = 1 assert "BytesDictStrategy" in self.get_strategy(d) class O(object): @@ -964,7 +964,7 @@ d = o.__dict__ = {} assert "EmptyDictStrategy" in self.get_strategy(d) o.a = 1 - assert "BytesDictStrategy" in self.get_strategy(d) + assert "UnicodeDictStrategy" in self.get_strategy(d) def test_empty_to_unicode(self): d = {} @@ -1029,10 +1029,17 @@ return str.__hash__(self) class FakeString(FakeWrapper, str): - pass + + def __hash__(self): + self.hash_count += 1 + return str.__hash__(self) class FakeUnicode(FakeWrapper, unicode): - pass + + def __hash__(self): + self.hash_count += 1 + return 
unicode.__hash__(self) + # the minimal 'space' needed to use a W_DictMultiObject class FakeSpace: @@ -1060,6 +1067,8 @@ def type(self, w_obj): if isinstance(w_obj, FakeString): return str + if isinstance(w_obj, FakeUnicode): + return unicode return type(w_obj) w_str = str w_unicode = unicode @@ -1158,11 +1167,13 @@ assert value == d.descr_getitem(self.space, key) class BaseTestRDictImplementation: + FakeString = FakeUnicode + allows__str = False def setup_method(self,method): self.fakespace = FakeSpace() - self.string = self.wrapstroruni("fish") - self.string2 = self.wrapstroruni("fish2") + self.string = self.wrapstrorunicode("fish") + self.string2 = self.wrapstrorunicode("fish2") self.impl = self.get_impl() def wrapstrorunicode(self, obj): @@ -1197,21 +1208,22 @@ else: assert a == self.string2 assert b == 2000 - assert self.impl.getitem_str(self.string) == 1000 + if self.allows__str: + result = self.impl.getitem_str(self.string) + else: + result = self.impl.getitem(self.string) + assert result == 1000 self.check_not_devolved() def test_setitem(self): self.impl.setitem(self.string, 1000) assert self.impl.length() == 1 assert self.impl.getitem(self.string) == 1000 - assert self.impl.getitem_str(self.string) == 1000 - self.check_not_devolved() - - def test_setitem_str(self): - self.impl.setitem_str(self.fakespace.str_w(self.string), 1000) - assert self.impl.length() == 1 - assert self.impl.getitem(self.string) == 1000 - assert self.impl.getitem_str(self.string) == 1000 + if self.allows__str: + result = self.impl.getitem_str(self.string) + else: + result = self.impl.getitem(self.string) + assert result == 1000 self.check_not_devolved() def test_delitem(self): @@ -1275,7 +1287,7 @@ def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names impl = self.impl - key = FakeString(self.string) + key = self.FakeString(self.string) x = impl.setdefault(key, 1) assert x == 1 if on_pypy: @@ -1317,19 +1329,33 @@ class 
TestUnicodeDictImplementation(BaseTestRDictImplementation): StrategyClass = UnicodeDictStrategy + FakeString = FakeUnicode + allows__str = True def test_str_shortcut(self): self.fill_impl() - #s = FakeString(self.string) - s = FakeUnicode(self.string) + s = self.FakeString(self.string) assert self.impl.getitem(s) == 1000 assert s.unwrapped def test_view_as_kwargs(self): - #py.test.py3k_skip("XXX: strategies are currently broken") self.fill_impl() assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) + def test_setitem_str(self): + self.impl.setitem_str(self.fakespace.str_w(self.string), 1000) + assert self.impl.length() == 1 + assert self.impl.getitem(self.string) == 1000 + assert self.impl.getitem_str(self.string) == 1000 + self.check_not_devolved() + +class TestBytesDictImplementation(BaseTestRDictImplementation): + StrategyClass = BytesDictStrategy + FakeString = FakeString + + def wrapstrorunicode(self, obj): + return self.fakespace.wrapbytes(obj) + class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): @@ -1344,8 +1370,7 @@ def test_module_uses_strdict(): - py.test.py3k_skip("XXX: strategies are currently broken") fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is BytesDictStrategy + assert type(d.strategy) is UnicodeDictStrategy diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -1,3 +1,4 @@ +# encoding: utf-8 import py from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject from pypy.objspace.std.kwargsdict import * @@ -159,3 +160,22 @@ assert a == 3 assert "KwargsDictStrategy" in self.get_strategy(d) + def test_unicode(self): + """ + def f(**kwargs): + return kwargs + + d = f(λ=True) + assert list(d) == ['λ'] + assert "KwargsDictStrategy" in self.get_strategy(d) + + d['foo'] = 
'bar' + assert sorted(d) == ['foo', 'λ'] + assert "KwargsDictStrategy" in self.get_strategy(d) + + d = f(λ=True) + o = object() + d[o] = 'baz' + assert set(d) == set(['λ', o]) + assert "ObjectDictStrategy" in self.get_strategy(d) + """ diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -7,7 +7,7 @@ from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject -py.test.py3k_skip("XXX: strategies are currently broken") +#py.test.py3k_skip("XXX: strategies are currently broken") class TestW_ListStrategies(TestW_ListObject): def test_check_strategy(self): From noreply at buildbot.pypy.org Fri Apr 18 02:43:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:11 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: fix cell/mapdict Message-ID: <20140418004311.89F581C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70737:b9437d441ad2 Date: 2014-04-17 17:41 -0700 http://bitbucket.org/pypy/pypy/changeset/b9437d441ad2/ Log: fix cell/mapdict diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -63,7 +63,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): self.setitem_str(w_dict, space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) @@ -85,7 +85,7 @@ def setdefault(self, w_dict, w_key, w_default): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): key = space.str_w(w_key) w_result = self.getitem_str(w_dict, key) if w_result is not None: @@ -99,7 +99,7 @@ def delitem(self, w_dict, w_key): 
space = self.space w_key_type = space.type(w_key) - if space.is_w(w_key_type, space.w_str): + if space.is_w(w_key_type, space.w_unicode): key = space.str_w(w_key) dict_w = self.unerase(w_dict.dstorage) try: @@ -120,7 +120,7 @@ def getitem(self, w_dict, w_key): space = self.space w_lookup_type = space.type(w_key) - if space.is_w(w_lookup_type, space.w_str): + if space.is_w(w_lookup_type, space.w_unicode): return self.getitem_str(w_dict, space.str_w(w_key)) elif _never_equal_to_string(space, w_lookup_type): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -640,7 +640,7 @@ def getitem(self, w_dict, w_key): space = self.space w_lookup_type = space.type(w_key) - if space.is_w(w_lookup_type, space.w_str): + if space.is_w(w_lookup_type, space.w_unicode): return self.getitem_str(w_dict, space.str_w(w_key)) elif _never_equal_to_string(space, w_lookup_type): return None @@ -659,7 +659,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): self.setitem_str(w_dict, self.space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) @@ -667,7 +667,7 @@ def setdefault(self, w_dict, w_key, w_default): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): key = space.str_w(w_key) w_result = self.getitem_str(w_dict, key) if w_result is not None: @@ -682,7 +682,7 @@ space = self.space w_key_type = space.type(w_key) w_obj = self.unerase(w_dict.dstorage) - if space.is_w(w_key_type, space.w_str): + if space.is_w(w_key_type, space.w_unicode): key = self.space.str_w(w_key) flag = w_obj.deldictvalue(space, key) if not flag: diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ 
b/pypy/objspace/std/test/test_celldict.py @@ -4,12 +4,12 @@ from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.test.test_dictmultiobject import ( BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, - FakeString) + FakeUnicode) space = FakeSpace() class TestCellDict(object): - FakeString = FakeString + FakeString = FakeUnicode def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) @@ -50,10 +50,10 @@ v1 = strategy.version x = object() - d.setitem("a", x) + d.setitem(u"a", x) v2 = strategy.version assert v1 is not v2 - d.setitem("a", x) + d.setitem(u"a", x) v3 = strategy.version assert v2 is v3 @@ -70,7 +70,7 @@ assert "ModuleDictStrategy" in __pypy__.internal_repr(obj) def test_check_module_uses_module_dict(self): - py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") + #py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") m = type(__builtins__)("abc") self.impl_used(m.__dict__) @@ -148,7 +148,7 @@ assert x == ("a", 3) def test_degenerate(self): - py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") + #py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") import __pypy__ d = self.d diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1168,7 +1168,7 @@ class BaseTestRDictImplementation: FakeString = FakeUnicode - allows__str = False + allows__str = False # XXX: this is maybe not necessary, just add tests to unicode to ensure we're allowing utf-8? 
def setup_method(self,method): self.fakespace = FakeSpace() @@ -1290,11 +1290,11 @@ key = self.FakeString(self.string) x = impl.setdefault(key, 1) assert x == 1 - if on_pypy: + if on_pypy and self.FakeString is FakeString: assert key.hash_count == 1 x = impl.setdefault(key, 2) assert x == 1 - if on_pypy: + if on_pypy and self.FakeString is FakeString: assert key.hash_count == 2 def test_fallback_evil_key(self): From noreply at buildbot.pypy.org Fri Apr 18 02:43:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:12 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: adapt disabled list strats Message-ID: <20140418004312.CAE821C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70738:83e84e9c47fc Date: 2014-04-17 17:41 -0700 http://bitbucket.org/pypy/pypy/changeset/83e84e9c47fc/ Log: adapt disabled list strats diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -401,8 +401,9 @@ def buffer_w(w_self, space): return StringBuffer(w_self._value) - def listview_bytes(self): - return _create_list_from_bytes(self._value) + # XXX: could provide listview_int + #def listview_bytes(self): + # return _create_list_from_bytes(self._value) def ord(self, space): if len(self._value) != 1: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -81,13 +81,11 @@ # check for strings # XXX: StringListStrategy is currently broken - """ for w_obj in list_w: if not type(w_obj) is W_BytesObject: break else: return space.fromcache(BytesListStrategy) - """ # check for unicode for w_obj in list_w: @@ -166,12 +164,11 @@ self.switch_to_object_strategy() return self - # XXX: BytesListStrategy is currently broken - #@staticmethod - #def newlist_bytes(space, list_b): - # strategy = 
space.fromcache(BytesListStrategy) - # storage = strategy.erase(list_b) - # return W_ListObject.from_storage_and_strategy(space, storage, strategy) + @staticmethod + def newlist_bytes(space, list_b): + strategy = space.fromcache(BytesListStrategy) + storage = strategy.erase(list_b) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) @staticmethod def newlist_unicode(space, list_u): @@ -875,8 +872,8 @@ def switch_to_correct_strategy(self, w_list, w_item): if type(w_item) is W_IntObject: strategy = self.space.fromcache(IntegerListStrategy) - #elif type(w_item) is W_BytesObject: - # strategy = self.space.fromcache(BytesListStrategy) + elif type(w_item) is W_BytesObject: + strategy = self.space.fromcache(BytesListStrategy) elif type(w_item) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeListStrategy) elif type(w_item) is W_FloatObject: @@ -1778,7 +1775,7 @@ def lt(self, a, b): return a < b -class StringSort(UnicodeBaseTimSort): +class StringSort(StringBaseTimSort): def lt(self, a, b): return a < b diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -316,10 +316,8 @@ assert not list_w or sizehint == -1 return W_ListObject(self, list_w, sizehint) - # XXX: BytesListStrategy is currently broken use the default - # implementation, which simply wraps - #def newlist_bytes(self, list_s): - # return W_ListObject.newlist_bytes(self, list_s) + def newlist_bytes(self, list_s): + return W_ListObject.newlist_bytes(self, list_s) def newlist_unicode(self, list_u): return W_ListObject.newlist_unicode(self, list_u) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -186,6 +186,7 @@ def test_setslice(self): space = self.space w = space.wrap + wb = space.wrapbytes l = W_ListObject(space, []) assert 
isinstance(l.strategy, EmptyListStrategy) @@ -642,13 +643,13 @@ def test_string_uses_newlist_bytes(self): space = self.space - w_s = space.wrap("a b c") + w_s = space.wrapbytes("a b c") space.newlist = None try: w_l = space.call_method(w_s, "split") - w_l2 = space.call_method(w_s, "split", space.wrap(" ")) + w_l2 = space.call_method(w_s, "split", space.wrapbytes(" ")) w_l3 = space.call_method(w_s, "rsplit") - w_l4 = space.call_method(w_s, "rsplit", space.wrap(" ")) + w_l4 = space.call_method(w_s, "rsplit", space.wrapbytes(" ")) finally: del space.newlist assert space.listview_bytes(w_l) == ["a", "b", "c"] From noreply at buildbot.pypy.org Fri Apr 18 02:43:06 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:06 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: merge py3k Message-ID: <20140418004306.96E561C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70733:a98806ecc708 Date: 2014-04-17 16:32 -0700 http://bitbucket.org/pypy/pypy/changeset/a98806ecc708/ Log: merge py3k diff too long, truncating to 2000 out of 2524 lines diff --git a/lib-python/3/sre_compile.py b/lib-python/3/sre_compile.py --- a/lib-python/3/sre_compile.py +++ b/lib-python/3/sre_compile.py @@ -13,7 +13,6 @@ import _sre, sys import sre_parse from sre_constants import * -from _sre import MAXREPEAT assert _sre.MAGIC == MAGIC, "SRE module mismatch" @@ -356,8 +355,6 @@ def _simple(av): # check if av is a "simple" operator lo, hi = av[2].getwidth() - if lo == 0 and hi == MAXREPEAT: - raise error("nothing to repeat") return lo == hi == 1 and av[2][0][0] != SUBPATTERN def _compile_info(code, pattern, flags): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -12,9 +12,9 @@ assert conf.objspace.usemodules.gc conf.objspace.std.withmapdict = True - assert conf.objspace.std.withmethodcache + assert 
conf.objspace.std.withtypeversion conf = get_pypy_config() - conf.objspace.std.withmethodcache = False + conf.objspace.std.withtypeversion = False py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") def test_conflicting_gcrootfinder(): diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -318,7 +318,7 @@ To read more about the RPython limitations read the `RPython description`_. -.. _`RPython description`: coding-guide.html#restricted-python +.. _`RPython description`: coding-guide.html#rpython-definition --------------------------------------------------------------- Does RPython have anything to do with Zope's Restricted Python? diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -92,7 +92,9 @@ `D07.1 Massive Parallelism and Translation Aspects`_ is a report about PyPy's optimization efforts, garbage collectors and massive parallelism (stackless) features. This report refers to the paper `PyPy's approach -to virtual machine construction`_. *(2007-02-28)* +to virtual machine construction`_. Extends the content previously +available in the document "Memory management and threading models as +translation aspects -- solutions and challenges". *(2007-02-28)* diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -133,4 +133,10 @@ .. branch: ast-issue1673 fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field \ No newline at end of file +there is missing field + +.. branch: issue1514 +Fix issues with reimporting builtin modules + +.. 
branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -339,6 +339,9 @@ return 'internal subclass of %s' % (Class.__name__,) wrappable_class_name._annspecialcase_ = 'specialize:memo' +class CannotHaveLock(Exception): + """Raised by space.allocate_lock() if we're translating.""" + # ____________________________________________________________ class ObjSpace(object): @@ -442,10 +445,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + assert reuse try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -460,15 +464,25 @@ raise oefmt(self.w_SystemError, "getbuiltinmodule() called with non-builtin module %s", name) + + # Add the module to sys.modules and initialize the module. The + # order is important to avoid recursions. + from pypy.interpreter.module import Module + if isinstance(w_mod, Module): + if not reuse and w_mod.startup_called: + # create a copy of the module. (see issue1514) eventlet + # patcher relies on this behaviour. 
+ w_mod2 = self.wrap(Module(self, w_name)) + self.setitem(w_modules, w_name, w_mod2) + w_mod.getdict(self) # unlazy w_initialdict + self.call_method(w_mod2.getdict(self), 'update', + w_mod.w_initialdict) + return w_mod2 + self.setitem(w_modules, w_name, w_mod) + w_mod.init(self) else: - # Initialize the module - from pypy.interpreter.module import Module - if isinstance(w_mod, Module): - w_mod.init(self) - - # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) - return w_mod + return w_mod def get_builtinmodule_to_install(self): """NOT_RPYTHON""" @@ -676,6 +690,11 @@ def __allocate_lock(self): from rpython.rlib.rthread import allocate_lock, error + # hack: we can't have prebuilt locks if we're translating. + # In this special situation we should just not lock at all + # (translation is not multithreaded anyway). + if not we_are_translated() and self.config.translating: + raise CannotHaveLock() try: return allocate_lock() except error: diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -14,6 +14,7 @@ # after startup(). 
w_initialdict = None lazy = False + submodule_name = None def __init__(self, space, w_name): """ NOT_RPYTHON """ @@ -31,6 +32,8 @@ space = self.space name = space.unwrap(self.w_name) for sub_name, module_cls in self.submodules.iteritems(): + if module_cls.submodule_name is None: + module_cls.submodule_name = sub_name module_name = space.wrap("%s.%s" % (name, sub_name)) m = module_cls(space, module_name) m.install() @@ -134,6 +137,8 @@ cls.loaders = loaders = {} pkgroot = cls.__module__ appname = cls.get_applevel_name() + if cls.submodule_name is not None: + appname += '.%s' % (cls.submodule_name,) for name, spec in cls.interpleveldefs.items(): loaders[name] = getinterpevalloader(pkgroot, spec) for name, spec in cls.appleveldefs.items(): diff --git a/pypy/module/__pypy__/app_signal.py b/pypy/module/__pypy__/app_signal.py --- a/pypy/module/__pypy__/app_signal.py +++ b/pypy/module/__pypy__/app_signal.py @@ -1,4 +1,9 @@ -import __pypy__.thread +from . import thread +# ^^ relative import of __pypy__.thread. Note that some tests depend on +# this (test_enable_signals in test_signal.py) to work properly, +# otherwise they get caught in some deadlock waiting for the import +# lock... + class SignalsEnabled(object): '''A context manager to use in non-main threads: @@ -8,7 +13,7 @@ that is within a "with signals_enabled:". 
This other thread should be ready to handle unexpected exceptions that the signal handler might raise --- notably KeyboardInterrupt.''' - __enter__ = __pypy__.thread._signals_enter - __exit__ = __pypy__.thread._signals_exit + __enter__ = thread._signals_enter + __exit__ = thread._signals_exit signals_enabled = SignalsEnabled() diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -8,6 +8,7 @@ def test_signal(self): from __pypy__ import thread + assert type(thread.signals_enabled).__module__ == '__pypy__.thread' with thread.signals_enabled: pass # assert did not crash diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py --- a/pypy/module/_multibytecodec/c_codecs.py +++ b/pypy/module/_multibytecodec/c_codecs.py @@ -1,7 +1,6 @@ import py from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir UNICODE_REPLACEMENT_CHARACTER = u'\uFFFD' @@ -15,7 +14,7 @@ return 'EncodeDecodeError(%r, %r, %r)' % (self.start, self.end, self.reason) -srcdir = py.path.local(cdir) +srcdir = py.path.local(__file__).dirpath() codecs = [ # _codecs_cn diff --git a/rpython/translator/c/src/cjkcodecs/README b/pypy/module/_multibytecodec/src/cjkcodecs/README rename from rpython/translator/c/src/cjkcodecs/README rename to pypy/module/_multibytecodec/src/cjkcodecs/README diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_cn.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c rename from rpython/translator/c/src/cjkcodecs/_codecs_cn.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_hk.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c rename from rpython/translator/c/src/cjkcodecs/_codecs_hk.c rename to 
pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c rename from rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_jp.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c rename from rpython/translator/c/src/cjkcodecs/_codecs_jp.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_kr.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c rename from rpython/translator/c/src/cjkcodecs/_codecs_kr.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_tw.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c rename from rpython/translator/c/src/cjkcodecs/_codecs_tw.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c diff --git a/rpython/translator/c/src/cjkcodecs/alg_jisx0201.h b/pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h rename from rpython/translator/c/src/cjkcodecs/alg_jisx0201.h rename to pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h diff --git a/rpython/translator/c/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h rename from rpython/translator/c/src/cjkcodecs/cjkcodecs.h rename to pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h diff --git a/rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h b/pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h rename from rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h rename to pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_cn.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h rename from rpython/translator/c/src/cjkcodecs/mappings_cn.h rename to 
pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_hk.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h rename from rpython/translator/c/src/cjkcodecs/mappings_hk.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h rename from rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jp.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h rename from rpython/translator/c/src/cjkcodecs/mappings_jp.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_kr.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h rename from rpython/translator/c/src/cjkcodecs/mappings_kr.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_tw.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h rename from rpython/translator/c/src/cjkcodecs/mappings_tw.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c rename from rpython/translator/c/src/cjkcodecs/multibytecodec.c rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h rename from rpython/translator/c/src/cjkcodecs/multibytecodec.h rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ 
b/pypy/module/cppyy/test/conftest.py @@ -1,5 +1,6 @@ import py + at py.test.mark.tryfirst def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: py.test.skip("genreflex is not installed") diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -2,13 +2,13 @@ Implementation of the interpreter-level default import logic. """ -import sys, os, stat +import sys, os, stat, genericpath from pypy.interpreter.module import Module from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, generic_new_descr from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.baseobjspace import W_Root, CannotHaveLock from pypy.interpreter.eval import Code from pypy.interpreter.pycode import PyCode from rpython.rlib import streamio, jit @@ -528,7 +528,8 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) - if os.path.isdir(filepart) and case_ok(filepart): + # os.path.isdir on win32 is not rpython when pywin32 installed + if genericpath.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) if modtype in (PY_SOURCE, PY_COMPILED): @@ -585,7 +586,8 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None @@ -759,26 +761,14 @@ me = self.space.getexecutioncontext() # used as thread ident return self.lockowner is me - def _can_have_lock(self): - # hack: we can't have self.lock != None during translation, - # because prebuilt lock objects are not allowed. 
In this - # special situation we just don't lock at all (translation is - # not multithreaded anyway). - if we_are_translated(): - return True # we need a lock at run-time - elif self.space.config.translating: - assert self.lock is None - return False - else: - return True # in py.py - def acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock if self.lock is None: - if not self._can_have_lock(): + try: + self.lock = self.space.allocate_lock() + except CannotHaveLock: return - self.lock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is me: pass # already acquired by the current thread @@ -796,7 +786,7 @@ # Too bad. This situation can occur if a fork() occurred # with the import lock held, and we're the child. return - if not self._can_have_lock(): + if self.lock is None: # CannotHaveLock occurred return space = self.space raise OperationError(space.w_RuntimeError, diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -219,7 +219,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = marshal.loads @@ -239,7 +238,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -652,7 +652,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -661,19 +660,18 @@ def 
test_reimport_builtin(self): import imp, sys, time - skip("fix me") oldpath = sys.path - time.tzset = "" + time.tzname = "" del sys.modules['time'] import time as time1 assert sys.modules['time'] is time1 - assert time.tzset == "" + assert time.tzname == "" - imp.reload(time1) # don't leave a broken time.tzset behind + imp.reload(time1) # don't leave a broken time.tzname behind import time - assert time.tzset != "" + assert time.tzname != "" def test_reload_infinite(self): import infinite_reload diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,6 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', + 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import math import _numpypy diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -37,7 +37,7 @@ "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] -TWO_ARG_FUNCTIONS_OR_NONE = ['view'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -388,6 +388,8 @@ w_res = w_lhs.descr_mul(interp.space, w_rhs) elif self.name == '-': w_res = w_lhs.descr_sub(interp.space, w_rhs) + elif self.name == '**': + w_res = w_lhs.descr_pow(interp.space, w_rhs) elif self.name == '->': if isinstance(w_rhs, FloatObject): w_rhs = IntObject(int(w_rhs.floatval)) @@ -596,6 +598,8 @@ arg = self.args[1].execute(interp) if self.name == 'view': 
w_res = arr.descr_view(interp.space, arg) + elif self.name == 'astype': + w_res = arr.descr_astype(interp.space, arg) else: assert False else: @@ -620,7 +624,7 @@ (':', 'colon'), ('\w+', 'identifier'), ('\]', 'array_right'), - ('(->)|[\+\-\*\/]', 'operator'), + ('(->)|[\+\-\*\/]+', 'operator'), ('=', 'assign'), (',', 'comma'), ('\|', 'pipe'), diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,6 +42,7 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support +from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -90,32 +91,41 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides - self.reset() + self.index = 0 + self.indices = [0] * len(shape) + self.offset = array.start + + @jit.unroll_safe def reset(self): self.index = 0 - self.indices = [0] * len(self.shape_m1) + for i in xrange(self.ndim_m1, -1, -1): + self.indices[i] = 0 self.offset = self.array.start + @jit.unroll_safe def next(self): self.index += 1 for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < self.shape_m1[i]: - self.indices[i] += 1 + idx = self.indices[i] + if idx < self.shape_m1[i]: + self.indices[i] = idx + 1 self.offset += self.strides[i] break else: self.indices[i] = 0 self.offset -= self.backstrides[i] + @jit.unroll_safe def next_skip_x(self, step): assert step >= 0 if step == 0: return self.index += step for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < (self.shape_m1[i] + 1) - step: - self.indices[i] += step + idx = self.indices[i] + if idx < (self.shape_m1[i] + 1) - step: + self.indices[i] = idx + step self.offset += self.strides[i] * step break else: @@ -139,6 +149,39 @@ self.array.setitem(self.offset, elem) +class SliceIterator(ArrayIter): + def __init__(self, arr, strides, backstrides, shape, order="C", + backward=False, 
dtype=None): + if dtype is None: + dtype = arr.implementation.dtype + self.dtype = dtype + self.arr = arr + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.elsize] + strides = strides[1:] + backstrides = backstrides[1:] + shape = shape[1:] + strides.reverse() + backstrides.reverse() + shape.reverse() + size = support.product(shape) + else: + shape = [support.product(shape)] + strides, backstrides = calc_strides(shape, dtype, order) + size = 1 + self.slicesize = support.product(shape) + self.gap = strides + + ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) + + def getslice(self): + from pypy.module.micronumpy.concrete import SliceArray + retVal = SliceArray(self.offset, self.gap, self.backstrides, + [self.slicesize], self.arr.implementation, self.arr, self.dtype) + return retVal + + def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() backstrides = array.get_backstrides() diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -996,7 +996,8 @@ descr_cumsum = _reduce_ufunc_impl('add', cumulative=True) descr_cumprod = _reduce_ufunc_impl('multiply', cumulative=True) - def _reduce_argmax_argmin_impl(op_name): + def _reduce_argmax_argmin_impl(raw_name): + op_name = "arg%s" % raw_name def impl(self, space, w_axis=None, w_out=None): if not space.is_none(w_axis): raise oefmt(space.w_NotImplementedError, @@ -1007,18 +1008,17 @@ if self.get_size() == 0: raise oefmt(space.w_ValueError, "Can't call %s on zero-size arrays", op_name) - op = getattr(loop, op_name) try: - res = op(self) + getattr(self.get_dtype().itemtype, raw_name) except AttributeError: raise oefmt(space.w_NotImplementedError, '%s not implemented for %s', op_name, self.get_dtype().get_name()) - return space.wrap(res) - return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) + return 
space.wrap(getattr(loop, op_name)(self)) + return func_with_new_name(impl, "reduce_%s_impl" % op_name) - descr_argmax = _reduce_argmax_argmin_impl("argmax") - descr_argmin = _reduce_argmax_argmin_impl("argmin") + descr_argmax = _reduce_argmax_argmin_impl("max") + descr_argmin = _reduce_argmax_argmin_impl("min") def descr_int(self, space): if self.get_size() != 1: diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/nditer.py @@ -0,0 +1,595 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.error import OperationError +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.strides import (calculate_broadcast_strides, + shape_agreement, shape_agreement_multiple) +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.concrete import SliceArray +from pypy.module.micronumpy.descriptor import decode_w_dtype +from pypy.module.micronumpy import ufuncs, support + + +class AbstractIterator(object): + def done(self): + raise NotImplementedError("Abstract Class") + + def next(self): + raise NotImplementedError("Abstract Class") + + def getitem(self, space, array): + raise NotImplementedError("Abstract Class") + +class IteratorMixin(object): + _mixin_ = True + def __init__(self, it, op_flags): + self.it = it + self.op_flags = op_flags + + def done(self): + return self.it.done() + + def next(self): + self.it.next() + + def getitem(self, space, array): + return self.op_flags.get_it_item[self.index](space, array, self.it) + + def setitem(self, space, array, val): + xxx + +class BoxIterator(IteratorMixin, AbstractIterator): + index = 0 + +class ExternalLoopIterator(IteratorMixin, AbstractIterator): + index = 1 + +def parse_op_arg(space, name, 
w_op_flags, n, parse_one_arg): + ret = [] + if space.is_w(w_op_flags, space.w_None): + for i in range(n): + ret.append(OpFlag()) + elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + else: + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise OperationError(space.w_ValueError, space.wrap( + '%s must be a tuple or array of per-op flag-tuples' % name)) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) + for i in range(n): + ret.append(op_flag) + return ret + +class OpFlag(object): + def __init__(self): + self.rw = 'r' + self.broadcast = True + self.force_contig = False + self.force_align = False + self.native_byte_order = False + self.tmp_copy = '' + self.allocate = False + self.get_it_item = (get_readonly_item, get_readonly_slice) + +def get_readonly_item(space, array, it): + return space.wrap(it.getitem()) + +def get_readwrite_item(space, array, it): + #create a single-value view (since scalars are not views) + res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) + #it.dtype.setitem(res, 0, it.getitem()) + return W_NDimArray(res) + +def get_readonly_slice(space, array, it): + return W_NDimArray(it.getslice().readonly()) + +def get_readwrite_slice(space, array, it): + return W_NDimArray(it.getslice()) + +def parse_op_flag(space, lst): + op_flag = OpFlag() + for w_item in lst: + item = space.str_w(w_item) + if item == 'readonly': + op_flag.rw = 'r' + elif item == 'readwrite': + op_flag.rw = 'rw' + elif item == 'writeonly': + op_flag.rw = 'w' + elif item == 'no_broadcast': + op_flag.broadcast = False + elif item == 'contig': + op_flag.force_contig = True + elif item == 'aligned': 
+ op_flag.force_align = True + elif item == 'nbo': + op_flag.native_byte_order = True + elif item == 'copy': + op_flag.tmp_copy = 'r' + elif item == 'updateifcopy': + op_flag.tmp_copy = 'rw' + elif item == 'allocate': + op_flag.allocate = True + elif item == 'no_subtype': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"no_subtype" op_flag not implemented yet')) + elif item == 'arraymask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"arraymask" op_flag not implemented yet')) + elif item == 'writemask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"writemask" op_flag not implemented yet')) + else: + raise OperationError(space.w_ValueError, space.wrap( + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == 'r': + op_flag.get_it_item = (get_readonly_item, get_readonly_slice) + elif op_flag.rw == 'rw': + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + elif op_flag.rw == 'w': + # XXX Extra logic needed to make sure writeonly + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + return op_flag + +def parse_func_flags(space, nditer, w_flags): + if space.is_w(w_flags, space.w_None): + return + elif not space.isinstance_w(w_flags, space.w_tuple) and not \ + space.isinstance_w(w_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + 'Iter global flags must be a list or tuple of strings')) + lst = space.listview(w_flags) + for w_item in lst: + if not space.isinstance_w(w_item, space.w_str) and not \ + space.isinstance_w(w_item, space.w_unicode): + typename = space.type(w_item).getname(space) + raise OperationError(space.w_TypeError, space.wrap( + 'expected string or Unicode object, %s found' % typename)) + item = space.str_w(w_item) + if item == 'external_loop': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer external_loop not implemented yet')) + nditer.external_loop = True + elif item == 'buffered': + 
raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer buffered not implemented yet')) + # For numpy compatability + nditer.buffered = True + elif item == 'c_index': + nditer.tracked_index = 'C' + elif item == 'f_index': + nditer.tracked_index = 'F' + elif item == 'multi_index': + nditer.tracked_index = 'multi' + elif item == 'common_dtype': + nditer.common_dtype = True + elif item == 'delay_bufalloc': + nditer.delay_bufalloc = True + elif item == 'grow_inner': + nditer.grow_inner = True + elif item == 'ranged': + nditer.ranged = True + elif item == 'refs_ok': + nditer.refs_ok = True + elif item == 'reduce_ok': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer reduce_ok not implemented yet')) + nditer.reduce_ok = True + elif item == 'zerosize_ok': + nditer.zerosize_ok = True + else: + raise OperationError(space.w_ValueError, space.wrap( + 'Unexpected iterator global flag "%s"' % item)) + if nditer.tracked_index and nditer.external_loop: + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + +def is_backward(imp, order): + if order == 'K' or (order == 'C' and imp.order == 'C'): + return False + elif order =='F' and imp.order == 'C': + return True + else: + raise NotImplementedError('not implemented yet') + +def get_iter(space, order, arr, shape, dtype): + imp = arr.implementation + backward = is_backward(imp, order) + if arr.is_scalar(): + return ArrayIter(imp, 1, [], [], []) + if (imp.strides[0] < imp.strides[-1] and not backward) or \ + (imp.strides[0] > imp.strides[-1] and backward): + # flip the strides. Is this always true for multidimension? 
+ strides = imp.strides[:] + backstrides = imp.backstrides[:] + shape = imp.shape[:] + strides.reverse() + backstrides.reverse() + shape.reverse() + else: + strides = imp.strides + backstrides = imp.backstrides + r = calculate_broadcast_strides(strides, backstrides, imp.shape, + shape, backward) + return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) + +def get_external_loop_iter(space, order, arr, shape): + imp = arr.implementation + backward = is_backward(imp, order) + return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + +def convert_to_array_or_none(space, w_elem): + ''' + None will be passed through, all others will be converted + ''' + if space.is_none(w_elem): + return None + return convert_to_array(space, w_elem) + + +class IndexIterator(object): + def __init__(self, shape, backward=False): + self.shape = shape + self.index = [0] * len(shape) + self.backward = backward + + def next(self): + # TODO It's probably possible to refactor all the "next" method from each iterator + for i in range(len(self.shape) - 1, -1, -1): + if self.index[i] < self.shape[i] - 1: + self.index[i] += 1 + break + else: + self.index[i] = 0 + + def getvalue(self): + if not self.backward: + ret = self.index[-1] + for i in range(len(self.shape) - 2, -1, -1): + ret += self.index[i] * self.shape[i - 1] + else: + ret = self.index[0] + for i in range(1, len(self.shape)): + ret += self.index[i] * self.shape[i - 1] + return ret + +class W_NDIter(W_Root): + + def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, + w_op_axes, w_itershape, w_buffersize, order): + self.order = order + self.external_loop = False + self.buffered = False + self.tracked_index = '' + self.common_dtype = False + self.delay_bufalloc = False + self.grow_inner = False + self.ranged = False + self.refs_ok = False + self.reduce_ok = False + self.zerosize_ok = False + self.index_iter = None + self.done = False + self.first_next = True + self.op_axes = [] + 
# convert w_seq operands to a list of W_NDimArray + if space.isinstance_w(w_seq, space.w_tuple) or \ + space.isinstance_w(w_seq, space.w_list): + w_seq_as_list = space.listview(w_seq) + self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + else: + self.seq =[convert_to_array(space, w_seq)] + + parse_func_flags(space, self, w_flags) + self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, + len(self.seq), parse_op_flag) + # handle w_op_axes + if not space.is_none(w_op_axes): + self.set_op_axes(space, w_op_axes) + + # handle w_op_dtypes part 1: creating self.dtypes list from input + if not space.is_none(w_op_dtypes): + w_seq_as_list = space.listview(w_op_dtypes) + self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] + if len(self.dtypes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap( + "op_dtypes must be a tuple/list matching the number of ops")) + else: + self.dtypes = [] + + # handle None or writable operands, calculate my shape + self.iters=[] + outargs = [i for i in range(len(self.seq)) \ + if self.seq[i] is None or self.op_flags[i].rw == 'w'] + if len(outargs) > 0: + out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) + else: + out_shape = None + self.shape = iter_shape = shape_agreement_multiple(space, self.seq, + shape=out_shape) + if len(outargs) > 0: + # Make None operands writeonly and flagged for allocation + if len(self.dtypes) > 0: + out_dtype = self.dtypes[outargs[0]] + else: + out_dtype = None + for i in range(len(self.seq)): + if self.seq[i] is None: + self.op_flags[i].get_it_item = (get_readwrite_item, + get_readwrite_slice) + self.op_flags[i].allocate = True + continue + if self.op_flags[i].rw == 'w': + continue + out_dtype = ufuncs.find_binop_result_dtype(space, + self.seq[i].get_dtype(), out_dtype) + for i in outargs: + if self.seq[i] is None: + # XXX can we postpone allocation to later? 
+ self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) + else: + if not self.op_flags[i].broadcast: + # Raises if ooutput cannot be broadcast + shape_agreement(space, iter_shape, self.seq[i], False) + + if self.tracked_index != "": + if self.order == "K": + self.order = self.seq[0].implementation.order + if self.tracked_index == "multi": + backward = False + else: + backward = self.order != self.tracked_index + self.index_iter = IndexIterator(iter_shape, backward=backward) + + # handle w_op_dtypes part 2: copy where needed if possible + if len(self.dtypes) > 0: + for i in range(len(self.seq)): + selfd = self.dtypes[i] + seq_d = self.seq[i].get_dtype() + if not selfd: + self.dtypes[i] = seq_d + elif selfd != seq_d: + if not 'r' in self.op_flags[i].tmp_copy: + raise OperationError(space.w_TypeError, space.wrap( + "Iterator operand required copying or buffering for operand %d" % i)) + impl = self.seq[i].implementation + new_impl = impl.astype(space, selfd) + self.seq[i] = W_NDimArray(new_impl) + else: + #copy them from seq + self.dtypes = [s.get_dtype() for s in self.seq] + + # create an iterator for each operand + if self.external_loop: + for i in range(len(self.seq)): + self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) + else: + for i in range(len(self.seq)): + self.iters.append(BoxIterator(get_iter(space, self.order, + self.seq[i], iter_shape, self.dtypes[i]), + self.op_flags[i])) + def set_op_axes(self, space, w_op_axes): + if space.len_w(w_op_axes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + op_axes = space.listview(w_op_axes) + l = -1 + for w_axis in op_axes: + if not space.is_none(w_axis): + axis_len = space.len_w(w_axis) + if l == -1: + l = axis_len + elif axis_len != l: + raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) + 
self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + if l == -1: + raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise Exception('xxx TODO') + # Check that values make sense: + # - in bounds for each operand + # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions + # - no repeat axis + # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0 + + def descr_iter(self, space): + return space.wrap(self) + + def descr_getitem(self, space, w_idx): + idx = space.int_w(w_idx) + try: + ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + except IndexError: + raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) + return ret + + def descr_setitem(self, space, w_idx, w_value): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_len(self, space): + space.wrap(len(self.iters)) + + def descr_next(self, space): + for it in self.iters: + if not it.done(): + break + else: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + res = [] + if self.index_iter: + if not self.first_next: + self.index_iter.next() + else: + self.first_next = False + for i in range(len(self.iters)): + res.append(self.iters[i].getitem(space, self.seq[i])) + self.iters[i].next() + if len(res) <2: + return res[0] + return space.newtuple(res) + + def iternext(self): + if self.index_iter: + self.index_iter.next() + for i in range(len(self.iters)): + self.iters[i].next() + for it in self.iters: + if not it.done(): + break + else: + self.done = True + return self.done + return self.done + + def descr_iternext(self, space): + return space.wrap(self.iternext()) + + def descr_copy(self, space): + raise 
OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_debug_print(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_enable_external_loop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + @unwrap_spec(axis=int) + def descr_remove_axis(self, space, axis): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_remove_multi_index(self, space, w_multi_index): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_reset(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_operands(self, space): + l_w = [] + for op in self.seq: + l_w.append(op.descr_view(space)) + return space.newlist(l_w) + + def descr_get_dtypes(self, space): + res = [None] * len(self.seq) + for i in range(len(self.seq)): + res[i] = self.seq[i].descr_get_dtype(space) + return space.newtuple(res) + + def descr_get_finished(self, space): + return space.wrap(self.done) + + def descr_get_has_delayed_bufalloc(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_has_index(self, space): + return space.wrap(self.tracked_index in ["C", "F"]) + + def descr_get_index(self, space): + if not self.tracked_index in ["C", "F"]: + raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.wrap(self.index_iter.getvalue()) + + def descr_get_has_multi_index(self, space): + return space.wrap(self.tracked_index == "multi") + + def descr_get_multi_index(self, space): + if not self.tracked_index == "multi": + raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a 
multi-index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.newtuple([space.wrap(x) for x in self.index_iter.index]) + + def descr_get_iterationneedsapi(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_iterindex(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_itersize(self, space): + return space.wrap(support.product(self.shape)) + + def descr_get_itviews(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_ndim(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_nop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_shape(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_value(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + + at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes = WrappedDefault(None), order=str, + w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), + w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) +def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order='K'): + return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order) + +W_NDIter.typedef = TypeDef( + 'nditer', + __iter__ = interp2app(W_NDIter.descr_iter), + __getitem__ = interp2app(W_NDIter.descr_getitem), + __setitem__ = interp2app(W_NDIter.descr_setitem), + __len__ = interp2app(W_NDIter.descr_len), + + next = interp2app(W_NDIter.descr_next), + iternext = 
interp2app(W_NDIter.descr_iternext), + copy = interp2app(W_NDIter.descr_copy), + debug_print = interp2app(W_NDIter.descr_debug_print), + enable_external_loop = interp2app(W_NDIter.descr_enable_external_loop), + remove_axis = interp2app(W_NDIter.descr_remove_axis), + remove_multi_index = interp2app(W_NDIter.descr_remove_multi_index), + reset = interp2app(W_NDIter.descr_reset), + + operands = GetSetProperty(W_NDIter.descr_get_operands), + dtypes = GetSetProperty(W_NDIter.descr_get_dtypes), + finished = GetSetProperty(W_NDIter.descr_get_finished), + has_delayed_bufalloc = GetSetProperty(W_NDIter.descr_get_has_delayed_bufalloc), + has_index = GetSetProperty(W_NDIter.descr_get_has_index), + index = GetSetProperty(W_NDIter.descr_get_index), + has_multi_index = GetSetProperty(W_NDIter.descr_get_has_multi_index), + multi_index = GetSetProperty(W_NDIter.descr_get_multi_index), + iterationneedsapi = GetSetProperty(W_NDIter.descr_get_iterationneedsapi), + iterindex = GetSetProperty(W_NDIter.descr_get_iterindex), + itersize = GetSetProperty(W_NDIter.descr_get_itersize), + itviews = GetSetProperty(W_NDIter.descr_get_itviews), + ndim = GetSetProperty(W_NDIter.descr_get_ndim), + nop = GetSetProperty(W_NDIter.descr_get_nop), + shape = GetSetProperty(W_NDIter.descr_get_shape), + value = GetSetProperty(W_NDIter.descr_get_value), +) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,14 +282,16 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list): +def shape_agreement_multiple(space, array_list, shape=None): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - shape = array_list[0].get_shape() - for arr in array_list[1:]: + for arr in array_list: if not space.is_none(arr): - shape = shape_agreement(space, shape, arr) + if shape is None: + shape = arr.get_shape() + else: + shape = shape_agreement(space, 
shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -319,3 +319,14 @@ ''') results = interp.results[0] assert isinstance(results, W_NDimArray) + + def test_astype_dtype(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = int + c = astype(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) + assert results.get_dtype().is_int() diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -0,0 +1,302 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestNDIter(BaseNumpyAppTest): + def test_basic(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + + for x in nditer(a.T): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + + def test_order(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a, order='C'): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + for x in nditer(a, order='F'): + r.append(x) + assert r == [0, 3, 1, 4, 2, 5] + + def test_readwrite(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + for x in nditer(a, op_flags=['readwrite']): + x[...] 
= 2 * x + assert (a == [[0, 2, 4], [6, 8, 10]]).all() + + def test_external_loop(self): + from numpy import arange, nditer, array + a = arange(24).reshape(2, 3, 4) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + r = [] + n = 0 + for x in nditer(a, flags=['external_loop']): + r.append(x) + n += 1 + assert n == 1 + assert (array(r) == range(24)).all() + r = [] + n = 0 + for x in nditer(a, flags=['external_loop'], order='F'): + r.append(x) + n += 1 + assert n == 12 + assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() + e = raises(ValueError, 'r[0][0] = 0') + assert str(e.value) == 'assignment destination is read-only' + r = [] + for x in nditer(a.T, flags=['external_loop'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1,24) + assert (array(r) == arange(24)).all() + + def test_index(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + + r = [] + it = nditer(a, flags=['c_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] + exc = None + try: + it.index + except ValueError, e: + exc = e + assert exc + + r = [] + it = nditer(a, flags=['f_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + + @py.test.mark.xfail(reason="Fortran order not implemented") + def test_iters_with_different_order(self): + from numpy import nditer, array + + a = array([[1, 2], [3, 4]], order="C") + b = array([[1, 2], [3, 4]], order="F") + + it = nditer([a, b]) + + assert list(it) == zip(range(1, 5), range(1, 5)) + + def test_interface(self): + from numpy import arange, nditer, zeros + import sys + a = 
arange(6).reshape(2,3) + r = [] + it = nditer(a, flags=['f_index']) + while not it.finished: + r.append((it[0], it.index)) + it.iternext() + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, 'it[0] = 3') + skip('nditer.__setitem__ not implmented') + while not it.finished: + it[0] = it.multi_index[1] - it.multi_index[0] + it.iternext() + assert (a == [[0, 1, 2], [-1, 0, 1]]).all() + # b = zeros((2, 3)) + # exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) + # assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") + + def test_buffered(self): + from numpy import arange, nditer, array + a = arange(6).reshape(2,3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered']) + skip('nditer buffered not implmented') + r = [] + for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1, 6) + assert (array_r == [0, 3, 1, 4, 2, 5]).all() + + def test_op_dtype(self): + from numpy import arange, nditer, sqrt, array + a = arange(6).reshape(2,3) - 3 + exc = raises(TypeError, nditer, a, op_dtypes=['complex']) + assert str(exc.value).startswith("Iterator operand required copying or buffering") + r = [] + for x in nditer(a, op_flags=['readonly','copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + r = [] + for x in nditer(a, op_flags=['copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + multi = nditer([None, array([2, 3], dtype='int64'), array(2., dtype='double')], + op_dtypes = ['int64', 'int64', 'float64'], + 
op_flags = [['writeonly', 'allocate'], ['readonly'], ['readonly']]) + for a, b, c in multi: + a[...] = b * c + assert (multi.operands[0] == [4, 6]).all() + + def test_casting(self): + from numpy import arange, nditer + import sys + a = arange(6.) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + skip('nditer casting not implemented yet') + exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], + casting='same_kind'): + r.append(x) + assert r == [0., 1., 2., 3., 4., 5.] + exc = raises(TypeError, nditer, a, flags=['buffered'], + op_dtypes=['int32'], casting='same_kind') + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + b = arange(6) + exc = raises(TypeError, nditer, b, flags=['buffered'], op_dtypes=['float64'], + op_flags=['readwrite'], casting='same_kind') + assert str(exc.value).startswith("Iterator requested dtype could not be cast") + + def test_broadcast(self): + from numpy import arange, nditer + a = arange(3) + b = arange(6).reshape(2,3) + r = [] + it = nditer([a, b]) + assert it.itersize == 6 + for x,y in it: + r.append((x, y)) + assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] + a = arange(2) + exc = raises(ValueError, nditer, [a, b]) + assert str(exc.value).find('shapes (2) (2,3)') > 0 + + def test_outarg(self): + from numpy import nditer, zeros, arange + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [1, 2], flags=['external_loop']) + skip('nditer external_loop not implmented') + + def square1(a): + it = nditer([a, None]) + for x,y in it: + y[...] 
= x*x + return it.operands[1] + assert (square1([1, 2, 3]) == [1, 4, 9]).all() + + def square2(a, out=None): + it = nditer([a, out], flags=['external_loop', 'buffered'], + op_flags=[['readonly'], + ['writeonly', 'allocate', 'no_broadcast']]) + for x,y in it: + y[...] = x*x + return it.operands[1] + assert (square2([1, 2, 3]) == [1, 4, 9]).all() + b = zeros((3, )) + c = square2([1, 2, 3], out=b) + assert (c == [1., 4., 9.]).all() + assert (b == c).all() + exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) + assert str(exc.value).find('cannot be broadcasted') > 0 + + def test_outer_product(self): + from numpy import nditer, arange + a = arange(3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + b = arange(8).reshape(2,4) + it = nditer([a, b, None], flags=['external_loop'], + op_axes=[[0, -1, -1], [-1, 0, 1], None]) + for x, y, z in it: + z[...] = x*y + assert it.operands[2].shape == (3, 2, 4) + for i in range(a.size): + assert (it.operands[2][i] == a[i]*b).all() + + def test_reduction(self): + from numpy import nditer, arange, array + import sys + a = arange(24).reshape(2, 3, 4) + b = array(0) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) + skip('nditer reduce_ok not implemented yet') + #reduction operands must be readwrite + for x, y in nditer([a, b], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite']]): + y[...] += x + assert b == 276 + assert b == a.sum() + + # reduction and allocation requires op_axes and initialization + it = nditer([a, None], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + for x, y in it: + y[...] 
+= x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + # previous example with buffering, requires more flags and reset + it = nditer([a, None], flags=['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + it.reset() + for x, y in it: + y[...] += x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + def test_get_dtypes(self): + from numpy import array, nditer + x = array([1, 2]) + y = array([1.0, 2.0]) + assert nditer([x, y]).dtypes == (x.dtype, y.dtype) + + def test_multi_index(self): + import numpy as np + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + res = [] + while not it.finished: + res.append((it[0], it.multi_index)) + it.iternext() + assert res == [(0, (0, 0)), (1, (0, 1)), + (2, (0, 2)), (3, (1, 0)), + (4, (1, 1)), (5, (1, 2))] diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -94,14 +94,58 @@ a -> 3 """ - def test_floatadd(self): + def test_float_add(self): result = self.run("float_add") assert result == 3 + 3 - py.test.skip("don't run for now") - self.check_simple_loop({"raw_load": 1, "float_add": 1, - "raw_store": 1, "int_add": 1, - "int_ge": 1, "guard_false": 1, "jump": 1, - 'arraylen_gc': 1}) + self.check_trace_count(1) + self.check_simple_loop({ + 'float_add': 1, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, + 'guard_false': 1, + 'guard_not_invalidated': 1, + 'guard_true': 3, + 'int_add': 9, + 'int_ge': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + 'setfield_gc': 6, + }) + + def define_pow(): + return """ + a = |30| ** 2 + a -> 3 + """ + + def test_pow(self): + result = self.run("pow") + 
assert result == 3 ** 2 + self.check_trace_count(1) + self.check_simple_loop({ + 'call': 3, + 'float_add': 1, + 'float_eq': 3, + 'float_mul': 2, + 'float_ne': 1, + 'getarrayitem_gc': 3, + 'getfield_gc': 7, + 'guard_false': 4, + 'guard_not_invalidated': 1, + 'guard_true': 5, + 'int_add': 9, + 'int_ge': 1, + 'int_is_true': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + 'setfield_gc': 6, + }) def define_sum(): return """ @@ -482,16 +526,19 @@ assert result == 1.0 self.check_trace_count(1) self.check_simple_loop({ - 'call': 2, - 'getfield_gc': 2, - 'guard_no_exception': 2, + 'getarrayitem_gc': 2, + 'getfield_gc': 4, 'guard_not_invalidated': 1, - 'guard_true': 1, + 'guard_true': 3, + 'int_add': 6, 'int_gt': 1, + 'int_lt': 2, 'int_sub': 1, 'jump': 1, 'raw_load': 1, 'raw_store': 1, + 'setarrayitem_gc': 2, + 'setfield_gc': 4, }) def define_dot(): @@ -506,36 +553,41 @@ result = self.run("dot") assert result == 184 self.check_trace_count(3) - self.check_simple_loop({'float_add': 1, - 'float_mul': 1, - 'guard_not_invalidated': 1, - 'guard_true': 1, - 'int_add': 3, - 'int_lt': 1, - 'jump': 1, - 'raw_load': 2}) - self.check_resops({'arraylen_gc': 1, - 'call': 3, - 'float_add': 2, - 'float_mul': 2, - 'getfield_gc': 26, - 'getfield_gc_pure': 24, - 'guard_class': 4, - 'guard_false': 2, - 'guard_no_exception': 3, - 'guard_nonnull': 12, - 'guard_nonnull_class': 4, - 'guard_not_invalidated': 2, - 'guard_true': 9, - 'guard_value': 4, - 'int_add': 6, - 'int_ge': 3, - 'int_lt': 4, - 'jump': 3, - 'new_array': 1, - 'raw_load': 6, - 'raw_store': 1, - 'setfield_gc': 3}) + self.check_simple_loop({ + 'float_add': 1, + 'float_mul': 1, + 'guard_not_invalidated': 1, + 'guard_true': 1, + 'int_add': 3, + 'int_lt': 1, + 'jump': 1, + 'raw_load': 2, + }) + self.check_resops({ + 'float_add': 2, + 'float_mul': 2, + 'getarrayitem_gc': 7, + 'getarrayitem_gc_pure': 15, + 'getfield_gc': 35, + 'getfield_gc_pure': 39, + 'guard_class': 4, + 'guard_false': 14, + 
'guard_nonnull': 12, + 'guard_nonnull_class': 4, + 'guard_not_invalidated': 2, + 'guard_true': 13, + 'guard_value': 4, + 'int_add': 25, + 'int_ge': 4, + 'int_le': 8, + 'int_lt': 11, + 'int_sub': 4, + 'jump': 3, + 'raw_load': 6, + 'raw_store': 1, + 'setarrayitem_gc': 10, + 'setfield_gc': 14, + }) def define_argsort(): return """ diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -442,6 +442,7 @@ return v1 % v2 @simple_binary_op + @jit.look_inside_iff(lambda self, v1, v2: jit.isconstant(v2)) def pow(self, v1, v2): if v2 < 0: return 0 diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -3,15 +3,18 @@ indirection is introduced to make the version tag change less often. """ +from rpython.rlib import jit, rerased + from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.dictmultiobject import create_iterator_classes -from pypy.objspace.std.dictmultiobject import DictStrategy, _never_equal_to_string -from pypy.objspace.std.dictmultiobject import ObjectDictStrategy -from rpython.rlib import jit, rerased +from pypy.objspace.std.dictmultiobject import ( + DictStrategy, ObjectDictStrategy, _never_equal_to_string, + create_iterator_classes) + class VersionTag(object): pass + class ModuleCell(W_Root): def __init__(self, w_value=None): self.w_value = w_value @@ -19,11 +22,17 @@ def __repr__(self): return "" % (self.w_value, ) + def unwrap_cell(w_value): if isinstance(w_value, ModuleCell): return w_value.w_value return w_value + +def _wrapkey(space, key): + return space.wrap(key) + + class ModuleDictStrategy(DictStrategy): erase, unerase = rerased.new_erasing_pair("modulecell") @@ -55,7 +64,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space if space.is_w(space.type(w_key), space.w_str): - self.setitem_str(w_dict, self.space.str_w(w_key), w_value) 
+ self.setitem_str(w_dict, space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) w_dict.setitem(w_key, w_value) @@ -66,8 +75,8 @@ cell.w_value = w_value return if cell is not None: - # If the new value and the current value are the same, don't create a - # level of indirection, or mutate the version. + # If the new value and the current value are the same, don't + # create a level of indirection, or mutate the version. if self.space.is_w(w_value, cell): return w_value = ModuleCell(w_value) @@ -121,8 +130,8 @@ return w_dict.getitem(w_key) def getitem_str(self, w_dict, key): - w_res = self.getdictvalue_no_unwrapping(w_dict, key) - return unwrap_cell(w_res) + cell = self.getdictvalue_no_unwrapping(w_dict, key) + return unwrap_cell(cell) def w_keys(self, w_dict): space = self.space @@ -136,37 +145,43 @@ def items(self, w_dict): space = self.space iterator = self.unerase(w_dict.dstorage).iteritems - return [space.newtuple([space.wrap(key), unwrap_cell(cell)]) - for key, cell in iterator()] + return [space.newtuple([_wrapkey(space, key), unwrap_cell(cell)]) + for key, cell in iterator()] def clear(self, w_dict): self.unerase(w_dict.dstorage).clear() self.mutated() def popitem(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - key, w_value = d.popitem() + key, cell = d.popitem() self.mutated() - return self.space.wrap(key), unwrap_cell(w_value) + return _wrapkey(space, key), unwrap_cell(cell) def switch_to_object_strategy(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - strategy = self.space.fromcache(ObjectDictStrategy) + strategy = space.fromcache(ObjectDictStrategy) d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): - d_new[self.space.wrap(key)] = unwrap_cell(cell) + d_new[_wrapkey(space, key)] = unwrap_cell(cell) w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): return self.unerase(w_dict.dstorage).iterkeys() + def 
getitervalues(self, w_dict): return self.unerase(w_dict.dstorage).itervalues() + def getiteritems(self, w_dict): return self.unerase(w_dict.dstorage).iteritems() - def wrapkey(space, key): - return space.wrap(key) + + wrapkey = _wrapkey + def wrapvalue(space, value): return unwrap_cell(value) + create_iterator_classes(ModuleDictStrategy) diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -1,12 +1,19 @@ -## ---------------------------------------------------------------------------- -## dict strategy (see dictmultiobject.py) +"""dict implementation specialized for keyword argument dicts. -from rpython.rlib import rerased, jit +Based on two lists containing unwrapped key value pairs. +""" + +from rpython.rlib import jit, rerased + from pypy.objspace.std.dictmultiobject import ( DictStrategy, EmptyDictStrategy, ObjectDictStrategy, UnicodeDictStrategy, create_iterator_classes) +def _wrapkey(space, key): + return space.wrap(key) + + class EmptyKwargsDictStrategy(EmptyDictStrategy): def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) @@ -21,7 +28,7 @@ unerase = staticmethod(unerase) def wrap(self, key): - return self.space.wrap(key) + return _wrapkey(self.space, key) def unwrap(self, wrapped): return self.space.str_w(wrapped) @@ -118,16 +125,14 @@ def items(self, w_dict): space = self.space keys, values_w = self.unerase(w_dict.dstorage) - result = [] - for i in range(len(keys)): - result.append(space.newtuple([self.wrap(keys[i]), values_w[i]])) - return result + return [space.newtuple([self.wrap(keys[i]), values_w[i]]) + for i in range(len(keys))] def popitem(self, w_dict): keys, values_w = self.unerase(w_dict.dstorage) key = keys.pop() w_value = values_w.pop() - return (self.wrap(key), w_value) + return self.wrap(key), w_value def clear(self, w_dict): w_dict.dstorage = self.get_empty_storage() @@ -165,17 +170,15 @@ keys 
= self.unerase(w_dict.dstorage)[0] return iter(range(len(keys))) - def wrapkey(space, key): - return space.wrap(key) + wrapkey = _wrapkey def next_item(self): strategy = self.strategy assert isinstance(strategy, KwargsDictStrategy) for i in self.iterator: - keys, values_w = strategy.unerase( - self.dictimplementation.dstorage) - return self.space.wrap(keys[i]), values_w[i] + keys, values_w = strategy.unerase(self.dictimplementation.dstorage) + return _wrapkey(self.space, keys[i]), values_w[i] else: return None, None diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -683,7 +683,7 @@ raise OperationError(space.w_ValueError, space.wrap("list modified during sort")) -find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') +find_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'list.find') class ListStrategy(object): @@ -709,8 +709,9 @@ space = self.space i = start # needs to be safe against eq_w mutating stuff + tp = space.type(w_item) while i < stop and i < w_list.length(): - find_jmp.jit_merge_point() + find_jmp.jit_merge_point(tp=tp) if space.eq_w(w_list.getitem(i), w_item): return i i += 1 diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1070,6 +1070,7 @@ def _intersect_wrapped(self, w_set, w_other): result = newset(self.space) for key in self.unerase(w_set.sstorage): + self.intersect_jmp.jit_merge_point() w_key = self.wrap(key) if w_other.has_key(w_key): result[w_key] = None @@ -1180,6 +1181,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 'auto', + name='set(bytes).intersect') + def get_empty_storage(self): return self.erase({}) @@ -1216,6 +1220,9 @@ erase = staticmethod(erase) unerase = staticmethod(unerase) + intersect_jmp = jit.JitDriver(greens = [], reds = 
'auto', + name='set(unicode).intersect') From noreply at buildbot.pypy.org Fri Apr 18 02:43:14 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:14 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: progress Message-ID: <20140418004314.1A1F21C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70739:2caa57866049 Date: 2014-04-17 17:41 -0700 http://bitbucket.org/pypy/pypy/changeset/2caa57866049/ Log: progress diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1192,13 +1192,13 @@ w_ann = None if num_annotations: names_w = space.fixedview(self.popvalue()) - w_ann = space.newdict(strdict=True) # XXX: strdict?? + w_ann = space.newdict(strdict=True) for i in range(len(names_w) - 1, -1, -1): space.setitem(w_ann, names_w[i], self.popvalue()) defaultarguments = self.popvalues(posdefaults) w_kw_defs = None if kwdefaults: - w_kw_defs = space.newdict(strdict=True) # XXX: + w_kw_defs = space.newdict(strdict=True) for i in range(kwdefaults - 1, -1, -1): w_name = self.popvalue() w_def = self.popvalue() diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -75,21 +75,18 @@ assert x == 42 def test_list_strategy(self): - py3k_skip("XXX: strategies are currently broken") from __pypy__ import list_strategy l = [1, 2, 3] assert list_strategy(l) == "int" + l = list(range(1, 2)) + assert list_strategy(l) == "int" l = [b"a", b"b", b"c"] assert list_strategy(l) == "bytes" l = ["a", "b", "c"] assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" - l = range(3) - assert list_strategy(l) == "simple_range" - l = range(1, 2) - assert list_strategy(l) == "range" l = [1, "b", 3] assert list_strategy(l) == "object" l = [] diff --git 
a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -16,7 +16,6 @@ W_CType.pack_list_of_items = self._original def test_fast_init_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend LONG = _cffi_backend.new_primitive_type('long') P_LONG = _cffi_backend.new_pointer_type(LONG) @@ -37,7 +36,6 @@ assert buf[2] == 3.3 def test_fast_init_short_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend SHORT = _cffi_backend.new_primitive_type('short') P_SHORT = _cffi_backend.new_pointer_type(SHORT) @@ -50,7 +48,6 @@ raises(OverflowError, _cffi_backend.newp, SHORT_ARRAY, [-40000]) def test_fast_init_longlong_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend import sys large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) @@ -64,7 +61,6 @@ assert buf[3] == large_int def test_fast_init_ushort_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend USHORT = _cffi_backend.new_primitive_type('unsigned short') P_USHORT = _cffi_backend.new_pointer_type(USHORT) @@ -77,18 +73,17 @@ raises(OverflowError, _cffi_backend.newp, USHORT_ARRAY, [-1]) def test_fast_init_ulong_from_list(self): - py3k_skip('XXX: strategies are currently broken') import sys import _cffi_backend ULONG = _cffi_backend.new_primitive_type('unsigned long') P_ULONG = _cffi_backend.new_pointer_type(ULONG) ULONG_ARRAY = _cffi_backend.new_array_type(P_ULONG, None) - buf = _cffi_backend.newp(ULONG_ARRAY, [1, 2, sys.maxint]) + buf = _cffi_backend.newp(ULONG_ARRAY, [1, 2, sys.maxsize]) assert buf[0] == 1 assert buf[1] == 2 - assert buf[2] == sys.maxint + assert buf[2] == sys.maxsize raises(OverflowError, _cffi_backend.newp, ULONG_ARRAY, [-1]) - raises(OverflowError, _cffi_backend.newp, ULONG_ARRAY, [-sys.maxint]) + 
raises(OverflowError, _cffi_backend.newp, ULONG_ARRAY, [-sys.maxsize]) def test_fast_init_cfloat_from_list(self): import _cffi_backend @@ -109,7 +104,6 @@ assert float(buf[1]) == -3.5 def test_fast_init_bool_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend BOOL = _cffi_backend.new_primitive_type('_Bool') P_BOOL = _cffi_backend.new_pointer_type(BOOL) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -621,8 +621,8 @@ l = space.listview_bytes(w_list) if l is not None: if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) + return space.wrapbytes(l[0]) + return space.wrapbytes(self._val(space).join(l)) return self._StringMethods_descr_join(space, w_list) def _join_return_one(self, space, w_obj): diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -30,7 +30,7 @@ def _wrapkey(space, key): - return space.wrap(key) + return space.wrap(key.decode('utf-8')) class ModuleDictStrategy(DictStrategy): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -824,8 +824,7 @@ return self.space.wrap(unwrapped) def unwrap(self, wrapped): - # XXX: bytes_w - return self.space.str_w(wrapped) + return self.space.bytes_w(wrapped) def is_correct_type(self, w_obj): space = self.space @@ -843,15 +842,13 @@ ## assert key is not None ## self.unerase(w_dict.dstorage)[key] = w_value - """ - def getitem(self, w_dict, w_key): - space = self.space - # -- This is called extremely often. 
Hack for performance -- - if type(w_key) is space.StringObjectCls: - return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None) - # -- End of performance hack -- - return AbstractTypedStrategy.getitem(self, w_dict, w_key) - """ + ##def getitem(self, w_dict, w_key): + ## space = self.space + ## # -- This is called extremely often. Hack for performance -- + ## if type(w_key) is space.StringObjectCls: + ## return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None) + ## # -- End of performance hack -- + ## return AbstractTypedStrategy.getitem(self, w_dict, w_key) ##def getitem_str(self, w_dict, key): ## assert key is not None @@ -866,19 +863,19 @@ def wrapkey(space, key): return space.wrapbytes(key) - @jit.look_inside_iff(lambda self, w_dict: - w_dict_unrolling_heuristic(w_dict)) - def view_as_kwargs(self, w_dict): - return (None, None) # XXX: fix me to return unicode keys - d = self.unerase(w_dict.dstorage) - l = len(d) - keys, values = [None] * l, [None] * l - i = 0 - for key, val in d.iteritems(): - keys[i] = key - values[i] = val - i += 1 - return keys, values + ##@jit.look_inside_iff(lambda self, w_dict: + ## w_dict_unrolling_heuristic(w_dict)) + ##def view_as_kwargs(self, w_dict): + ## return (None, None) # XXX: fix me to return unicode keys + ## d = self.unerase(w_dict.dstorage) + ## l = len(d) + ## keys, values = [None] * l, [None] * l + ## i = 0 + ## for key, val in d.iteritems(): + ## keys[i] = key + ## values[i] = val + ## i += 1 + ## return keys, values create_iterator_classes(BytesDictStrategy) @@ -916,17 +913,6 @@ space = self.space # -- This is called extremely often. 
Hack for performance -- if type(w_key) is space.UnicodeObjectCls: - #return self.getitem_str(w_dict, w_key.unwrap(space)) - # XXX: - #key = w_key.unwrap(space) # XXX: - #return self.unerase(w_dict.dstorage).get(key, None) - # XXX: triggers failure because s.unwrapped isn't set o_O - - #assert type(self) is UnicodeDictStrategy - #key = self.unwrap(w_key) - #assert key is not None - #return self.unerase(w_dict.dstorage).get(key, None) - #return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None) return self.unerase(w_dict.dstorage).get(w_key.unwrap(space), None) # -- End of performance hack -- return AbstractTypedStrategy.getitem(self, w_dict, w_key) @@ -938,9 +924,8 @@ def listview_unicode(self, w_dict): return self.unerase(w_dict.dstorage).keys() - #def w_keys(self, w_dict): - # # XXX: we can completely kill w_keys on py3k - # return self.space.newlist_str(self.listview_str(w_dict)) + def w_keys(self, w_dict): + return self.space.newlist_unicode(self.listview_unicode(w_dict)) def wrapkey(space, key): return space.wrap(key) @@ -977,8 +962,6 @@ def is_correct_type(self, w_obj): from pypy.objspace.std.intobject import W_IntObject - #space = self.space - #return space.is_w(space.type(w_obj), space.w_int) return type(w_obj) is W_IntObject def _never_equal_to(self, w_lookup_type): diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -40,7 +40,6 @@ def is_correct_type(self, w_obj): space = self.space return space.is_w(space.type(w_obj), space.w_unicode) - #return type(w_obj) is space.UnicodeObjectCls def _never_equal_to(self, w_lookup_type): return False diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -80,7 +80,6 @@ return space.fromcache(IntegerListStrategy) # check for strings - # XXX: StringListStrategy is currently broken for w_obj in list_w: if not 
type(w_obj) is W_BytesObject: break diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,3 +1,4 @@ +# encoding: utf-8 import py from pypy.objspace.std.celldict import ModuleDictStrategy @@ -70,7 +71,6 @@ assert "ModuleDictStrategy" in __pypy__.internal_repr(obj) def test_check_module_uses_module_dict(self): - #py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") m = type(__builtins__)("abc") self.impl_used(m.__dict__) @@ -133,9 +133,12 @@ def setup_class(cls): if cls.runappdirect: py.test.skip("__repr__ doesn't work on appdirect") - strategy = ModuleDictStrategy(cls.space) + + def setup_method(self, method): + space = self.space + strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - cls.w_d = W_DictMultiObject(cls.space, strategy, storage) + self.w_d = W_DictMultiObject(space, strategy, storage) def test_popitem(self): import __pypy__ @@ -148,7 +151,6 @@ assert x == ("a", 3) def test_degenerate(self): - #py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") import __pypy__ d = self.d @@ -157,3 +159,23 @@ del d["a"] d[object()] = 5 assert list(d.values()) == [5] + + def test_unicode(self): + import __pypy__ + + d = self.d + assert "ModuleDict" in __pypy__.internal_repr(d) + d['λ'] = True + assert "ModuleDict" in __pypy__.internal_repr(d) + assert list(d) == ['λ'] + assert next(iter(d)) == 'λ' + assert "ModuleDict" in __pypy__.internal_repr(d) + + d['foo'] = 'bar' + assert sorted(d) == ['foo', 'λ'] + assert "ModuleDict" in __pypy__.internal_repr(d) + + o = object() + d[o] = 'baz' + assert set(d) == set(['foo', 'λ', o]) + assert "ObjectDictStrategy" in __pypy__.internal_repr(d) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- 
a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -125,7 +125,6 @@ assert self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) def test_fromkeys_fastpath(self): - py.test.py3k_skip("XXX: list strategies are currently broken") space = self.space w = space.wrap wb = space.wrapbytes @@ -175,8 +174,7 @@ w_d.initialize_content([(wb("a"), w(1)), (wb("b"), w(6))]) w_k = self.space.call_method(w_d, "keys") w_l = self.space.call_function(self.space.w_list, w_k) - #XXX: py.test.py3k_skip("XXX: list strategies are currently broken") - #assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] + assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] # XXX: it would be nice if the test passed without monkeypatch.undo(), # but we need space.newlist_unicode for it @@ -1018,8 +1016,10 @@ #raises(RuntimeError, list, it) -class FakeWrapper(object): +class FakeString(str): + hash_count = 0 + def unwrap(self, space): self.unwrapped = True return str(self) @@ -1028,13 +1028,13 @@ self.hash_count += 1 return str.__hash__(self) -class FakeString(FakeWrapper, str): +class FakeUnicode(unicode): - def __hash__(self): - self.hash_count += 1 - return str.__hash__(self) + hash_count = 0 -class FakeUnicode(FakeWrapper, unicode): + def unwrap(self, space): + self.unwrapped = True + return unicode(self) def __hash__(self): self.hash_count += 1 @@ -1168,7 +1168,7 @@ class BaseTestRDictImplementation: FakeString = FakeUnicode - allows__str = False # XXX: this is maybe not necessary, just add tests to unicode to ensure we're allowing utf-8? + _str_devolves = False def setup_method(self,method): self.fakespace = FakeSpace() @@ -1177,8 +1177,6 @@ self.impl = self.get_impl() def wrapstrorunicode(self, obj): - # XXX: blargh this is all screwed. its referencing FakeString - # and using regular strings to setitem. 
return self.fakespace.wrap(obj) def get_impl(self): @@ -1208,7 +1206,7 @@ else: assert a == self.string2 assert b == 2000 - if self.allows__str: + if not self._str_devolves: result = self.impl.getitem_str(self.string) else: result = self.impl.getitem(self.string) @@ -1219,7 +1217,7 @@ self.impl.setitem(self.string, 1000) assert self.impl.length() == 1 assert self.impl.getitem(self.string) == 1000 - if self.allows__str: + if not self._str_devolves: result = self.impl.getitem_str(self.string) else: result = self.impl.getitem(self.string) @@ -1329,8 +1327,6 @@ class TestUnicodeDictImplementation(BaseTestRDictImplementation): StrategyClass = UnicodeDictStrategy - FakeString = FakeUnicode - allows__str = True def test_str_shortcut(self): self.fill_impl() @@ -1352,6 +1348,7 @@ class TestBytesDictImplementation(BaseTestRDictImplementation): StrategyClass = BytesDictStrategy FakeString = FakeString + _str_devolves = True def wrapstrorunicode(self, obj): return self.fakespace.wrapbytes(obj) diff --git a/pypy/objspace/std/test/test_identitydict.py b/pypy/objspace/std/test/test_identitydict.py --- a/pypy/objspace/std/test/test_identitydict.py +++ b/pypy/objspace/std/test/test_identitydict.py @@ -1,8 +1,6 @@ import py from pypy.interpreter.gateway import interp2app -py.test.py3k_skip("XXX: strategies are currently broken") - class AppTestComparesByIdentity: spaceconfig = {"objspace.std.withidentitydict": True} diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -167,6 +167,7 @@ d = f(λ=True) assert list(d) == ['λ'] + assert next(iter(d)) == 'λ' assert "KwargsDictStrategy" in self.get_strategy(d) d['foo'] = 'bar' diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -402,7 +402,6 @@ 
self.space.w_True) def test_sizehint(self): - py.test.py3k_skip("XXX: strategies are currently broken") space = self.space w_l = space.newlist([], sizehint=10) assert isinstance(w_l.strategy, SizeListStrategy) @@ -419,7 +418,6 @@ assert w_lst.strategy.sizehint == 13 def test_find_fast_on_intlist(self, monkeypatch): - py.test.py3k_skip("XXX: strategies are currently broken") monkeypatch.setattr(self.space, "eq_w", None) w = self.space.wrap intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)]) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,3 @@ -import py import sys from pypy.objspace.std.listobject import ( W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, @@ -7,7 +6,6 @@ from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject -#py.test.py3k_skip("XXX: strategies are currently broken") class TestW_ListStrategies(TestW_ListObject): def test_check_strategy(self): @@ -580,9 +578,11 @@ assert not self.space.eq_w(l1, l2) def test_weird_rangelist_bug(self): - l = make_range_list(self.space, 1, 1, 3) + space = self.space + l = make_range_list(space, 1, 1, 3) # should not raise - assert l.descr_getslice(self.space, self.space.wrap(15), self.space.wrap(2222)).strategy == self.space.fromcache(EmptyListStrategy) + w_slice = space.newslice(space.wrap(15), space.wrap(2222), space.wrap(1)) + assert l.descr_getitem(space, w_slice).strategy == space.fromcache(EmptyListStrategy) def test_add_to_rangelist(self): l1 = make_range_list(self.space, 1, 1, 3) @@ -681,8 +681,6 @@ assert space.unwrap(w_res) == 3 def test_create_list_from_set(self): - # this test fails because of the "w_set.iter = None" line below - py.test.py3k_skip("missing the correct list strategy") from pypy.objspace.std.setobject import W_SetObject from 
pypy.objspace.std.setobject import _initialize_set diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -84,7 +84,6 @@ assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("")))) def test_create_set_from_list(self): - py.test.py3k_skip("XXX: strategies are currently broken") from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.setobject import BytesSetStrategy, ObjectSetStrategy, UnicodeSetStrategy from pypy.objspace.std.floatobject import W_FloatObject @@ -131,11 +130,11 @@ intstr.get_storage_from_list = tmp_func def test_listview_bytes_int_on_set(self): - py.test.py3k_skip("XXX: strategies are currently broken") w = self.space.wrap + wb = self.space.wrapbytes w_a = W_SetObject(self.space) - _initialize_set(self.space, w_a, w("abcdefg")) + _initialize_set(self.space, w_a, wb("abcdefg")) assert sorted(self.space.listview_bytes(w_a)) == list("abcdefg") assert self.space.listview_int(w_a) is None diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py --- a/pypy/objspace/std/test/test_setstrategies.py +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -5,9 +5,6 @@ UnicodeIteratorImplementation, UnicodeSetStrategy) from pypy.objspace.std.listobject import W_ListObject -import py -py.test.py3k_skip("XXX: strategies are currently broken") - class TestW_SetStrategies: def wrapped(self, l, bytes=False): From noreply at buildbot.pypy.org Fri Apr 18 02:43:15 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 02:43:15 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: more progress, have _str methods accept utf-8 encoded strs, for the sake of Message-ID: <20140418004315.5067C1C01F4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70740:806adbe54237 Date: 2014-04-17 
17:41 -0700 http://bitbucket.org/pypy/pypy/changeset/806adbe54237/ Log: more progress, have _str methods accept utf-8 encoded strs, for the sake of compat. w/ default diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -401,7 +401,7 @@ def buffer_w(w_self, space): return StringBuffer(w_self._value) - # XXX: could provide listview_int + # XXX: could provide listview_int? #def listview_bytes(self): # return _create_list_from_bytes(self._value) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -117,9 +117,10 @@ w_type) byteslist = space.listview_bytes(w_keys) - if byteslist is not None: + # XXX: py3k could switch this to listview_unicode, but our + # setitem_str accepts utf-8 encoded strs, not unicode! + if False and byteslist is not None: for key in byteslist: - # XXX: bytes is tied to setitem_str here! w_dict.setitem_str(key, w_fill) else: for w_key in space.listview(w_keys): @@ -361,6 +362,9 @@ def get_empty_storage(self): raise NotImplementedError + def decodekey_str(self, key): + return key.decode('utf-8') + @jit.look_inside_iff(lambda self, w_dict: w_dict_unrolling_heuristic(w_dict)) def w_keys(self, w_dict): @@ -701,9 +705,7 @@ def setitem_str(self, w_dict, key, w_value): self.switch_to_object_strategy(w_dict) - # XXX: wrap(key) means we only allow ascii to - # setitem_str. should probaby allow utf-8 - w_dict.setitem(self.space.wrap(key), w_value) + w_dict.setitem(self.space.wrap(self.decodekey_str(key)), w_value) def setdefault(self, w_dict, w_key, w_default): if self.is_correct_type(w_key): @@ -725,9 +727,7 @@ return len(self.unerase(w_dict.dstorage)) def getitem_str(self, w_dict, key): - # XXX: wrapping here caused some issues w/ - # ByteDictStrat.. 
double check - return self.getitem(w_dict, self.space.wrap(key)) + return self.getitem(w_dict, self.space.wrap(self.decodekey_str(key))) def getitem(self, w_dict, w_key): space = self.space @@ -807,7 +807,7 @@ return self.space.newlist(self.unerase(w_dict.dstorage).keys()) def setitem_str(self, w_dict, s, w_value): - self.setitem(w_dict, self.space.wrap(s), w_value) + self.setitem(w_dict, self.space.wrap(self.decodekey_str(s)), w_value) def switch_to_object_strategy(self, w_dict): assert 0, "should be unreachable" @@ -907,7 +907,7 @@ def setitem_str(self, w_dict, key, w_value): assert key is not None - self.unerase(w_dict.dstorage)[key.decode('ascii')] = w_value + self.unerase(w_dict.dstorage)[self.decodekey_str(key)] = w_value def getitem(self, w_dict, w_key): space = self.space @@ -919,7 +919,7 @@ def getitem_str(self, w_dict, key): assert key is not None - return self.unerase(w_dict.dstorage).get(key.decode('utf-8'), None) + return self.unerase(w_dict.dstorage).get(self.decodekey_str(key), None) def listview_unicode(self, w_dict): return self.unerase(w_dict.dstorage).keys() From noreply at buildbot.pypy.org Fri Apr 18 03:47:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 03:47:00 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup nditer Message-ID: <20140418014700.395B31C01F4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70741:7ee3a18d1aca Date: 2014-04-17 21:35 -0400 http://bitbucket.org/pypy/pypy/changeset/7ee3a18d1aca/ Log: cleanup nditer diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -172,14 +172,13 @@ size = 1 self.slicesize = support.product(shape) self.gap = strides - ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) def getslice(self): from pypy.module.micronumpy.concrete import SliceArray - retVal = SliceArray(self.offset, self.gap, 
self.backstrides, - [self.slicesize], self.arr.implementation, self.arr, self.dtype) - return retVal + return SliceArray(self.offset, self.gap, self.backstrides, + [self.slicesize], self.arr.implementation, + self.arr, self.dtype) def AxisIter(array, shape, axis, cumulative): diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -1,14 +1,14 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt +from pypy.module.micronumpy import ufuncs, support from pypy.module.micronumpy.base import W_NDimArray, convert_to_array -from pypy.module.micronumpy.strides import (calculate_broadcast_strides, - shape_agreement, shape_agreement_multiple) -from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator from pypy.module.micronumpy.concrete import SliceArray from pypy.module.micronumpy.descriptor import decode_w_dtype -from pypy.module.micronumpy import ufuncs, support +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.strides import (calculate_broadcast_strides, + shape_agreement, shape_agreement_multiple) class AbstractIterator(object): @@ -21,8 +21,10 @@ def getitem(self, space, array): raise NotImplementedError("Abstract Class") + class IteratorMixin(object): _mixin_ = True + def __init__(self, it, op_flags): self.it = it self.op_flags = op_flags @@ -39,28 +41,33 @@ def setitem(self, space, array, val): xxx + class BoxIterator(IteratorMixin, AbstractIterator): index = 0 + class ExternalLoopIterator(IteratorMixin, AbstractIterator): index = 1 + def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): ret = [] if space.is_w(w_op_flags, space.w_None): for i in range(n): 
ret.append(OpFlag()) elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ - space.isinstance_w(w_op_flags, space.w_list): - raise OperationError(space.w_ValueError, space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) + space.isinstance_w(w_op_flags, space.w_list): + raise oefmt(space.w_ValueError, + '%s must be a tuple or array of per-op flag-tuples', + name) else: w_lst = space.listview(w_op_flags) if space.isinstance_w(w_lst[0], space.w_tuple) or \ space.isinstance_w(w_lst[0], space.w_list): if len(w_lst) != n: - raise OperationError(space.w_ValueError, space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) + raise oefmt(space.w_ValueError, + '%s must be a tuple or array of per-op flag-tuples', + name) for item in w_lst: ret.append(parse_one_arg(space, space.listview(item))) else: @@ -69,6 +76,7 @@ ret.append(op_flag) return ret + class OpFlag(object): def __init__(self): self.rw = 'r' @@ -80,21 +88,26 @@ self.allocate = False self.get_it_item = (get_readonly_item, get_readonly_slice) + def get_readonly_item(space, array, it): return space.wrap(it.getitem()) + def get_readwrite_item(space, array, it): #create a single-value view (since scalars are not views) - res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) + res = SliceArray(it.array.start + it.offset, [0], [0], [1], it.array, array) #it.dtype.setitem(res, 0, it.getitem()) return W_NDimArray(res) + def get_readonly_slice(space, array, it): return W_NDimArray(it.getslice().readonly()) + def get_readwrite_slice(space, array, it): return W_NDimArray(it.getslice()) + def parse_op_flag(space, lst): op_flag = OpFlag() for w_item in lst: @@ -121,16 +134,16 @@ op_flag.allocate = True elif item == 'no_subtype': raise OperationError(space.w_NotImplementedError, space.wrap( - '"no_subtype" op_flag not implemented yet')) + '"no_subtype" op_flag not implemented yet')) elif item == 'arraymask': raise 
OperationError(space.w_NotImplementedError, space.wrap( - '"arraymask" op_flag not implemented yet')) + '"arraymask" op_flag not implemented yet')) elif item == 'writemask': raise OperationError(space.w_NotImplementedError, space.wrap( - '"writemask" op_flag not implemented yet')) + '"writemask" op_flag not implemented yet')) else: raise OperationError(space.w_ValueError, space.wrap( - 'op_flags must be a tuple or array of per-op flag-tuples')) + 'op_flags must be a tuple or array of per-op flag-tuples')) if op_flag.rw == 'r': op_flag.get_it_item = (get_readonly_item, get_readonly_slice) elif op_flag.rw == 'rw': @@ -140,20 +153,22 @@ op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) return op_flag + def parse_func_flags(space, nditer, w_flags): if space.is_w(w_flags, space.w_None): return elif not space.isinstance_w(w_flags, space.w_tuple) and not \ - space.isinstance_w(w_flags, space.w_list): + space.isinstance_w(w_flags, space.w_list): raise OperationError(space.w_ValueError, space.wrap( - 'Iter global flags must be a list or tuple of strings')) + 'Iter global flags must be a list or tuple of strings')) lst = space.listview(w_flags) for w_item in lst: if not space.isinstance_w(w_item, space.w_str) and not \ - space.isinstance_w(w_item, space.w_unicode): + space.isinstance_w(w_item, space.w_unicode): typename = space.type(w_item).getname(space) - raise OperationError(space.w_TypeError, space.wrap( - 'expected string or Unicode object, %s found' % typename)) + raise oefmt(space.w_TypeError, + 'expected string or Unicode object, %s found', + typename) item = space.str_w(w_item) if item == 'external_loop': raise OperationError(space.w_NotImplementedError, space.wrap( @@ -187,21 +202,24 @@ elif item == 'zerosize_ok': nditer.zerosize_ok = True else: - raise OperationError(space.w_ValueError, space.wrap( - 'Unexpected iterator global flag "%s"' % item)) + raise oefmt(space.w_ValueError, + 'Unexpected iterator global flag "%s"', + item) if 
nditer.tracked_index and nditer.external_loop: - raise OperationError(space.w_ValueError, space.wrap( - 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' - 'multi-index is being tracked')) + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + def is_backward(imp, order): if order == 'K' or (order == 'C' and imp.order == 'C'): return False - elif order =='F' and imp.order == 'C': + elif order == 'F' and imp.order == 'C': return True else: raise NotImplementedError('not implemented yet') + def get_iter(space, order, arr, shape, dtype): imp = arr.implementation backward = is_backward(imp, order) @@ -223,11 +241,13 @@ shape, backward) return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) + def get_external_loop_iter(space, order, arr, shape): imp = arr.implementation backward = is_backward(imp, order) return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + def convert_to_array_or_none(space, w_elem): ''' None will be passed through, all others will be converted @@ -263,10 +283,10 @@ ret += self.index[i] * self.shape[i - 1] return ret + class W_NDIter(W_Root): - def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, - w_op_axes, w_itershape, w_buffersize, order): + w_op_axes, w_itershape, w_buffersize, order): self.order = order self.external_loop = False self.buffered = False @@ -288,7 +308,7 @@ w_seq_as_list = space.listview(w_seq) self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] else: - self.seq =[convert_to_array(space, w_seq)] + self.seq = [convert_to_array(space, w_seq)] parse_func_flags(space, self, w_flags) self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, @@ -308,9 +328,9 @@ self.dtypes = [] # handle None or writable operands, calculate my shape - self.iters=[] - outargs = [i for i in range(len(self.seq)) \ - if self.seq[i] is None or 
self.op_flags[i].rw == 'w'] + self.iters = [] + outargs = [i for i in range(len(self.seq)) + if self.seq[i] is None or self.op_flags[i].rw == 'w'] if len(outargs) > 0: out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) else: @@ -326,13 +346,13 @@ for i in range(len(self.seq)): if self.seq[i] is None: self.op_flags[i].get_it_item = (get_readwrite_item, - get_readwrite_slice) + get_readwrite_slice) self.op_flags[i].allocate = True continue if self.op_flags[i].rw == 'w': continue - out_dtype = ufuncs.find_binop_result_dtype(space, - self.seq[i].get_dtype(), out_dtype) + out_dtype = ufuncs.find_binop_result_dtype( + space, self.seq[i].get_dtype(), out_dtype) for i in outargs: if self.seq[i] is None: # XXX can we postpone allocation to later? @@ -372,13 +392,17 @@ # create an iterator for each operand if self.external_loop: for i in range(len(self.seq)): - self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, - self.seq[i], iter_shape), self.op_flags[i])) + self.iters.append(ExternalLoopIterator( + get_external_loop_iter( + space, self.order, self.seq[i], iter_shape), + self.op_flags[i])) else: for i in range(len(self.seq)): - self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i], iter_shape, self.dtypes[i]), - self.op_flags[i])) + self.iters.append(BoxIterator( + get_iter( + space, self.order, self.seq[i], iter_shape, self.dtypes[i]), + self.op_flags[i])) + def set_op_axes(self, space, w_op_axes): if space.len_w(w_op_axes) != len(self.seq): raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) @@ -435,7 +459,7 @@ for i in range(len(self.iters)): res.append(self.iters[i].getitem(space, self.seq[i])) self.iters[i].next() - if len(res) <2: + if len(res) < 2: return res[0] return space.newtuple(res) @@ -551,14 +575,14 @@ 'not implemented yet')) - at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), - w_op_dtypes = 
WrappedDefault(None), order=str, + at unwrap_spec(w_flags=WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes=WrappedDefault(None), order=str, w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order='K'): + w_itershape, w_buffersize, order='K'): return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order) + w_itershape, w_buffersize, order) W_NDIter.typedef = TypeDef( 'nditer', From noreply at buildbot.pypy.org Fri Apr 18 03:47:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 03:47:01 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140418014701.60BC71C01F4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70742:cbf4dcd31689 Date: 2014-04-17 21:45 -0400 http://bitbucket.org/pypy/pypy/changeset/cbf4dcd31689/ Log: merge heads diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -3,15 +3,18 @@ indirection is introduced to make the version tag change less often. 
""" +from rpython.rlib import jit, rerased + from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.dictmultiobject import create_iterator_classes -from pypy.objspace.std.dictmultiobject import DictStrategy, _never_equal_to_string -from pypy.objspace.std.dictmultiobject import ObjectDictStrategy -from rpython.rlib import jit, rerased +from pypy.objspace.std.dictmultiobject import ( + DictStrategy, ObjectDictStrategy, _never_equal_to_string, + create_iterator_classes) + class VersionTag(object): pass + class ModuleCell(W_Root): def __init__(self, w_value=None): self.w_value = w_value @@ -19,11 +22,17 @@ def __repr__(self): return "" % (self.w_value, ) + def unwrap_cell(w_value): if isinstance(w_value, ModuleCell): return w_value.w_value return w_value + +def _wrapkey(space, key): + return space.wrap(key) + + class ModuleDictStrategy(DictStrategy): erase, unerase = rerased.new_erasing_pair("modulecell") @@ -55,7 +64,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space if space.is_w(space.type(w_key), space.w_str): - self.setitem_str(w_dict, self.space.str_w(w_key), w_value) + self.setitem_str(w_dict, space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) w_dict.setitem(w_key, w_value) @@ -66,8 +75,8 @@ cell.w_value = w_value return if cell is not None: - # If the new value and the current value are the same, don't create a - # level of indirection, or mutate the version. + # If the new value and the current value are the same, don't + # create a level of indirection, or mutate the version. 
if self.space.is_w(w_value, cell): return w_value = ModuleCell(w_value) @@ -121,8 +130,8 @@ return w_dict.getitem(w_key) def getitem_str(self, w_dict, key): - w_res = self.getdictvalue_no_unwrapping(w_dict, key) - return unwrap_cell(w_res) + cell = self.getdictvalue_no_unwrapping(w_dict, key) + return unwrap_cell(cell) def w_keys(self, w_dict): space = self.space @@ -136,37 +145,43 @@ def items(self, w_dict): space = self.space iterator = self.unerase(w_dict.dstorage).iteritems - return [space.newtuple([space.wrap(key), unwrap_cell(cell)]) - for key, cell in iterator()] + return [space.newtuple([_wrapkey(space, key), unwrap_cell(cell)]) + for key, cell in iterator()] def clear(self, w_dict): self.unerase(w_dict.dstorage).clear() self.mutated() def popitem(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - key, w_value = d.popitem() + key, cell = d.popitem() self.mutated() - return self.space.wrap(key), unwrap_cell(w_value) + return _wrapkey(space, key), unwrap_cell(cell) def switch_to_object_strategy(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - strategy = self.space.fromcache(ObjectDictStrategy) + strategy = space.fromcache(ObjectDictStrategy) d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): - d_new[self.space.wrap(key)] = unwrap_cell(cell) + d_new[_wrapkey(space, key)] = unwrap_cell(cell) w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): return self.unerase(w_dict.dstorage).iterkeys() + def getitervalues(self, w_dict): return self.unerase(w_dict.dstorage).itervalues() + def getiteritems(self, w_dict): return self.unerase(w_dict.dstorage).iteritems() - def wrapkey(space, key): - return space.wrap(key) + + wrapkey = _wrapkey + def wrapvalue(space, value): return unwrap_cell(value) + create_iterator_classes(ModuleDictStrategy) diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- 
a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -1,12 +1,19 @@ -## ---------------------------------------------------------------------------- -## dict strategy (see dictmultiobject.py) +"""dict implementation specialized for keyword argument dicts. -from rpython.rlib import rerased, jit +Based on two lists containing unwrapped key value pairs. +""" + +from rpython.rlib import jit, rerased + from pypy.objspace.std.dictmultiobject import ( BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, create_iterator_classes) +def _wrapkey(space, key): + return space.wrap(key) + + class EmptyKwargsDictStrategy(EmptyDictStrategy): def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) @@ -21,7 +28,7 @@ unerase = staticmethod(unerase) def wrap(self, key): - return self.space.wrap(key) + return _wrapkey(self.space, key) def unwrap(self, wrapped): return self.space.str_w(wrapped) @@ -117,16 +124,14 @@ def items(self, w_dict): space = self.space keys, values_w = self.unerase(w_dict.dstorage) - result = [] - for i in range(len(keys)): - result.append(space.newtuple([self.wrap(keys[i]), values_w[i]])) - return result + return [space.newtuple([self.wrap(keys[i]), values_w[i]]) + for i in range(len(keys))] def popitem(self, w_dict): keys, values_w = self.unerase(w_dict.dstorage) key = keys.pop() w_value = values_w.pop() - return (self.wrap(key), w_value) + return self.wrap(key), w_value def clear(self, w_dict): w_dict.dstorage = self.get_empty_storage() @@ -164,17 +169,15 @@ keys = self.unerase(w_dict.dstorage)[0] return iter(range(len(keys))) - def wrapkey(space, key): - return space.wrap(key) + wrapkey = _wrapkey def next_item(self): strategy = self.strategy assert isinstance(strategy, KwargsDictStrategy) for i in self.iterator: - keys, values_w = strategy.unerase( - self.dictimplementation.dstorage) - return self.space.wrap(keys[i]), values_w[i] + keys, values_w = 
strategy.unerase(self.dictimplementation.dstorage) + return _wrapkey(self.space, keys[i]), values_w[i] else: return None, None diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,42 +1,47 @@ import py + +from pypy.objspace.std.celldict import ModuleDictStrategy from pypy.objspace.std.dictmultiobject import W_DictMultiObject -from pypy.objspace.std.celldict import ModuleCell, ModuleDictStrategy -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, \ - BaseTestRDictImplementation, BaseTestDevolvedDictImplementation -from pypy.interpreter import gateway +from pypy.objspace.std.test.test_dictmultiobject import ( + BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, + FakeString) space = FakeSpace() class TestCellDict(object): + FakeString = FakeString + def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() d = W_DictMultiObject(space, strategy, storage) v1 = strategy.version - d.setitem("a", 1) + key = "a" + w_key = self.FakeString(key) + d.setitem(w_key, 1) v2 = strategy.version assert v1 is not v2 - assert d.getitem("a") == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, "a") == 1 + assert d.getitem(w_key) == 1 + assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 - d.setitem("a", 2) + d.setitem(w_key, 2) v3 = strategy.version assert v2 is not v3 - assert d.getitem("a") == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 2 + assert d.getitem(w_key) == 2 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 - d.setitem("a", 3) + d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 - assert d.getitem("a") == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 3 + assert d.getitem(w_key) == 3 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 - 
d.delitem("a") + d.delitem(w_key) v5 = strategy.version assert v5 is not v4 - assert d.getitem("a") is None - assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + assert d.getitem(w_key) is None + assert d.strategy.getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1254,12 +1254,13 @@ return other == "s" d = self.get_impl() - d.setitem("s", 12) - assert d.getitem("s") == 12 - assert d.getitem(F()) == d.getitem("s") + w_key = FakeString("s") + d.setitem(w_key, 12) + assert d.getitem(w_key) == 12 + assert d.getitem(F()) == d.getitem(w_key) d = self.get_impl() - x = d.setdefault("s", 12) + x = d.setdefault(w_key, 12) assert x == 12 x = d.setdefault(F(), 12) assert x == 12 @@ -1269,10 +1270,10 @@ assert x == 12 d = self.get_impl() - d.setitem("s", 12) + d.setitem(w_key, 12) d.delitem(F()) - assert "s" not in d.w_keys() + assert w_key not in d.w_keys() assert F() not in d.w_keys() class TestBytesDictImplementation(BaseTestRDictImplementation): From noreply at buildbot.pypy.org Fri Apr 18 04:50:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 04:50:28 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: add a test_zjit for where Message-ID: <20140418025028.6B0471C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70743:6072af059033 Date: 2014-04-17 21:11 -0400 http://bitbucket.org/pypy/pypy/changeset/6072af059033/ Log: add a test_zjit for where diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -539,7 +539,7 @@ def define_dot(): return """ a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 
11, 12]] - b=[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]] + b = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]] c = dot(a, b) c -> 1 -> 2 """ @@ -595,3 +595,31 @@ def test_argsort(self): result = self.run("argsort") assert result == 6 + + def define_where(): + return """ + a = [1, 0, 1, 0] + x = [1, 2, 3, 4] + y = [-10, -20, -30, -40] + r = where(a, x, y) + r -> 3 + """ + + def test_where(self): + result = self.run("where") + assert result == -40 + self.check_trace_count(1) + self.check_simple_loop({ + 'float_ne': 1, + 'getarrayitem_gc': 4, + 'guard_false': 1, + 'guard_not_invalidated': 1, + 'guard_true': 5, + 'int_add': 12, + 'int_ge': 1, + 'int_lt': 4, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 4, + }) From noreply at buildbot.pypy.org Fri Apr 18 04:50:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 04:50:29 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: merge default Message-ID: <20140418025029.EC2471C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70744:892ea7009bdf Date: 2014-04-17 21:47 -0400 http://bitbucket.org/pypy/pypy/changeset/892ea7009bdf/ Log: merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -139,3 +139,4 @@ Fix issues with reimporting builtin modules .. 
branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,6 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', + 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,6 +42,7 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support +from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -156,6 +157,38 @@ self.array.setitem(state.offset, elem) +class SliceIterator(ArrayIter): + def __init__(self, arr, strides, backstrides, shape, order="C", + backward=False, dtype=None): + if dtype is None: + dtype = arr.implementation.dtype + self.dtype = dtype + self.arr = arr + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.elsize] + strides = strides[1:] + backstrides = backstrides[1:] + shape = shape[1:] + strides.reverse() + backstrides.reverse() + shape.reverse() + size = support.product(shape) + else: + shape = [support.product(shape)] + strides, backstrides = calc_strides(shape, dtype, order) + size = 1 + self.slicesize = support.product(shape) + self.gap = strides + ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) + + def getslice(self): + from pypy.module.micronumpy.concrete import SliceArray + return SliceArray(self.offset, self.gap, self.backstrides, + [self.slicesize], self.arr.implementation, + self.arr, self.dtype) + + def AxisIter(array, shape, axis, cumulative): strides = 
array.get_strides() backstrides = array.get_backstrides() diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/nditer.py @@ -0,0 +1,619 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.error import OperationError, oefmt +from pypy.module.micronumpy import ufuncs, support +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.concrete import SliceArray +from pypy.module.micronumpy.descriptor import decode_w_dtype +from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.strides import (calculate_broadcast_strides, + shape_agreement, shape_agreement_multiple) + + +class AbstractIterator(object): + def done(self): + raise NotImplementedError("Abstract Class") + + def next(self): + raise NotImplementedError("Abstract Class") + + def getitem(self, space, array): + raise NotImplementedError("Abstract Class") + + +class IteratorMixin(object): + _mixin_ = True + + def __init__(self, it, op_flags): + self.it = it + self.op_flags = op_flags + + def done(self): + return self.it.done() + + def next(self): + self.it.next() + + def getitem(self, space, array): + return self.op_flags.get_it_item[self.index](space, array, self.it) + + def setitem(self, space, array, val): + xxx + + +class BoxIterator(IteratorMixin, AbstractIterator): + index = 0 + + +class ExternalLoopIterator(IteratorMixin, AbstractIterator): + index = 1 + + +def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): + ret = [] + if space.is_w(w_op_flags, space.w_None): + for i in range(n): + ret.append(OpFlag()) + elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise oefmt(space.w_ValueError, + '%s must 
be a tuple or array of per-op flag-tuples', + name) + else: + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise oefmt(space.w_ValueError, + '%s must be a tuple or array of per-op flag-tuples', + name) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) + for i in range(n): + ret.append(op_flag) + return ret + + +class OpFlag(object): + def __init__(self): + self.rw = 'r' + self.broadcast = True + self.force_contig = False + self.force_align = False + self.native_byte_order = False + self.tmp_copy = '' + self.allocate = False + self.get_it_item = (get_readonly_item, get_readonly_slice) + + +def get_readonly_item(space, array, it): + return space.wrap(it.getitem()) + + +def get_readwrite_item(space, array, it): + #create a single-value view (since scalars are not views) + res = SliceArray(it.array.start + it.offset, [0], [0], [1], it.array, array) + #it.dtype.setitem(res, 0, it.getitem()) + return W_NDimArray(res) + + +def get_readonly_slice(space, array, it): + return W_NDimArray(it.getslice().readonly()) + + +def get_readwrite_slice(space, array, it): + return W_NDimArray(it.getslice()) + + +def parse_op_flag(space, lst): + op_flag = OpFlag() + for w_item in lst: + item = space.str_w(w_item) + if item == 'readonly': + op_flag.rw = 'r' + elif item == 'readwrite': + op_flag.rw = 'rw' + elif item == 'writeonly': + op_flag.rw = 'w' + elif item == 'no_broadcast': + op_flag.broadcast = False + elif item == 'contig': + op_flag.force_contig = True + elif item == 'aligned': + op_flag.force_align = True + elif item == 'nbo': + op_flag.native_byte_order = True + elif item == 'copy': + op_flag.tmp_copy = 'r' + elif item == 'updateifcopy': + op_flag.tmp_copy = 'rw' + elif item == 'allocate': + op_flag.allocate = True + elif item == 'no_subtype': + raise 
OperationError(space.w_NotImplementedError, space.wrap( + '"no_subtype" op_flag not implemented yet')) + elif item == 'arraymask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"arraymask" op_flag not implemented yet')) + elif item == 'writemask': + raise OperationError(space.w_NotImplementedError, space.wrap( + '"writemask" op_flag not implemented yet')) + else: + raise OperationError(space.w_ValueError, space.wrap( + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == 'r': + op_flag.get_it_item = (get_readonly_item, get_readonly_slice) + elif op_flag.rw == 'rw': + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + elif op_flag.rw == 'w': + # XXX Extra logic needed to make sure writeonly + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + return op_flag + + +def parse_func_flags(space, nditer, w_flags): + if space.is_w(w_flags, space.w_None): + return + elif not space.isinstance_w(w_flags, space.w_tuple) and not \ + space.isinstance_w(w_flags, space.w_list): + raise OperationError(space.w_ValueError, space.wrap( + 'Iter global flags must be a list or tuple of strings')) + lst = space.listview(w_flags) + for w_item in lst: + if not space.isinstance_w(w_item, space.w_str) and not \ + space.isinstance_w(w_item, space.w_unicode): + typename = space.type(w_item).getname(space) + raise oefmt(space.w_TypeError, + 'expected string or Unicode object, %s found', + typename) + item = space.str_w(w_item) + if item == 'external_loop': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer external_loop not implemented yet')) + nditer.external_loop = True + elif item == 'buffered': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer buffered not implemented yet')) + # For numpy compatability + nditer.buffered = True + elif item == 'c_index': + nditer.tracked_index = 'C' + elif item == 'f_index': + nditer.tracked_index = 'F' + elif item == 'multi_index': + 
nditer.tracked_index = 'multi' + elif item == 'common_dtype': + nditer.common_dtype = True + elif item == 'delay_bufalloc': + nditer.delay_bufalloc = True + elif item == 'grow_inner': + nditer.grow_inner = True + elif item == 'ranged': + nditer.ranged = True + elif item == 'refs_ok': + nditer.refs_ok = True + elif item == 'reduce_ok': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer reduce_ok not implemented yet')) + nditer.reduce_ok = True + elif item == 'zerosize_ok': + nditer.zerosize_ok = True + else: + raise oefmt(space.w_ValueError, + 'Unexpected iterator global flag "%s"', + item) + if nditer.tracked_index and nditer.external_loop: + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + + +def is_backward(imp, order): + if order == 'K' or (order == 'C' and imp.order == 'C'): + return False + elif order == 'F' and imp.order == 'C': + return True + else: + raise NotImplementedError('not implemented yet') + + +def get_iter(space, order, arr, shape, dtype): + imp = arr.implementation + backward = is_backward(imp, order) + if arr.is_scalar(): + return ArrayIter(imp, 1, [], [], []) + if (imp.strides[0] < imp.strides[-1] and not backward) or \ + (imp.strides[0] > imp.strides[-1] and backward): + # flip the strides. Is this always true for multidimension? 
+ strides = imp.strides[:] + backstrides = imp.backstrides[:] + shape = imp.shape[:] + strides.reverse() + backstrides.reverse() + shape.reverse() + else: + strides = imp.strides + backstrides = imp.backstrides + r = calculate_broadcast_strides(strides, backstrides, imp.shape, + shape, backward) + return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) + + +def get_external_loop_iter(space, order, arr, shape): + imp = arr.implementation + backward = is_backward(imp, order) + return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + + +def convert_to_array_or_none(space, w_elem): + ''' + None will be passed through, all others will be converted + ''' + if space.is_none(w_elem): + return None + return convert_to_array(space, w_elem) + + +class IndexIterator(object): + def __init__(self, shape, backward=False): + self.shape = shape + self.index = [0] * len(shape) + self.backward = backward + + def next(self): + # TODO It's probably possible to refactor all the "next" method from each iterator + for i in range(len(self.shape) - 1, -1, -1): + if self.index[i] < self.shape[i] - 1: + self.index[i] += 1 + break + else: + self.index[i] = 0 + + def getvalue(self): + if not self.backward: + ret = self.index[-1] + for i in range(len(self.shape) - 2, -1, -1): + ret += self.index[i] * self.shape[i - 1] + else: + ret = self.index[0] + for i in range(1, len(self.shape)): + ret += self.index[i] * self.shape[i - 1] + return ret + + +class W_NDIter(W_Root): + def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, + w_op_axes, w_itershape, w_buffersize, order): + self.order = order + self.external_loop = False + self.buffered = False + self.tracked_index = '' + self.common_dtype = False + self.delay_bufalloc = False + self.grow_inner = False + self.ranged = False + self.refs_ok = False + self.reduce_ok = False + self.zerosize_ok = False + self.index_iter = None + self.done = False + self.first_next = True + self.op_axes = 
[] + # convert w_seq operands to a list of W_NDimArray + if space.isinstance_w(w_seq, space.w_tuple) or \ + space.isinstance_w(w_seq, space.w_list): + w_seq_as_list = space.listview(w_seq) + self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + else: + self.seq = [convert_to_array(space, w_seq)] + + parse_func_flags(space, self, w_flags) + self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, + len(self.seq), parse_op_flag) + # handle w_op_axes + if not space.is_none(w_op_axes): + self.set_op_axes(space, w_op_axes) + + # handle w_op_dtypes part 1: creating self.dtypes list from input + if not space.is_none(w_op_dtypes): + w_seq_as_list = space.listview(w_op_dtypes) + self.dtypes = [decode_w_dtype(space, w_elem) for w_elem in w_seq_as_list] + if len(self.dtypes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap( + "op_dtypes must be a tuple/list matching the number of ops")) + else: + self.dtypes = [] + + # handle None or writable operands, calculate my shape + self.iters = [] + outargs = [i for i in range(len(self.seq)) + if self.seq[i] is None or self.op_flags[i].rw == 'w'] + if len(outargs) > 0: + out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) + else: + out_shape = None + self.shape = iter_shape = shape_agreement_multiple(space, self.seq, + shape=out_shape) + if len(outargs) > 0: + # Make None operands writeonly and flagged for allocation + if len(self.dtypes) > 0: + out_dtype = self.dtypes[outargs[0]] + else: + out_dtype = None + for i in range(len(self.seq)): + if self.seq[i] is None: + self.op_flags[i].get_it_item = (get_readwrite_item, + get_readwrite_slice) + self.op_flags[i].allocate = True + continue + if self.op_flags[i].rw == 'w': + continue + out_dtype = ufuncs.find_binop_result_dtype( + space, self.seq[i].get_dtype(), out_dtype) + for i in outargs: + if self.seq[i] is None: + # XXX can we postpone allocation to later? 
+ self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) + else: + if not self.op_flags[i].broadcast: + # Raises if ooutput cannot be broadcast + shape_agreement(space, iter_shape, self.seq[i], False) + + if self.tracked_index != "": + if self.order == "K": + self.order = self.seq[0].implementation.order + if self.tracked_index == "multi": + backward = False + else: + backward = self.order != self.tracked_index + self.index_iter = IndexIterator(iter_shape, backward=backward) + + # handle w_op_dtypes part 2: copy where needed if possible + if len(self.dtypes) > 0: + for i in range(len(self.seq)): + selfd = self.dtypes[i] + seq_d = self.seq[i].get_dtype() + if not selfd: + self.dtypes[i] = seq_d + elif selfd != seq_d: + if not 'r' in self.op_flags[i].tmp_copy: + raise OperationError(space.w_TypeError, space.wrap( + "Iterator operand required copying or buffering for operand %d" % i)) + impl = self.seq[i].implementation + new_impl = impl.astype(space, selfd) + self.seq[i] = W_NDimArray(new_impl) + else: + #copy them from seq + self.dtypes = [s.get_dtype() for s in self.seq] + + # create an iterator for each operand + if self.external_loop: + for i in range(len(self.seq)): + self.iters.append(ExternalLoopIterator( + get_external_loop_iter( + space, self.order, self.seq[i], iter_shape), + self.op_flags[i])) + else: + for i in range(len(self.seq)): + self.iters.append(BoxIterator( + get_iter( + space, self.order, self.seq[i], iter_shape, self.dtypes[i]), + self.op_flags[i])) + + def set_op_axes(self, space, w_op_axes): + if space.len_w(w_op_axes) != len(self.seq): + raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + op_axes = space.listview(w_op_axes) + l = -1 + for w_axis in op_axes: + if not space.is_none(w_axis): + axis_len = space.len_w(w_axis) + if l == -1: + l = axis_len + elif axis_len != l: + raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same 
size")) + self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + if l == -1: + raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise Exception('xxx TODO') + # Check that values make sense: + # - in bounds for each operand + # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions + # - no repeat axis + # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0 + + def descr_iter(self, space): + return space.wrap(self) + + def descr_getitem(self, space, w_idx): + idx = space.int_w(w_idx) + try: + ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + except IndexError: + raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) + return ret + + def descr_setitem(self, space, w_idx, w_value): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_len(self, space): + space.wrap(len(self.iters)) + + def descr_next(self, space): + for it in self.iters: + if not it.done(): + break + else: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + res = [] + if self.index_iter: + if not self.first_next: + self.index_iter.next() + else: + self.first_next = False + for i in range(len(self.iters)): + res.append(self.iters[i].getitem(space, self.seq[i])) + self.iters[i].next() + if len(res) < 2: + return res[0] + return space.newtuple(res) + + def iternext(self): + if self.index_iter: + self.index_iter.next() + for i in range(len(self.iters)): + self.iters[i].next() + for it in self.iters: + if not it.done(): + break + else: + self.done = True + return self.done + return self.done + + def descr_iternext(self, space): + return space.wrap(self.iternext()) + + def descr_copy(self, space): + raise 
OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_debug_print(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_enable_external_loop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + @unwrap_spec(axis=int) + def descr_remove_axis(self, space, axis): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_remove_multi_index(self, space, w_multi_index): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_reset(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_operands(self, space): + l_w = [] + for op in self.seq: + l_w.append(op.descr_view(space)) + return space.newlist(l_w) + + def descr_get_dtypes(self, space): + res = [None] * len(self.seq) + for i in range(len(self.seq)): + res[i] = self.seq[i].descr_get_dtype(space) + return space.newtuple(res) + + def descr_get_finished(self, space): + return space.wrap(self.done) + + def descr_get_has_delayed_bufalloc(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_has_index(self, space): + return space.wrap(self.tracked_index in ["C", "F"]) + + def descr_get_index(self, space): + if not self.tracked_index in ["C", "F"]: + raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.wrap(self.index_iter.getvalue()) + + def descr_get_has_multi_index(self, space): + return space.wrap(self.tracked_index == "multi") + + def descr_get_multi_index(self, space): + if not self.tracked_index == "multi": + raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a 
multi-index")) + if self.done: + raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + return space.newtuple([space.wrap(x) for x in self.index_iter.index]) + + def descr_get_iterationneedsapi(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_iterindex(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_itersize(self, space): + return space.wrap(support.product(self.shape)) + + def descr_get_itviews(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_ndim(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_nop(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_shape(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + def descr_get_value(self, space): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + + at unwrap_spec(w_flags=WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes=WrappedDefault(None), order=str, + w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), + w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) +def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order='K'): + return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, + w_itershape, w_buffersize, order) + +W_NDIter.typedef = TypeDef( + 'nditer', + __iter__ = interp2app(W_NDIter.descr_iter), + __getitem__ = interp2app(W_NDIter.descr_getitem), + __setitem__ = interp2app(W_NDIter.descr_setitem), + __len__ = interp2app(W_NDIter.descr_len), + + next = interp2app(W_NDIter.descr_next), + iternext = 
interp2app(W_NDIter.descr_iternext), + copy = interp2app(W_NDIter.descr_copy), + debug_print = interp2app(W_NDIter.descr_debug_print), + enable_external_loop = interp2app(W_NDIter.descr_enable_external_loop), + remove_axis = interp2app(W_NDIter.descr_remove_axis), + remove_multi_index = interp2app(W_NDIter.descr_remove_multi_index), + reset = interp2app(W_NDIter.descr_reset), + + operands = GetSetProperty(W_NDIter.descr_get_operands), + dtypes = GetSetProperty(W_NDIter.descr_get_dtypes), + finished = GetSetProperty(W_NDIter.descr_get_finished), + has_delayed_bufalloc = GetSetProperty(W_NDIter.descr_get_has_delayed_bufalloc), + has_index = GetSetProperty(W_NDIter.descr_get_has_index), + index = GetSetProperty(W_NDIter.descr_get_index), + has_multi_index = GetSetProperty(W_NDIter.descr_get_has_multi_index), + multi_index = GetSetProperty(W_NDIter.descr_get_multi_index), + iterationneedsapi = GetSetProperty(W_NDIter.descr_get_iterationneedsapi), + iterindex = GetSetProperty(W_NDIter.descr_get_iterindex), + itersize = GetSetProperty(W_NDIter.descr_get_itersize), + itviews = GetSetProperty(W_NDIter.descr_get_itviews), + ndim = GetSetProperty(W_NDIter.descr_get_ndim), + nop = GetSetProperty(W_NDIter.descr_get_nop), + shape = GetSetProperty(W_NDIter.descr_get_shape), + value = GetSetProperty(W_NDIter.descr_get_value), +) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -282,14 +282,16 @@ @jit.unroll_safe -def shape_agreement_multiple(space, array_list): +def shape_agreement_multiple(space, array_list, shape=None): """ call shape_agreement recursively, allow elements from array_list to be None (like w_out) """ - shape = array_list[0].get_shape() - for arr in array_list[1:]: + for arr in array_list: if not space.is_none(arr): - shape = shape_agreement(space, shape, arr) + if shape is None: + shape = arr.get_shape() + else: + shape = shape_agreement(space, 
shape, arr) return shape diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -0,0 +1,302 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestNDIter(BaseNumpyAppTest): + def test_basic(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + + for x in nditer(a.T): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + + def test_order(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + r = [] + for x in nditer(a, order='C'): + r.append(x) + assert r == [0, 1, 2, 3, 4, 5] + r = [] + for x in nditer(a, order='F'): + r.append(x) + assert r == [0, 3, 1, 4, 2, 5] + + def test_readwrite(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + for x in nditer(a, op_flags=['readwrite']): + x[...] 
= 2 * x + assert (a == [[0, 2, 4], [6, 8, 10]]).all() + + def test_external_loop(self): + from numpy import arange, nditer, array + a = arange(24).reshape(2, 3, 4) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + r = [] + n = 0 + for x in nditer(a, flags=['external_loop']): + r.append(x) + n += 1 + assert n == 1 + assert (array(r) == range(24)).all() + r = [] + n = 0 + for x in nditer(a, flags=['external_loop'], order='F'): + r.append(x) + n += 1 + assert n == 12 + assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() + e = raises(ValueError, 'r[0][0] = 0') + assert str(e.value) == 'assignment destination is read-only' + r = [] + for x in nditer(a.T, flags=['external_loop'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1,24) + assert (array(r) == arange(24)).all() + + def test_index(self): + from numpy import arange, nditer + a = arange(6).reshape(2,3) + + r = [] + it = nditer(a, flags=['c_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] + exc = None + try: + it.index + except ValueError, e: + exc = e + assert exc + + r = [] + it = nditer(a, flags=['f_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + + @py.test.mark.xfail(reason="Fortran order not implemented") + def test_iters_with_different_order(self): + from numpy import nditer, array + + a = array([[1, 2], [3, 4]], order="C") + b = array([[1, 2], [3, 4]], order="F") + + it = nditer([a, b]) + + assert list(it) == zip(range(1, 5), range(1, 5)) + + def test_interface(self): + from numpy import arange, nditer, zeros + import sys + a = 
arange(6).reshape(2,3) + r = [] + it = nditer(a, flags=['f_index']) + while not it.finished: + r.append((it[0], it.index)) + it.iternext() + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, 'it[0] = 3') + skip('nditer.__setitem__ not implmented') + while not it.finished: + it[0] = it.multi_index[1] - it.multi_index[0] + it.iternext() + assert (a == [[0, 1, 2], [-1, 0, 1]]).all() + # b = zeros((2, 3)) + # exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) + # assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") + + def test_buffered(self): + from numpy import arange, nditer, array + a = arange(6).reshape(2,3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered']) + skip('nditer buffered not implmented') + r = [] + for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): + r.append(x) + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1, 6) + assert (array_r == [0, 3, 1, 4, 2, 5]).all() + + def test_op_dtype(self): + from numpy import arange, nditer, sqrt, array + a = arange(6).reshape(2,3) - 3 + exc = raises(TypeError, nditer, a, op_dtypes=['complex']) + assert str(exc.value).startswith("Iterator operand required copying or buffering") + r = [] + for x in nditer(a, op_flags=['readonly','copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + r = [] + for x in nditer(a, op_flags=['copy'], + op_dtypes=['complex128']): + r.append(sqrt(x)) + assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + multi = nditer([None, array([2, 3], dtype='int64'), array(2., dtype='double')], + op_dtypes = ['int64', 'int64', 'float64'], + 
op_flags = [['writeonly', 'allocate'], ['readonly'], ['readonly']]) + for a, b, c in multi: + a[...] = b * c + assert (multi.operands[0] == [4, 6]).all() + + def test_casting(self): + from numpy import arange, nditer + import sys + a = arange(6.) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + skip('nditer casting not implemented yet') + exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], + casting='same_kind'): + r.append(x) + assert r == [0., 1., 2., 3., 4., 5.] + exc = raises(TypeError, nditer, a, flags=['buffered'], + op_dtypes=['int32'], casting='same_kind') + assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") + r = [] + b = arange(6) + exc = raises(TypeError, nditer, b, flags=['buffered'], op_dtypes=['float64'], + op_flags=['readwrite'], casting='same_kind') + assert str(exc.value).startswith("Iterator requested dtype could not be cast") + + def test_broadcast(self): + from numpy import arange, nditer + a = arange(3) + b = arange(6).reshape(2,3) + r = [] + it = nditer([a, b]) + assert it.itersize == 6 + for x,y in it: + r.append((x, y)) + assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] + a = arange(2) + exc = raises(ValueError, nditer, [a, b]) + assert str(exc.value).find('shapes (2) (2,3)') > 0 + + def test_outarg(self): + from numpy import nditer, zeros, arange + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [1, 2], flags=['external_loop']) + skip('nditer external_loop not implmented') + + def square1(a): + it = nditer([a, None]) + for x,y in it: + y[...] 
= x*x + return it.operands[1] + assert (square1([1, 2, 3]) == [1, 4, 9]).all() + + def square2(a, out=None): + it = nditer([a, out], flags=['external_loop', 'buffered'], + op_flags=[['readonly'], + ['writeonly', 'allocate', 'no_broadcast']]) + for x,y in it: + y[...] = x*x + return it.operands[1] + assert (square2([1, 2, 3]) == [1, 4, 9]).all() + b = zeros((3, )) + c = square2([1, 2, 3], out=b) + assert (c == [1., 4., 9.]).all() + assert (b == c).all() + exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) + assert str(exc.value).find('cannot be broadcasted') > 0 + + def test_outer_product(self): + from numpy import nditer, arange + a = arange(3) + import sys + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, a, flags=['external_loop']) + skip('nditer external_loop not implmented') + b = arange(8).reshape(2,4) + it = nditer([a, b, None], flags=['external_loop'], + op_axes=[[0, -1, -1], [-1, 0, 1], None]) + for x, y, z in it: + z[...] = x*y + assert it.operands[2].shape == (3, 2, 4) + for i in range(a.size): + assert (it.operands[2][i] == a[i]*b).all() + + def test_reduction(self): + from numpy import nditer, arange, array + import sys + a = arange(24).reshape(2, 3, 4) + b = array(0) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) + skip('nditer reduce_ok not implemented yet') + #reduction operands must be readwrite + for x, y in nditer([a, b], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite']]): + y[...] += x + assert b == 276 + assert b == a.sum() + + # reduction and allocation requires op_axes and initialization + it = nditer([a, None], flags=['reduce_ok', 'external_loop'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + for x, y in it: + y[...] 
+= x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + # previous example with buffering, requires more flags and reset + it = nditer([a, None], flags=['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + op_flags=[['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0,1,-1]]) + it.operands[1][...] = 0 + it.reset() + for x, y in it: + y[...] += x + + assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() + assert (it.operands[1] == a.sum(axis=2)).all() + + def test_get_dtypes(self): + from numpy import array, nditer + x = array([1, 2]) + y = array([1.0, 2.0]) + assert nditer([x, y]).dtypes == (x.dtype, y.dtype) + + def test_multi_index(self): + import numpy as np + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + res = [] + while not it.finished: + res.append((it[0], it.multi_index)) + it.iternext() + assert res == [(0, (0, 0)), (1, (0, 1)), + (2, (0, 2)), (3, (1, 0)), + (4, (1, 1)), (5, (1, 2))] diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -3,15 +3,18 @@ indirection is introduced to make the version tag change less often. 
""" +from rpython.rlib import jit, rerased + from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.dictmultiobject import create_iterator_classes -from pypy.objspace.std.dictmultiobject import DictStrategy, _never_equal_to_string -from pypy.objspace.std.dictmultiobject import ObjectDictStrategy -from rpython.rlib import jit, rerased +from pypy.objspace.std.dictmultiobject import ( + DictStrategy, ObjectDictStrategy, _never_equal_to_string, + create_iterator_classes) + class VersionTag(object): pass + class ModuleCell(W_Root): def __init__(self, w_value=None): self.w_value = w_value @@ -19,11 +22,17 @@ def __repr__(self): return "" % (self.w_value, ) + def unwrap_cell(w_value): if isinstance(w_value, ModuleCell): return w_value.w_value return w_value + +def _wrapkey(space, key): + return space.wrap(key) + + class ModuleDictStrategy(DictStrategy): erase, unerase = rerased.new_erasing_pair("modulecell") @@ -55,7 +64,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space if space.is_w(space.type(w_key), space.w_str): - self.setitem_str(w_dict, self.space.str_w(w_key), w_value) + self.setitem_str(w_dict, space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) w_dict.setitem(w_key, w_value) @@ -66,8 +75,8 @@ cell.w_value = w_value return if cell is not None: - # If the new value and the current value are the same, don't create a - # level of indirection, or mutate the version. + # If the new value and the current value are the same, don't + # create a level of indirection, or mutate the version. 
if self.space.is_w(w_value, cell): return w_value = ModuleCell(w_value) @@ -121,8 +130,8 @@ return w_dict.getitem(w_key) def getitem_str(self, w_dict, key): - w_res = self.getdictvalue_no_unwrapping(w_dict, key) - return unwrap_cell(w_res) + cell = self.getdictvalue_no_unwrapping(w_dict, key) + return unwrap_cell(cell) def w_keys(self, w_dict): space = self.space @@ -136,37 +145,43 @@ def items(self, w_dict): space = self.space iterator = self.unerase(w_dict.dstorage).iteritems - return [space.newtuple([space.wrap(key), unwrap_cell(cell)]) - for key, cell in iterator()] + return [space.newtuple([_wrapkey(space, key), unwrap_cell(cell)]) + for key, cell in iterator()] def clear(self, w_dict): self.unerase(w_dict.dstorage).clear() self.mutated() def popitem(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - key, w_value = d.popitem() + key, cell = d.popitem() self.mutated() - return self.space.wrap(key), unwrap_cell(w_value) + return _wrapkey(space, key), unwrap_cell(cell) def switch_to_object_strategy(self, w_dict): + space = self.space d = self.unerase(w_dict.dstorage) - strategy = self.space.fromcache(ObjectDictStrategy) + strategy = space.fromcache(ObjectDictStrategy) d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): - d_new[self.space.wrap(key)] = unwrap_cell(cell) + d_new[_wrapkey(space, key)] = unwrap_cell(cell) w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): return self.unerase(w_dict.dstorage).iterkeys() + def getitervalues(self, w_dict): return self.unerase(w_dict.dstorage).itervalues() + def getiteritems(self, w_dict): return self.unerase(w_dict.dstorage).iteritems() - def wrapkey(space, key): - return space.wrap(key) + + wrapkey = _wrapkey + def wrapvalue(space, value): return unwrap_cell(value) + create_iterator_classes(ModuleDictStrategy) diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- 
a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -1,12 +1,19 @@ -## ---------------------------------------------------------------------------- -## dict strategy (see dictmultiobject.py) +"""dict implementation specialized for keyword argument dicts. -from rpython.rlib import rerased, jit +Based on two lists containing unwrapped key value pairs. +""" + +from rpython.rlib import jit, rerased + from pypy.objspace.std.dictmultiobject import ( BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, create_iterator_classes) +def _wrapkey(space, key): + return space.wrap(key) + + class EmptyKwargsDictStrategy(EmptyDictStrategy): def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) @@ -21,7 +28,7 @@ unerase = staticmethod(unerase) def wrap(self, key): - return self.space.wrap(key) + return _wrapkey(self.space, key) def unwrap(self, wrapped): return self.space.str_w(wrapped) @@ -117,16 +124,14 @@ def items(self, w_dict): space = self.space keys, values_w = self.unerase(w_dict.dstorage) - result = [] - for i in range(len(keys)): - result.append(space.newtuple([self.wrap(keys[i]), values_w[i]])) - return result + return [space.newtuple([self.wrap(keys[i]), values_w[i]]) + for i in range(len(keys))] def popitem(self, w_dict): keys, values_w = self.unerase(w_dict.dstorage) key = keys.pop() w_value = values_w.pop() - return (self.wrap(key), w_value) + return self.wrap(key), w_value def clear(self, w_dict): w_dict.dstorage = self.get_empty_storage() @@ -164,17 +169,15 @@ keys = self.unerase(w_dict.dstorage)[0] return iter(range(len(keys))) - def wrapkey(space, key): - return space.wrap(key) + wrapkey = _wrapkey def next_item(self): strategy = self.strategy assert isinstance(strategy, KwargsDictStrategy) for i in self.iterator: - keys, values_w = strategy.unerase( - self.dictimplementation.dstorage) - return self.space.wrap(keys[i]), values_w[i] + keys, values_w = 
strategy.unerase(self.dictimplementation.dstorage) + return _wrapkey(self.space, keys[i]), values_w[i] else: return None, None diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,42 +1,47 @@ import py + +from pypy.objspace.std.celldict import ModuleDictStrategy from pypy.objspace.std.dictmultiobject import W_DictMultiObject -from pypy.objspace.std.celldict import ModuleCell, ModuleDictStrategy -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, \ - BaseTestRDictImplementation, BaseTestDevolvedDictImplementation -from pypy.interpreter import gateway +from pypy.objspace.std.test.test_dictmultiobject import ( + BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, + FakeString) space = FakeSpace() class TestCellDict(object): + FakeString = FakeString + def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() d = W_DictMultiObject(space, strategy, storage) v1 = strategy.version - d.setitem("a", 1) + key = "a" + w_key = self.FakeString(key) + d.setitem(w_key, 1) v2 = strategy.version assert v1 is not v2 - assert d.getitem("a") == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, "a") == 1 + assert d.getitem(w_key) == 1 + assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 - d.setitem("a", 2) + d.setitem(w_key, 2) v3 = strategy.version assert v2 is not v3 - assert d.getitem("a") == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 2 + assert d.getitem(w_key) == 2 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 - d.setitem("a", 3) + d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 - assert d.getitem("a") == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, "a").w_value == 3 + assert d.getitem(w_key) == 3 + assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 - 
d.delitem("a") + d.delitem(w_key) v5 = strategy.version assert v5 is not v4 - assert d.getitem("a") is None - assert d.strategy.getdictvalue_no_unwrapping(d, "a") is None + assert d.getitem(w_key) is None + assert d.strategy.getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1254,12 +1254,13 @@ return other == "s" d = self.get_impl() - d.setitem("s", 12) - assert d.getitem("s") == 12 - assert d.getitem(F()) == d.getitem("s") + w_key = FakeString("s") + d.setitem(w_key, 12) + assert d.getitem(w_key) == 12 + assert d.getitem(F()) == d.getitem(w_key) d = self.get_impl() - x = d.setdefault("s", 12) + x = d.setdefault(w_key, 12) assert x == 12 x = d.setdefault(F(), 12) assert x == 12 @@ -1269,10 +1270,10 @@ assert x == 12 d = self.get_impl() - d.setitem("s", 12) + d.setitem(w_key, 12) d.delitem(F()) - assert "s" not in d.w_keys() + assert w_key not in d.w_keys() assert F() not in d.w_keys() class TestBytesDictImplementation(BaseTestRDictImplementation): diff --git a/rpython/translator/c/src/instrument.c b/rpython/translator/c/src/instrument.c --- a/rpython/translator/c/src/instrument.c +++ b/rpython/translator/c/src/instrument.c @@ -6,10 +6,10 @@ #include #include #include +#include +#include #ifndef _WIN32 #include -#include -#include #include #else #include diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -2,6 +2,7 @@ #ifdef _WIN32 +#include #include #define __thread __declspec(thread) typedef DWORD RPyThreadTLS; From noreply at buildbot.pypy.org Fri Apr 18 04:50:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 04:50:31 +0200 
(CEST) Subject: [pypy-commit] pypy numpy-speed: update nditer to use iter states Message-ID: <20140418025031.1F7E21C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-speed Changeset: r70745:80dd8d52084e Date: 2014-04-17 22:38 -0400 http://bitbucket.org/pypy/pypy/changeset/80dd8d52084e/ Log: update nditer to use iter states diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -27,16 +27,17 @@ def __init__(self, it, op_flags): self.it = it + self.st = it.reset() self.op_flags = op_flags def done(self): - return self.it.done() + return self.it.done(self.st) def next(self): - self.it.next() + self.st = self.it.next(self.st) def getitem(self, space, array): - return self.op_flags.get_it_item[self.index](space, array, self.it) + return self.op_flags.get_it_item[self.index](space, array, self.it, self.st) def setitem(self, space, array, val): xxx @@ -89,13 +90,13 @@ self.get_it_item = (get_readonly_item, get_readonly_slice) -def get_readonly_item(space, array, it): - return space.wrap(it.getitem()) +def get_readonly_item(space, array, it, st): + return space.wrap(it.getitem(st)) -def get_readwrite_item(space, array, it): +def get_readwrite_item(space, array, it, st): #create a single-value view (since scalars are not views) - res = SliceArray(it.array.start + it.offset, [0], [0], [1], it.array, array) + res = SliceArray(it.array.start + st.offset, [0], [0], [1], it.array, array) #it.dtype.setitem(res, 0, it.getitem()) return W_NDimArray(res) From noreply at buildbot.pypy.org Fri Apr 18 04:50:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 04:50:32 +0200 (CEST) Subject: [pypy-commit] pypy default: merge numpy-speed Message-ID: <20140418025032.A3C671C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70746:f48615b2d047 Date: 2014-04-17 22:39 -0400 
http://bitbucket.org/pypy/pypy/changeset/f48615b2d047/ Log: merge numpy-speed diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -284,9 +284,11 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return ArrayIter(self, support.product(shape), shape, r[0], r[1]) - return ArrayIter(self, self.get_size(), self.shape, - self.strides, self.backstrides) + i = ArrayIter(self, support.product(shape), shape, r[0], r[1]) + else: + i = ArrayIter(self, self.get_size(), self.shape, + self.strides, self.backstrides) + return i, i.reset() def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -156,10 +156,10 @@ "string is smaller than requested size")) a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) - ai = a.create_iter() + ai, state = a.create_iter() for val in items: - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) return space.wrap(a) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -32,23 +32,23 @@ self.reset() def reset(self): - self.iter = self.base.create_iter() + self.iter, self.state = self.base.create_iter() def descr_len(self, space): return space.wrap(self.base.get_size()) def descr_next(self, space): - if self.iter.done(): + if self.iter.done(self.state): raise OperationError(space.w_StopIteration, space.w_None) - w_res = self.iter.getitem() - self.iter.next() + w_res = self.iter.getitem(self.state) + self.state = self.iter.next(self.state) return w_res def descr_index(self, space): - return space.wrap(self.iter.index) + return space.wrap(self.state.index) def descr_coords(self, space): - 
coords = self.base.to_coords(space, space.wrap(self.iter.index)) + coords = self.base.to_coords(space, space.wrap(self.state.index)) return space.newtuple([space.wrap(c) for c in coords]) def descr_getitem(self, space, w_idx): @@ -58,13 +58,13 @@ self.reset() base = self.base start, stop, step, length = space.decode_index4(w_idx, base.get_size()) - base_iter = base.create_iter() - base_iter.next_skip_x(start) + base_iter, base_state = base.create_iter() + base_state = base_iter.next_skip_x(base_state, start) if length == 1: - return base_iter.getitem() + return base_iter.getitem(base_state) res = W_NDimArray.from_shape(space, [length], base.get_dtype(), base.get_order(), w_instance=base) - return loop.flatiter_getitem(res, base_iter, step) + return loop.flatiter_getitem(res, base_iter, base_state, step) def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -52,19 +52,20 @@ self.shapelen = len(shape) self.indexes = [0] * len(shape) self._done = False - self.idx_w = [None] * len(idx_w) + self.idx_w_i = [None] * len(idx_w) + self.idx_w_s = [None] * len(idx_w) for i, w_idx in enumerate(idx_w): if isinstance(w_idx, W_NDimArray): - self.idx_w[i] = w_idx.create_iter(shape) + self.idx_w_i[i], self.idx_w_s[i] = w_idx.create_iter(shape) def done(self): return self._done @jit.unroll_safe def next(self): - for w_idx in self.idx_w: - if w_idx is not None: - w_idx.next() + for i, idx_w_i in enumerate(self.idx_w_i): + if idx_w_i is not None: + self.idx_w_s[i] = idx_w_i.next(self.idx_w_s[i]) for i in range(self.shapelen - 1, -1, -1): if self.indexes[i] < self.shape[i] - 1: self.indexes[i] += 1 @@ -79,6 +80,15 @@ return [space.wrap(self.indexes[i]) for i in range(shapelen)] +class IterState(object): + _immutable_fields_ = ['index', 'indices[*]', 'offset'] + + def 
__init__(self, index, indices, offset): + self.index = index + self.indices = indices + self.offset = offset + + class ArrayIter(object): _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', 'strides[*]', 'backstrides[*]'] @@ -92,61 +102,59 @@ self.strides = strides self.backstrides = backstrides - self.index = 0 - self.indices = [0] * len(shape) - self.offset = array.start + def reset(self): + return IterState(0, [0] * len(self.shape_m1), self.array.start) @jit.unroll_safe - def reset(self): - self.index = 0 + def next(self, state): + index = state.index + 1 + indices = state.indices + offset = state.offset for i in xrange(self.ndim_m1, -1, -1): - self.indices[i] = 0 - self.offset = self.array.start + idx = indices[i] + if idx < self.shape_m1[i]: + indices[i] = idx + 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + return IterState(index, indices, offset) @jit.unroll_safe - def next(self): - self.index += 1 + def next_skip_x(self, state, step): + assert step >= 0 + if step == 0: + return state + index = state.index + step + indices = state.indices + offset = state.offset for i in xrange(self.ndim_m1, -1, -1): - idx = self.indices[i] - if idx < self.shape_m1[i]: - self.indices[i] = idx + 1 - self.offset += self.strides[i] + idx = indices[i] + if idx < (self.shape_m1[i] + 1) - step: + indices[i] = idx + step + offset += self.strides[i] * step break else: - self.indices[i] = 0 - self.offset -= self.backstrides[i] - - @jit.unroll_safe - def next_skip_x(self, step): - assert step >= 0 - if step == 0: - return - self.index += step - for i in xrange(self.ndim_m1, -1, -1): - idx = self.indices[i] - if idx < (self.shape_m1[i] + 1) - step: - self.indices[i] = idx + step - self.offset += self.strides[i] * step - break - else: - rem_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) + rem_step = (idx + step) // (self.shape_m1[i] + 1) cur_step = step - rem_step * (self.shape_m1[i] + 1) - self.indices[i] += 
cur_step - self.offset += self.strides[i] * cur_step + indices[i] = idx + cur_step + offset += self.strides[i] * cur_step step = rem_step assert step > 0 + return IterState(index, indices, offset) - def done(self): - return self.index >= self.size + def done(self, state): + return state.index >= self.size - def getitem(self): - return self.array.getitem(self.offset) + def getitem(self, state): + return self.array.getitem(state.offset) - def getitem_bool(self): - return self.array.getitem_bool(self.offset) + def getitem_bool(self, state): + return self.array.getitem_bool(state.offset) - def setitem(self, elem): - self.array.setitem(self.offset, elem) + def setitem(self, state, elem): + self.array.setitem(state.offset, elem) class SliceIterator(ArrayIter): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -12,11 +12,10 @@ AllButAxisIter -call2_driver = jit.JitDriver(name='numpy_call2', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_lhs', 'w_rhs', 'out', - 'left_iter', 'right_iter', 'out_iter']) +call2_driver = jit.JitDriver( + name='numpy_call2', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority @@ -46,47 +45,40 @@ if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=lhs_for_subtype) - left_iter = w_lhs.create_iter(shape) - right_iter = w_rhs.create_iter(shape) - out_iter = out.create_iter(shape) + left_iter, left_state = w_lhs.create_iter(shape) + right_iter, right_state = w_rhs.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): call2_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_lhs=w_lhs, w_rhs=w_rhs, - 
out=out, - left_iter=left_iter, right_iter=right_iter, - out_iter=out_iter) - w_left = left_iter.getitem().convert_to(space, calc_dtype) - w_right = right_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(func(calc_dtype, w_left, w_right).convert_to( + calc_dtype=calc_dtype, res_dtype=res_dtype) + w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) + w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( space, res_dtype)) - left_iter.next() - right_iter.next() - out_iter.next() + left_state = left_iter.next(left_state) + right_state = right_iter.next(right_state) + out_state = out_iter.next(out_state) return out -call1_driver = jit.JitDriver(name='numpy_call1', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_obj', 'out', 'obj_iter', - 'out_iter']) +call1_driver = jit.JitDriver( + name='numpy_call1', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) - obj_iter = w_obj.create_iter(shape) - out_iter = out.create_iter(shape) + obj_iter, obj_state = w_obj.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_obj=w_obj, out=out, - obj_iter=obj_iter, out_iter=out_iter) - elem = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(func(calc_dtype, elem).convert_to(space, res_dtype)) - out_iter.next() - obj_iter.next() + calc_dtype=calc_dtype, res_dtype=res_dtype) + elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, 
res_dtype)) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) return out setslice_driver = jit.JitDriver(name='numpy_setslice', @@ -96,18 +88,20 @@ def setslice(space, shape, target, source): # note that unlike everything else, target and source here are # array implementations, not arrays - target_iter = target.create_iter(shape) - source_iter = source.create_iter(shape) + target_iter, target_state = target.create_iter(shape) + source_iter, source_state = source.create_iter(shape) dtype = target.dtype shapelen = len(shape) - while not target_iter.done(): + while not target_iter.done(target_state): setslice_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + val = source_iter.getitem(source_state) if dtype.is_str_or_unicode(): - target_iter.setitem(dtype.coerce(space, source_iter.getitem())) + val = dtype.coerce(space, val) else: - target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) - target_iter.next() - source_iter.next() + val = val.convert_to(space, dtype) + target_iter.setitem(target_state, val) + target_state = target_iter.next(target_state) + source_state = source_iter.next(source_state) return target reduce_driver = jit.JitDriver(name='numpy_reduce', @@ -116,22 +110,22 @@ reds = 'auto') def compute_reduce(space, obj, calc_dtype, func, done_func, identity): - obj_iter = obj.create_iter() + obj_iter, obj_state = obj.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(space, calc_dtype) - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + obj_state = obj_iter.next(obj_state) else: cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, calc_dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, 
calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval cur_value = func(calc_dtype, cur_value, rval) - obj_iter.next() + obj_state = obj_iter.next(obj_state) return cur_value reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', @@ -139,69 +133,76 @@ reds = 'auto') def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity): - obj_iter = obj.create_iter() - out_iter = out.create_iter() + obj_iter, obj_state = obj.create_iter() + out_iter, out_state = out.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) else: cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) def fill(arr, box): - arr_iter = arr.create_iter() - while not arr_iter.done(): - arr_iter.setitem(box) - arr_iter.next() + arr_iter, arr_state = arr.create_iter() + while not arr_iter.done(arr_state): + arr_iter.setitem(arr_state, box) + arr_state = arr_iter.next(arr_state) def assign(space, arr, seq): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() for item in seq: - arr_iter.setitem(arr_dtype.coerce(space, item)) - arr_iter.next() + arr_iter.setitem(arr_state, 
arr_dtype.coerce(space, item)) + arr_state = arr_iter.next(arr_state) where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') def where(space, out, shape, arr, x, y, dtype): - out_iter = out.create_iter(shape) - arr_iter = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) arr_dtype = arr.get_dtype() - x_iter = x.create_iter(shape) - y_iter = y.create_iter(shape) + x_iter, x_state = x.create_iter(shape) + y_iter, y_state = y.create_iter(shape) if x.is_scalar(): if y.is_scalar(): - iter = arr_iter + iter, state = arr_iter, arr_state else: - iter = y_iter + iter, state = y_iter, y_state else: - iter = x_iter + iter, state = x_iter, x_state shapelen = len(shape) - while not iter.done(): + while not iter.done(state): where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, arr_dtype=arr_dtype) - w_cond = arr_iter.getitem() + w_cond = arr_iter.getitem(arr_state) if arr_dtype.itemtype.bool(w_cond): - w_val = x_iter.getitem().convert_to(space, dtype) + w_val = x_iter.getitem(x_state).convert_to(space, dtype) else: - w_val = y_iter.getitem().convert_to(space, dtype) - out_iter.setitem(w_val) - out_iter.next() - arr_iter.next() - x_iter.next() - y_iter.next() + w_val = y_iter.getitem(y_state).convert_to(space, dtype) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) + arr_state = arr_iter.next(arr_state) + x_state = x_iter.next(x_state) + y_state = y_iter.next(y_state) + if x.is_scalar(): + if y.is_scalar(): + state = arr_state + else: + state = y_state + else: + state = x_state return out axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', @@ -212,31 +213,36 @@ def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): out_iter = AxisIter(out.implementation, arr.get_shape(), axis, cumulative) + out_state = out_iter.reset() if cumulative: temp_iter = AxisIter(temp.implementation, 
arr.get_shape(), axis, False) + temp_state = temp_iter.reset() else: - temp_iter = out_iter # hack - arr_iter = arr.create_iter() + temp_iter = out_iter # hack + temp_state = out_state + arr_iter, arr_state = arr.create_iter() if identity is not None: identity = identity.convert_to(space, dtype) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - assert not arr_iter.done() - w_val = arr_iter.getitem().convert_to(space, dtype) - if out_iter.indices[axis] == 0: + assert not arr_iter.done(arr_state) + w_val = arr_iter.getitem(arr_state).convert_to(space, dtype) + if out_state.indices[axis] == 0: if identity is not None: w_val = func(dtype, identity, w_val) else: - cur = temp_iter.getitem() + cur = temp_iter.getitem(temp_state) w_val = func(dtype, cur, w_val) - out_iter.setitem(w_val) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) if cumulative: - temp_iter.setitem(w_val) - temp_iter.next() - arr_iter.next() - out_iter.next() + temp_iter.setitem(temp_state, w_val) + temp_state = temp_iter.next(temp_state) + else: + temp_state = out_state + arr_state = arr_iter.next(arr_state) return out @@ -249,18 +255,18 @@ result = 0 idx = 1 dtype = arr.get_dtype() - iter = arr.create_iter() - cur_best = iter.getitem() - iter.next() + iter, state = arr.create_iter() + cur_best = iter.getitem(state) + state = iter.next(state) shapelen = len(arr.get_shape()) - while not iter.done(): + while not iter.done(state): arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_val = iter.getitem() + w_val = iter.getitem(state) new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - iter.next() + state = iter.next(state) idx += 1 return result return argmin_argmax @@ -291,17 +297,19 @@ right_impl = right.implementation assert left_shape[-1] == 
right_shape[right_critical_dim] assert result.get_dtype() == dtype - outi = result.create_iter() + outi, outs = result.create_iter() lefti = AllButAxisIter(left_impl, len(left_shape) - 1) righti = AllButAxisIter(right_impl, right_critical_dim) + lefts = lefti.reset() + rights = righti.reset() n = left_impl.shape[-1] s1 = left_impl.strides[-1] s2 = right_impl.strides[right_critical_dim] - while not lefti.done(): - while not righti.done(): - oval = outi.getitem() - i1 = lefti.offset - i2 = righti.offset + while not lefti.done(lefts): + while not righti.done(rights): + oval = outi.getitem(outs) + i1 = lefts.offset + i2 = rights.offset i = 0 while i < n: i += 1 @@ -311,11 +319,11 @@ oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) i1 += s1 i2 += s2 - outi.setitem(oval) - outi.next() - righti.next() - righti.reset() - lefti.next() + outi.setitem(outs, oval) + outs = outi.next(outs) + rights = righti.next(rights) + rights = righti.reset() + lefts = lefti.next(lefts) return result count_all_true_driver = jit.JitDriver(name = 'numpy_count', @@ -324,13 +332,13 @@ def count_all_true_concrete(impl): s = 0 - iter = impl.create_iter() + iter, state = impl.create_iter() shapelen = len(impl.shape) dtype = impl.dtype - while not iter.done(): + while not iter.done(state): count_all_true_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - s += iter.getitem_bool() - iter.next() + s += iter.getitem_bool(state) + state = iter.next(state) return s def count_all_true(arr): @@ -344,18 +352,18 @@ reds = 'auto') def nonzero(res, arr, box): - res_iter = res.create_iter() - arr_iter = arr.create_iter() + res_iter, res_state = res.create_iter() + arr_iter, arr_state = arr.create_iter() shapelen = len(arr.shape) dtype = arr.dtype dims = range(shapelen) - while not arr_iter.done(): + while not arr_iter.done(arr_state): nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) - if arr_iter.getitem_bool(): + if arr_iter.getitem_bool(arr_state): for d in dims: - 
res_iter.setitem(box(arr_iter.indices[d])) - res_iter.next() - arr_iter.next() + res_iter.setitem(res_state, box(arr_state.indices[d])) + res_state = res_iter.next(res_state) + arr_state = arr_iter.next(arr_state) return res @@ -365,26 +373,26 @@ reds = 'auto') def getitem_filter(res, arr, index): - res_iter = res.create_iter() + res_iter, res_state = res.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() - arr_iter = arr.create_iter() + index_iter, index_state = index.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() index_dtype = index.get_dtype() # XXX length of shape of index as well? - while not index_iter.done(): + while not index_iter.done(index_state): getitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - res_iter.setitem(arr_iter.getitem()) - res_iter.next() - index_iter.next() - arr_iter.next() + if index_iter.getitem_bool(index_state): + res_iter.setitem(res_state, arr_iter.getitem(arr_state)) + res_state = res_iter.next(res_state) + index_state = index_iter.next(index_state) + arr_state = arr_iter.next(arr_state) return res setitem_filter_driver = jit.JitDriver(name = 'numpy_setitem_bool', @@ -393,41 +401,42 @@ reds = 'auto') def setitem_filter(space, arr, index, value): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() + index_iter, index_state = index.create_iter() if value.get_size() == 1: 
- value_iter = value.create_iter(arr.get_shape()) + value_iter, value_state = value.create_iter(arr.get_shape()) else: - value_iter = value.create_iter() + value_iter, value_state = value.create_iter() index_dtype = index.get_dtype() arr_dtype = arr.get_dtype() - while not index_iter.done(): + while not index_iter.done(index_state): setitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - arr_iter.setitem(arr_dtype.coerce(space, value_iter.getitem())) - value_iter.next() - arr_iter.next() - index_iter.next() + if index_iter.getitem_bool(index_state): + val = arr_dtype.coerce(space, value_iter.getitem(value_state)) + value_state = value_iter.next(value_state) + arr_iter.setitem(arr_state, val) + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem', greens = ['dtype'], reds = 'auto') -def flatiter_getitem(res, base_iter, step): - ri = res.create_iter() +def flatiter_getitem(res, base_iter, base_state, step): + ri, rs = res.create_iter() dtype = res.get_dtype() - while not ri.done(): + while not ri.done(rs): flatiter_getitem_driver.jit_merge_point(dtype=dtype) - ri.setitem(base_iter.getitem()) - base_iter.next_skip_x(step) - ri.next() + ri.setitem(rs, base_iter.getitem(base_state)) + base_state = base_iter.next_skip_x(base_state, step) + rs = ri.next(rs) return res flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', @@ -436,19 +445,21 @@ def flatiter_setitem(space, arr, val, start, step, length): dtype = arr.get_dtype() - arr_iter = arr.create_iter() - val_iter = val.create_iter() - arr_iter.next_skip_x(start) + arr_iter, arr_state = arr.create_iter() + val_iter, val_state = val.create_iter() + arr_state = arr_iter.next_skip_x(arr_state, start) while length > 0: flatiter_setitem_driver.jit_merge_point(dtype=dtype) + val = val_iter.getitem(val_state) if 
dtype.is_str_or_unicode(): - arr_iter.setitem(dtype.coerce(space, val_iter.getitem())) + val = dtype.coerce(space, val) else: - arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) + val = val.convert_to(space, dtype) + arr_iter.setitem(arr_state, val) # need to repeat i_nput values until all assignments are done - arr_iter.next_skip_x(step) + arr_state = arr_iter.next_skip_x(arr_state, step) + val_state = val_iter.next(val_state) length -= 1 - val_iter.next() fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', greens = ['itemsize', 'dtype'], @@ -456,30 +467,30 @@ def fromstring_loop(space, a, dtype, itemsize, s): i = 0 - ai = a.create_iter() - while not ai.done(): + ai, state = a.create_iter() + while not ai.done(state): fromstring_driver.jit_merge_point(dtype=dtype, itemsize=itemsize) sub = s[i*itemsize:i*itemsize + itemsize] if dtype.is_str_or_unicode(): val = dtype.coerce(space, space.wrap(sub)) else: val = dtype.itemtype.runpack_str(space, sub) - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) i += 1 def tostring(space, arr): builder = StringBuilder() - iter = arr.create_iter() + iter, state = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().elsize res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), w_res_str.implementation.get_storage_as_int(space)) - while not iter.done(): - w_res_str.implementation.setitem(0, iter.getitem()) + while not iter.done(state): + w_res_str.implementation.setitem(0, iter.getitem(state)) for i in range(itemsize): builder.append(res_str_casted[i]) - iter.next() + state = iter.next(state) return builder.build() getitem_int_driver = jit.JitDriver(name = 'numpy_getitem_int', @@ -500,8 +511,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = 
iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] res.descr_setitem(space, space.newtuple(prefix_w[:prefixlen] + @@ -528,8 +539,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] w_idx = space.newtuple(prefix_w[:prefixlen] + iter.get_index(space, @@ -547,13 +558,14 @@ def byteswap(from_, to): dtype = from_.dtype - from_iter = from_.create_iter() - to_iter = to.create_iter() - while not from_iter.done(): + from_iter, from_state = from_.create_iter() + to_iter, to_state = to.create_iter() + while not from_iter.done(from_state): byteswap_driver.jit_merge_point(dtype=dtype) - to_iter.setitem(dtype.itemtype.byteswap(from_iter.getitem())) - to_iter.next() - from_iter.next() + val = dtype.itemtype.byteswap(from_iter.getitem(from_state)) + to_iter.setitem(to_state, val) + to_state = to_iter.next(to_state) + from_state = from_iter.next(from_state) choose_driver = jit.JitDriver(name='numpy_choose_driver', greens = ['shapelen', 'mode', 'dtype'], @@ -561,13 +573,15 @@ def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) - iterators = [a.create_iter(shape) for a in choices] - arr_iter = arr.create_iter(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + pairs = [a.create_iter(shape) for a in choices] + iterators = [i[0] for i in pairs] + states = [i[1] for i in pairs] + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): choose_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, mode=mode) - index = support.index_w(space, arr_iter.getitem()) + index = support.index_w(space, arr_iter.getitem(arr_state)) if index < 0 or index >= len(iterators): if mode == NPY.RAISE: raise OperationError(space.w_ValueError, 
space.wrap( @@ -580,72 +594,73 @@ index = 0 else: index = len(iterators) - 1 - out_iter.setitem(iterators[index].getitem().convert_to(space, dtype)) - for iter in iterators: - iter.next() - out_iter.next() - arr_iter.next() + val = iterators[index].getitem(states[index]).convert_to(space, dtype) + out_iter.setitem(out_state, val) + for i in range(len(iterators)): + states[i] = iterators[i].next(states[i]) + out_state = out_iter.next(out_state) + arr_state = arr_iter.next(arr_state) clip_driver = jit.JitDriver(name='numpy_clip_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def clip(space, arr, shape, min, max, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) dtype = out.get_dtype() shapelen = len(shape) - min_iter = min.create_iter(shape) - max_iter = max.create_iter(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + min_iter, min_state = min.create_iter(shape) + max_iter, max_state = max.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(space, dtype) - w_min = min_iter.getitem().convert_to(space, dtype) - w_max = max_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) + w_min = min_iter.getitem(min_state).convert_to(space, dtype) + w_max = max_iter.getitem(max_state).convert_to(space, dtype) if dtype.itemtype.lt(w_v, w_min): w_v = w_min elif dtype.itemtype.gt(w_v, w_max): w_v = w_max - out_iter.setitem(w_v) - arr_iter.next() - max_iter.next() - out_iter.next() - min_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + min_state = min_iter.next(min_state) + max_state = max_iter.next(max_state) + out_state = out_iter.next(out_state) round_driver = jit.JitDriver(name='numpy_round_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def round(space, arr, dtype, 
shape, decimals, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + while not arr_iter.done(arr_state): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) w_v = dtype.itemtype.round(w_v, decimals) - out_iter.setitem(w_v) - arr_iter.next() - out_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + out_state = out_iter.next(out_state) diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): - out_iter = out.create_iter() + out_iter, out_state = out.create_iter() i = 0 index = [0] * 2 while i < size: diagonal_simple_driver.jit_merge_point(axis1=axis1, axis2=axis2) index[axis1] = i index[axis2] = i + offset - out_iter.setitem(arr.getitem_index(space, index)) + out_iter.setitem(out_state, arr.getitem_index(space, index)) i += 1 - out_iter.next() + out_state = out_iter.next(out_state) def diagonal_array(space, arr, out, offset, axis1, axis2, shape): - out_iter = out.create_iter() + out_iter, out_state = out.create_iter() iter = PureShapeIter(shape, []) shapelen_minus_1 = len(shape) - 1 assert shapelen_minus_1 >= 0 @@ -667,6 +682,6 @@ indexes = (iter.indexes[:a] + [last_index + offset] + iter.indexes[a:b] + [last_index] + iter.indexes[b:shapelen_minus_1]) - out_iter.setitem(arr.getitem_index(space, indexes)) + out_iter.setitem(out_state, arr.getitem_index(space, indexes)) iter.next() - out_iter.next() + out_state = out_iter.next(out_state) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -260,24 +260,24 
@@ return space.call_function(cache.w_array_str, self) def dump_data(self, prefix='array(', separator=',', suffix=')'): - i = self.create_iter() + i, state = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() s.append(prefix) if not self.is_scalar(): s.append('[') - while not i.done(): + while not i.done(state): if first: first = False else: s.append(separator) s.append(' ') if self.is_scalar() and dtype.is_str(): - s.append(dtype.itemtype.to_str(i.getitem())) + s.append(dtype.itemtype.to_str(i.getitem(state))) else: - s.append(dtype.itemtype.str_format(i.getitem())) - i.next() + s.append(dtype.itemtype.str_format(i.getitem(state))) + state = i.next(state) if not self.is_scalar(): s.append(']') s.append(suffix) @@ -818,8 +818,8 @@ if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - iter = self.create_iter() - return space.wrap(space.is_true(iter.getitem())) + iter, state = self.create_iter() + return space.wrap(space.is_true(iter.getitem(state))) def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): @@ -1095,11 +1095,11 @@ builder = StringBuilder() if isinstance(self.implementation, SliceArray): - iter = self.implementation.create_iter() - while not iter.done(): - box = iter.getitem() + iter, state = self.implementation.create_iter() + while not iter.done(state): + box = iter.getitem(state) builder.append(box.raw_str()) - iter.next() + state = iter.next(state) else: builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -27,16 +27,17 @@ def __init__(self, it, op_flags): self.it = it + self.st = it.reset() self.op_flags = op_flags def done(self): - return self.it.done() + return 
self.it.done(self.st) def next(self): - self.it.next() + self.st = self.it.next(self.st) def getitem(self, space, array): - return self.op_flags.get_it_item[self.index](space, array, self.it) + return self.op_flags.get_it_item[self.index](space, array, self.it, self.st) def setitem(self, space, array, val): xxx @@ -89,13 +90,13 @@ self.get_it_item = (get_readonly_item, get_readonly_slice) -def get_readonly_item(space, array, it): - return space.wrap(it.getitem()) +def get_readonly_item(space, array, it, st): + return space.wrap(it.getitem(st)) -def get_readwrite_item(space, array, it): +def get_readwrite_item(space, array, it, st): #create a single-value view (since scalars are not views) - res = SliceArray(it.array.start + it.offset, [0], [0], [1], it.array, array) + res = SliceArray(it.array.start + st.offset, [0], [0], [1], it.array, array) #it.dtype.setitem(res, 0, it.getitem()) return W_NDimArray(res) diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -148,20 +148,22 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() index_impl = index_arr.implementation index_iter = AllButAxisIter(index_impl, axis) + index_state = index_iter.reset() stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): + while not arr_iter.done(arr_state): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + - index_iter.offset, i) + index_state.offset, i) r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_iter.offset, arr_iter.offset) + arr.get_storage(), storage, index_state.offset, arr_state.offset) ArgSort(r).sort() - arr_iter.next() - index_iter.next() + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) 
return index_arr return argsort @@ -292,12 +294,13 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() stride_size = arr.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): - r = Repr(stride_size, axis_size, arr.get_storage(), arr_iter.offset) + while not arr_iter.done(arr_state): + r = Repr(stride_size, axis_size, arr.get_storage(), arr_state.offset) ArgSort(r).sort() - arr_iter.next() + arr_state = arr_iter.next(arr_state) return sort diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -16,17 +16,18 @@ assert backstrides == [10, 4] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() - i.next() - assert i.offset == 3 - assert not i.done() - assert i.indices == [0,3] + s = i.reset() + s = i.next(s) + s = i.next(s) + s = i.next(s) + assert s.offset == 3 + assert not i.done(s) + assert s.indices == [0,3] #cause a dimension overflow - i.next() - i.next() - assert i.offset == 5 - assert i.indices == [1,0] + s = i.next(s) + s = i.next(s) + assert s.offset == 5 + assert s.indices == [1,0] #Now what happens if the array is transposed? 
strides[-1] != 1 # therefore layout is non-contiguous @@ -35,17 +36,18 @@ assert backstrides == [2, 12] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() - i.next() - assert i.offset == 9 - assert not i.done() - assert i.indices == [0,3] + s = i.reset() + s = i.next(s) + s = i.next(s) + s = i.next(s) + assert s.offset == 9 + assert not i.done(s) + assert s.indices == [0,3] #cause a dimension overflow - i.next() - i.next() - assert i.offset == 1 - assert i.indices == [1,0] + s = i.next(s) + s = i.next(s) + assert s.offset == 1 + assert s.indices == [1,0] def test_iterator_step(self): #iteration in C order with #contiguous layout => strides[-1] is 1 @@ -56,22 +58,23 @@ assert backstrides == [10, 4] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 6 - assert not i.done() - assert i.indices == [1,1] + s = i.reset() + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + assert s.offset == 6 + assert not i.done(s) + assert s.indices == [1,1] #And for some big skips - i.next_skip_x(5) - assert i.offset == 11 - assert i.indices == [2,1] - i.next_skip_x(5) + s = i.next_skip_x(s, 5) + assert s.offset == 11 + assert s.indices == [2,1] + s = i.next_skip_x(s, 5) # Note: the offset does not overflow but recycles, # this is good for broadcast - assert i.offset == 1 - assert i.indices == [0,1] - assert i.done() + assert s.offset == 1 + assert s.indices == [0,1] + assert i.done(s) #Now what happens if the array is transposed? 
strides[-1] != 1 # therefore layout is non-contiguous @@ -80,17 +83,18 @@ assert backstrides == [2, 12] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 4 - assert i.indices == [1,1] - assert not i.done() - i.next_skip_x(5) - assert i.offset == 5 - assert i.indices == [2,1] - assert not i.done() - i.next_skip_x(5) - assert i.indices == [0,1] - assert i.offset == 3 - assert i.done() + s = i.reset() + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + assert s.offset == 4 + assert s.indices == [1,1] + assert not i.done(s) + s = i.next_skip_x(s, 5) + assert s.offset == 5 + assert s.indices == [2,1] + assert not i.done(s) + s = i.next_skip_x(s, 5) + assert s.indices == [0,1] + assert s.offset == 3 + assert i.done(s) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -47,7 +47,8 @@ raise Exception("need results") w_res = interp.results[-1] if isinstance(w_res, W_NDimArray): - w_res = w_res.create_iter().getitem() + i, s = w_res.create_iter() + w_res = i.getitem(s) if isinstance(w_res, boxes.W_Float64Box): return w_res.value if isinstance(w_res, boxes.W_Int64Box): @@ -101,7 +102,6 @@ self.check_simple_loop({ 'float_add': 1, 'getarrayitem_gc': 3, - 'getfield_gc': 7, 'guard_false': 1, 'guard_not_invalidated': 1, 'guard_true': 3, @@ -112,7 +112,6 @@ 'raw_load': 2, 'raw_store': 1, 'setarrayitem_gc': 3, - 'setfield_gc': 6, }) def define_pow(): @@ -132,7 +131,6 @@ 'float_mul': 2, 'float_ne': 1, 'getarrayitem_gc': 3, - 'getfield_gc': 7, 'guard_false': 4, 'guard_not_invalidated': 1, 'guard_true': 5, @@ -144,7 +142,6 @@ 'raw_load': 2, 'raw_store': 1, 'setarrayitem_gc': 3, - 'setfield_gc': 6, }) def define_sum(): @@ -527,7 +524,6 @@ self.check_trace_count(1) self.check_simple_loop({ 'getarrayitem_gc': 2, - 
'getfield_gc': 4, 'guard_not_invalidated': 1, 'guard_true': 3, 'int_add': 6, @@ -538,13 +534,12 @@ 'raw_load': 1, 'raw_store': 1, 'setarrayitem_gc': 2, - 'setfield_gc': 4, }) def define_dot(): return """ a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] - b=[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]] + b = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]] c = dot(a, b) c -> 1 -> 2 """ @@ -564,29 +559,30 @@ 'raw_load': 2, }) self.check_resops({ + 'arraylen_gc': 1, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 7, 'getarrayitem_gc_pure': 15, - 'getfield_gc': 35, - 'getfield_gc_pure': 39, + 'getfield_gc': 8, + 'getfield_gc_pure': 44, 'guard_class': 4, 'guard_false': 14, - 'guard_nonnull': 12, - 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, 'guard_true': 13, - 'guard_value': 4, 'int_add': 25, 'int_ge': 4, 'int_le': 8, 'int_lt': 11, 'int_sub': 4, 'jump': 3, + 'new_array': 1, + 'new_with_vtable': 7, 'raw_load': 6, 'raw_store': 1, - 'setarrayitem_gc': 10, - 'setfield_gc': 14, + 'same_as': 2, + 'setarrayitem_gc': 8, + 'setfield_gc': 16, }) def define_argsort(): @@ -599,3 +595,31 @@ def test_argsort(self): result = self.run("argsort") assert result == 6 + + def define_where(): + return """ + a = [1, 0, 1, 0] + x = [1, 2, 3, 4] + y = [-10, -20, -30, -40] + r = where(a, x, y) + r -> 3 + """ + + def test_where(self): + result = self.run("where") + assert result == -40 + self.check_trace_count(1) + self.check_simple_loop({ + 'float_ne': 1, + 'getarrayitem_gc': 4, + 'guard_false': 1, + 'guard_not_invalidated': 1, + 'guard_true': 5, + 'int_add': 12, + 'int_ge': 1, + 'int_lt': 4, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 4, + }) From noreply at buildbot.pypy.org Fri Apr 18 04:50:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 04:50:33 +0200 (CEST) Subject: [pypy-commit] pypy default: update whatsnew Message-ID: <20140418025033.BB9051C048F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: 
r70747:001f22802214 Date: 2014-04-17 22:41 -0400 http://bitbucket.org/pypy/pypy/changeset/001f22802214/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -140,3 +140,6 @@ .. branch: numpypy-nditer Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. branch: numpy-speed +Separate iterator from its state so jit can optimize better From noreply at buildbot.pypy.org Fri Apr 18 07:15:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 07:15:19 +0200 (CEST) Subject: [pypy-commit] pypy default: enable slice test_zjit Message-ID: <20140418051519.31DC91C0185@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70748:2b3387207a65 Date: 2014-04-18 00:46 -0400 http://bitbucket.org/pypy/pypy/changeset/2b3387207a65/ Log: enable slice test_zjit diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -353,14 +353,21 @@ def test_slice(self): result = self.run("slice") assert result == 18 - py.test.skip("don't run for now") - self.check_simple_loop({'raw_load': 2, - 'float_add': 1, - 'raw_store': 1, - 'int_add': 3, - 'int_ge': 1, 'guard_false': 1, - 'jump': 1, - 'arraylen_gc': 1}) + self.check_trace_count(1) + self.check_simple_loop({ + 'float_add': 1, + 'getarrayitem_gc': 3, + 'guard_false': 1, + 'guard_not_invalidated': 1, + 'guard_true': 3, + 'int_add': 9, + 'int_ge': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + }) def define_take(): return """ From noreply at buildbot.pypy.org Fri Apr 18 07:15:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 07:15:20 +0200 (CEST) Subject: [pypy-commit] pypy default: add pow_int test_zjit Message-ID: 
<20140418051520.6FF451C0185@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70749:d13b267aa161 Date: 2014-04-18 00:58 -0400 http://bitbucket.org/pypy/pypy/changeset/d13b267aa161/ Log: add pow_int test_zjit diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -144,6 +144,34 @@ 'setarrayitem_gc': 3, }) + def define_pow_int(): + return """ + a = astype(|30|, int) + b = astype([2], int) + c = a ** b + c -> 3 + """ + + def test_pow_int(self): + result = self.run("pow_int") + assert result == 3 ** 2 + self.check_trace_count(2) # extra one for the astype + del get_stats().loops[0] # we don't care about it + self.check_simple_loop({ + 'call': 1, + 'getarrayitem_gc': 3, + 'guard_false': 1, + 'guard_not_invalidated': 1, + 'guard_true': 3, + 'int_add': 9, + 'int_ge': 1, + 'int_lt': 3, + 'jump': 1, + 'raw_load': 2, + 'raw_store': 1, + 'setarrayitem_gc': 3, + }) + def define_sum(): return """ a = |30| From noreply at buildbot.pypy.org Fri Apr 18 08:09:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 08:09:36 +0200 (CEST) Subject: [pypy-commit] pypy default: add sanity check that iter state is only used with corresponding iterator Message-ID: <20140418060936.14B3B1C0185@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70750:ceb3e5defbab Date: 2014-04-18 02:07 -0400 http://bitbucket.org/pypy/pypy/changeset/ceb3e5defbab/ Log: add sanity check that iter state is only used with corresponding iterator diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -81,9 +81,10 @@ class IterState(object): - _immutable_fields_ = ['index', 'indices[*]', 'offset'] + _immutable_fields_ = ['iterator', 'index', 'indices[*]', 'offset'] - def __init__(self, index, indices, 
offset): + def __init__(self, iterator, index, indices, offset): + self.iterator = iterator self.index = index self.indices = indices self.offset = offset @@ -103,10 +104,11 @@ self.backstrides = backstrides def reset(self): - return IterState(0, [0] * len(self.shape_m1), self.array.start) + return IterState(self, 0, [0] * len(self.shape_m1), self.array.start) @jit.unroll_safe def next(self, state): + assert state.iterator is self index = state.index + 1 indices = state.indices offset = state.offset @@ -119,10 +121,11 @@ else: indices[i] = 0 offset -= self.backstrides[i] - return IterState(index, indices, offset) + return IterState(self, index, indices, offset) @jit.unroll_safe def next_skip_x(self, state, step): + assert state.iterator is self assert step >= 0 if step == 0: return state @@ -142,18 +145,22 @@ offset += self.strides[i] * cur_step step = rem_step assert step > 0 - return IterState(index, indices, offset) + return IterState(self, index, indices, offset) def done(self, state): + assert state.iterator is self return state.index >= self.size def getitem(self, state): + assert state.iterator is self return self.array.getitem(state.offset) def getitem_bool(self, state): + assert state.iterator is self return self.array.getitem_bool(state.offset) def setitem(self, state, elem): + assert state.iterator is self self.array.setitem(state.offset, elem) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -617,7 +617,7 @@ 'raw_store': 1, 'same_as': 2, 'setarrayitem_gc': 8, - 'setfield_gc': 16, + 'setfield_gc': 21, }) def define_argsort(): From noreply at buildbot.pypy.org Fri Apr 18 09:45:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 09:45:07 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test Message-ID: <20140418074507.DE0181D2954@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
Changeset: r70751:85e7dc6a1289 Date: 2014-04-18 09:44 +0200 http://bitbucket.org/pypy/pypy/changeset/85e7dc6a1289/ Log: fix test diff --git a/rpython/rlib/test/test_rlocale.py b/rpython/rlib/test/test_rlocale.py --- a/rpython/rlib/test/test_rlocale.py +++ b/rpython/rlib/test/test_rlocale.py @@ -40,5 +40,8 @@ if sys.platform != "darwin" and not sys.platform.startswith("linux"): py.test.skip("there is (maybe) no libintl here") _gettext = external('gettext', [rffi.CCHARP], rffi.CCHARP) - res = _gettext("1234") + p = rffi.str2charp("1234") + res = _gettext(p) + assert res == p assert rffi.charp2str(res) == "1234" + rffi.free_charp(p) From noreply at buildbot.pypy.org Fri Apr 18 10:03:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 10:03:14 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: redo this optimization Message-ID: <20140418080314.24D4C1D2956@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70752:2712ec3d1a46 Date: 2014-04-18 10:02 +0200 http://bitbucket.org/pypy/pypy/changeset/2712ec3d1a46/ Log: redo this optimization diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -136,8 +136,8 @@ stm_thread_local.shadowstack; #endif + STM_PUSH_ROOT(stm_thread_local, STM_STACK_MARKER_NEW); STM_PUSH_ROOT(stm_thread_local, arg); - /*STM_PUSH_ROOT(END_MARKER_OFF); XXX redo this optimization */ while (1) { @@ -165,7 +165,7 @@ /* invoke the callback in the new transaction */ STM_POP_ROOT(stm_thread_local, arg); - assert(v_old_shadowstack == stm_thread_local.shadowstack); + assert(v_old_shadowstack == stm_thread_local.shadowstack - 1); STM_PUSH_ROOT(stm_thread_local, arg); long result = v_callback(arg, v_counter); @@ -200,9 +200,9 @@ assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); } - //gcptr x = stm_pop_root(); /* pop the END_MARKER */ - //assert(x == END_MARKER_OFF 
|| x == END_MARKER_ON); STM_POP_ROOT_RET(stm_thread_local); /* pop the 'arg' */ + uintptr_t x = (uintptr_t)STM_POP_ROOT_RET(stm_thread_local); + assert(x == STM_STACK_MARKER_NEW || x == STM_STACK_MARKER_OLD); assert(v_old_shadowstack == stm_thread_local.shadowstack); } From noreply at buildbot.pypy.org Fri Apr 18 11:11:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 11:11:56 +0200 (CEST) Subject: [pypy-commit] stmgc marker: "Markers", which are a lightweight but ad-hoc way to mark the current position Message-ID: <20140418091156.5A50B1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1160:4d1f8449c75a Date: 2014-04-18 11:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/4d1f8449c75a/ Log: "Markers", which are a lightweight but ad-hoc way to mark the current position From noreply at buildbot.pypy.org Fri Apr 18 11:11:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 11:11:57 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Get started Message-ID: <20140418091157.6D04E1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1161:e24c66f0b2b4 Date: 2014-04-18 11:11 +0200 http://bitbucket.org/pypy/stmgc/changeset/e24c66f0b2b4/ Log: Get started diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -630,6 +630,7 @@ value before the transaction start */ stm_thread_local_t *tl = pseg->pub.running_thread; assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); + pseg->shadowstack_at_abort = tl->shadowstack; tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -152,6 +152,7 @@ 'thread_local_obj' field. 
*/ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; + struct stm_shadowentry_s *shadowstack_at_abort; /* For debugging */ #ifndef NDEBUG diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -379,7 +379,7 @@ struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - if (((uintptr_t)current->ss) > STM_STACK_MARKER_OLD) + if ((((uintptr_t)current->ss) & 3) == 0) mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); diff --git a/c7/stm/marker.c b/c7/stm/marker.c new file mode 100644 --- /dev/null +++ b/c7/stm/marker.c @@ -0,0 +1,32 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +void (*stmcb_expand_marker)(uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); + + +void marker_fetch(stm_thread_local_t *tl, + enum stm_time_e attribute_to, double time) +{ + tl->longest_marker_state = attribute_to; + tl->longest_marker_time = time; + + if (stmcb_expand_marker != NULL) { + struct stm_shadowentry_s *current = tl->shadowstack - 1; + struct stm_shadowentry_s *base = tl->shadowstack_base; + while (--current >= base) { + uintptr_t x = (uintptr_t)current->ss; + if (x & 1) { + /* the stack entry is an odd number */ + tl->longest_marker_self[0] = 0; + stmcb_expand_marker(x, current[1].ss, + tl->longest_marker_self, + sizeof(tl->longest_marker_self)); + break; + } + } + } +} diff --git a/c7/stm/marker.h b/c7/stm/marker.h new file mode 100644 --- /dev/null +++ b/c7/stm/marker.h @@ -0,0 +1,3 @@ + +void marker_fetch(stm_thread_local_t *tl, + enum stm_time_e attribute_to, double time); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -160,28 +160,26 @@ --current; OPT_ASSERT(current >= base); - switch ((uintptr_t)current->ss) { + uintptr_t x = 
(uintptr_t)current->ss; - case 0: /* NULL */ - continue; - - case STM_STACK_MARKER_NEW: + if ((x & 3) == 0) { + /* the stack entry is a regular pointer (possibly NULL) */ + minor_trace_if_young(¤t->ss); + } + else if (x == STM_STACK_MARKER_NEW) { /* the marker was not already seen: mark it as seen, but continue looking more deeply in the shadowstack */ current->ss = (object_t *)STM_STACK_MARKER_OLD; - continue; - - case STM_STACK_MARKER_OLD: + } + else if (x == STM_STACK_MARKER_OLD) { /* the marker was already seen: we can stop the root stack tracing at this point */ - goto interrupt; - - default: - /* the stack entry is a regular pointer */ - minor_trace_if_young(¤t->ss); + break; + } + else { + /* it is an odd-valued marker, ignore */ } } - interrupt: minor_trace_if_young(&tl->thread_local_obj); } diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -35,8 +35,18 @@ { stm_thread_local_t *tl = STM_SEGMENT->running_thread; TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); - add_timing(tl, attribute_to, tl->timing[STM_TIME_RUN_CURRENT]); + double time_this_transaction = tl->timing[STM_TIME_RUN_CURRENT]; + add_timing(tl, attribute_to, time_this_transaction); tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; + + if (attribute_to != STM_TIME_RUN_COMMITTED && + time_this_transaction > tl->longest_marker_time) { + assert(tl->shadowstack == + STM_PSEGMENT->shadowstack_at_start_of_transaction); + tl->shadowstack = STM_PSEGMENT->shadowstack_at_abort; + marker_fetch(tl, attribute_to, time_this_transaction); + tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction; + } } static const char *timer_names[] = { diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -14,6 +14,7 @@ #include "stm/fprintcolor.h" #include "stm/weakref.h" #include "stm/timing.h" +#include "stm/marker.h" #include "stm/misc.c" #include "stm/list.c" @@ -33,3 +34,4 @@ #include "stm/fprintcolor.c" #include "stm/weakref.c" #include "stm/timing.c" 
+#include "stm/marker.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -90,6 +90,11 @@ float timing[_STM_TIME_N]; double _timing_cur_start; enum stm_time_e _timing_cur_state; + /* the marker with the longest associated time so far */ + enum stm_time_e longest_marker_state; + double longest_marker_time; + char longest_marker_self[80]; + char longest_marker_other[80]; /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; @@ -264,8 +269,8 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) -#define STM_STACK_MARKER_NEW 1 -#define STM_STACK_MARKER_OLD 2 +#define STM_STACK_MARKER_NEW 2 +#define STM_STACK_MARKER_OLD 6 /* Every thread needs to have a corresponding stm_thread_local_t @@ -368,6 +373,14 @@ void stm_flush_timing(stm_thread_local_t *tl, int verbose); +/* The markers pushed in the shadowstack are an odd number followed by a + regular pointer. When needed, this library invokes this callback to + turn this pair into a human-readable explanation. */ +extern void (*stmcb_expand_marker)(uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); + + /* ==================== END ==================== */ #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -28,6 +28,10 @@ int associated_segment_num; uint32_t events[]; float timing[]; + int longest_marker_state; + double longest_marker_time; + char longest_marker_self[]; + char longest_marker_other[]; ...; } stm_thread_local_t; @@ -118,6 +122,9 @@ #define STM_TIME_SYNC_PAUSE ... 
void stm_flush_timing(stm_thread_local_t *, int); + +void (*stmcb_expand_marker)(uintptr_t odd_number, object_t *following_object, + char *outputbuf, size_t outputbufsize); """) @@ -435,6 +442,7 @@ self.current_thread = 0 def teardown_method(self, meth): + lib.stmcb_expand_marker = ffi.NULL tl = self.tls[self.current_thread] if lib._stm_in_transaction(tl) and lib.stm_is_inevitable(): self.commit_transaction() # must succeed! diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py new file mode 100644 --- /dev/null +++ b/c7/test/test_marker.py @@ -0,0 +1,82 @@ +from support import * +import py, time + +class TestMarker(BaseTest): + + def test_marker_odd_simple(self): + self.start_transaction() + self.push_root(ffi.cast("object_t *", 29)) + stm_minor_collect() + stm_major_collect() + # assert did not crash + x = self.pop_root() + assert int(ffi.cast("uintptr_t", x)) == 29 + + def test_abort_marker_no_shadowstack(self): + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_OUTSIDE_TRANSACTION + assert tl.longest_marker_time == 0.0 + # + self.start_transaction() + start = time.time() + while abs(time.time() - start) <= 0.1: + pass + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER + assert 0.099 <= tl.longest_marker_time <= 0.9 + assert tl.longest_marker_self[0] == '\x00' + assert tl.longest_marker_other[0] == '\x00' + + def test_abort_marker_shadowstack(self): + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + start = time.time() + while abs(time.time() - start) <= 0.1: + pass + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER + assert 0.099 <= tl.longest_marker_time <= 0.9 + assert tl.longest_marker_self[0] == '\x00' + assert tl.longest_marker_other[0] == '\x00' + + def 
test_abort_marker_no_shadowstack_cb(self): + @ffi.callback("void(uintptr_t, object_t *, char *, size_t)") + def expand_marker(number, ptr, outbuf, outbufsize): + seen.append(1) + lib.stmcb_expand_marker = expand_marker + seen = [] + # + self.start_transaction() + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_self[0] == '\x00' + assert not seen + + def test_abort_marker_shadowstack_cb(self): + @ffi.callback("void(uintptr_t, object_t *, char *, size_t)") + def expand_marker(number, ptr, outbuf, outbufsize): + s = '%d %r\x00' % (number, ptr) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + # + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + start = time.time() + while abs(time.time() - start) <= 0.1: + pass + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER + assert 0.099 <= tl.longest_marker_time <= 0.9 + assert ffi.string(tl.longest_marker_self) == '29 %r' % (p,) diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -1,7 +1,7 @@ from support import * import py -class TestBasic(BaseTest): +class TestNursery(BaseTest): def test_nursery_full(self): lib._stm_set_nursery_free_count(2048) From noreply at buildbot.pypy.org Fri Apr 18 11:32:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 11:32:49 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Fixes, see comment in marker.c Message-ID: <20140418093249.E633E1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1162:80d12fdc0b89 Date: 2014-04-18 11:32 +0200 http://bitbucket.org/pypy/stmgc/changeset/80d12fdc0b89/ Log: Fixes, see comment in marker.c diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -620,6 +620,9 @@ 
(int)pseg->transaction_state); } + /* look up and preserve the marker information as a string */ + marker_fetch_expand(pseg); + /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -158,6 +158,9 @@ #ifndef NDEBUG pthread_t running_pthread; #endif + + /* Temporarily stores the marker information */ + char marker_self[_STM_MARKER_LEN]; }; enum /* safe_point */ { diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -8,25 +8,39 @@ char *outputbuf, size_t outputbufsize); -void marker_fetch(stm_thread_local_t *tl, - enum stm_time_e attribute_to, double time) +static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) { - tl->longest_marker_state = attribute_to; - tl->longest_marker_time = time; + pseg->marker_self[0] = 0; if (stmcb_expand_marker != NULL) { + stm_thread_local_t *tl = pseg->pub.running_thread; struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; while (--current >= base) { uintptr_t x = (uintptr_t)current->ss; if (x & 1) { /* the stack entry is an odd number */ - tl->longest_marker_self[0] = 0; stmcb_expand_marker(x, current[1].ss, - tl->longest_marker_self, - sizeof(tl->longest_marker_self)); + pseg->marker_self, _STM_MARKER_LEN); break; } } } } + +static void marker_copy(stm_thread_local_t *tl, + struct stm_priv_segment_info_s *pseg, + enum stm_time_e attribute_to, double time) +{ + /* Copies the marker information from pseg to tl. This is called + indirectly from abort_with_mutex(), but only if the lost time is + greater than that of the previous recorded marker. By contrast, + pseg->marker_self has been filled already in all cases. The + reason for the two steps is that we must fill pseg->marker_self + earlier than now (some objects may be GCed), but we only know + here the total time it gets attributed. 
+ */ + tl->longest_marker_state = attribute_to; + tl->longest_marker_time = time; + memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); +} diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -1,3 +1,5 @@ -void marker_fetch(stm_thread_local_t *tl, - enum stm_time_e attribute_to, double time); +static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg); +static void marker_copy(stm_thread_local_t *tl, + struct stm_priv_segment_info_s *pseg, + enum stm_time_e attribute_to, double time); diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -40,12 +40,10 @@ tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; if (attribute_to != STM_TIME_RUN_COMMITTED && - time_this_transaction > tl->longest_marker_time) { - assert(tl->shadowstack == - STM_PSEGMENT->shadowstack_at_start_of_transaction); - tl->shadowstack = STM_PSEGMENT->shadowstack_at_abort; - marker_fetch(tl, attribute_to, time_this_transaction); - tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction; + time_this_transaction * 0.99 > tl->longest_marker_time) { + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + marker_copy(tl, pseg, attribute_to, time_this_transaction); } } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -73,6 +73,8 @@ _STM_TIME_N }; +#define _STM_MARKER_LEN 80 + typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -93,8 +95,8 @@ /* the marker with the longest associated time so far */ enum stm_time_e longest_marker_state; double longest_marker_time; - char longest_marker_self[80]; - char longest_marker_other[80]; + char longest_marker_self[_STM_MARKER_LEN]; + char longest_marker_other[_STM_MARKER_LEN]; /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; 
From noreply at buildbot.pypy.org Fri Apr 18 12:06:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 12:06:31 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Tweaks, and check with demo2.c that it's usable from C code Message-ID: <20140418100631.C62B41C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1163:9568cd489776 Date: 2014-04-18 12:06 +0200 http://bitbucket.org/pypy/stmgc/changeset/9568cd489776/ Log: Tweaks, and check with demo2.c that it's usable from C code diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -44,6 +44,14 @@ visit((object_t **)&n->next); } +static void expand_marker(uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize) +{ + assert(following_object == NULL); + snprintf(outputbuf, outputbufsize, "<%lu>", odd_number); +} + nodeptr_t global_chained_list; @@ -198,8 +206,18 @@ STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ + int loops = 0; + while (check_sorted() == -1) { + + STM_PUSH_ROOT(stm_thread_local, (uintptr_t)(2 * loops + 1)); + STM_PUSH_ROOT(stm_thread_local, NULL); + bubble_run(); + + STM_POP_ROOT_RET(stm_thread_local); + STM_POP_ROOT_RET(stm_thread_local); + loops++; } STM_POP_ROOT(stm_thread_local, global_chained_list); @@ -246,6 +264,7 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); + stmcb_expand_marker = expand_marker; setup_list(); diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -10,7 +10,8 @@ static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) { - pseg->marker_self[0] = 0; + if (pseg->marker_self[0] != 0) + return; /* already collected an entry */ if (stmcb_expand_marker != NULL) { stm_thread_local_t *tl = pseg->pub.running_thread; @@ -22,6 +23,11 @@ /* the stack entry is an odd number */ stmcb_expand_marker(x, current[1].ss, pseg->marker_self, _STM_MARKER_LEN); + + if 
(pseg->marker_self[0] == 0) { + pseg->marker_self[0] = '?'; + pseg->marker_self[1] = 0; + } break; } } @@ -40,7 +46,10 @@ earlier than now (some objects may be GCed), but we only know here the total time it gets attributed. */ - tl->longest_marker_state = attribute_to; - tl->longest_marker_time = time; - memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); + if (time * 0.99 > tl->longest_marker_time) { + tl->longest_marker_state = attribute_to; + tl->longest_marker_time = time; + memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); + } + pseg->marker_self[0] = 0; } diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -39,8 +39,7 @@ add_timing(tl, attribute_to, time_this_transaction); tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; - if (attribute_to != STM_TIME_RUN_COMMITTED && - time_this_transaction * 0.99 > tl->longest_marker_time) { + if (attribute_to != STM_TIME_RUN_COMMITTED) { struct stm_priv_segment_info_s *pseg = get_priv_segment(STM_SEGMENT->segment_num); marker_copy(tl, pseg, attribute_to, time_this_transaction); @@ -81,6 +80,10 @@ fprintf(stderr, " %-24s %9u %8.3f s\n", timer_names[i], tl->events[i], (double)tl->timing[i]); } + fprintf(stderr, " %-24s %6s %11.6f s\n", + "longest recorded marker", "", tl->longest_marker_time); + fprintf(stderr, " \"%.*s\"\n", + (int)_STM_MARKER_LEN, tl->longest_marker_self); s_mutex_unlock(); } } From noreply at buildbot.pypy.org Fri Apr 18 12:10:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 12:10:05 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix for release-demo2 Message-ID: <20140418101005.5C8A61C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1164:876e03efa95e Date: 2014-04-18 12:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/876e03efa95e/ Log: Fix for release-demo2 diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -203,7 +203,7 @@ } 
STM_POP_ROOT(stm_thread_local, global_chained_list); - assert(org == (char *)stm_thread_local.shadowstack); + OPT_ASSERT(org == (char *)stm_thread_local.shadowstack); unregister_thread_local(); status = sem_post(&done); assert(status == 0); From noreply at buildbot.pypy.org Fri Apr 18 12:10:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 12:10:06 +0200 (CEST) Subject: [pypy-commit] stmgc marker: hg merge default Message-ID: <20140418101006.5654F1C01F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1165:61e5e75b84f9 Date: 2014-04-18 12:09 +0200 http://bitbucket.org/pypy/stmgc/changeset/61e5e75b84f9/ Log: hg merge default diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -221,7 +221,7 @@ } STM_POP_ROOT(stm_thread_local, global_chained_list); - assert(org == (char *)stm_thread_local.shadowstack); + OPT_ASSERT(org == (char *)stm_thread_local.shadowstack); unregister_thread_local(); status = sem_post(&done); assert(status == 0); From noreply at buildbot.pypy.org Fri Apr 18 12:26:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 12:26:14 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Move this into macros. Should allow future optimizations or Message-ID: <20140418102614.434491C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1166:79b97025b1ba Date: 2014-04-18 12:25 +0200 http://bitbucket.org/pypy/stmgc/changeset/79b97025b1ba/ Log: Move this into macros. Should allow future optimizations or pushing in some other stack etc. 
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -210,13 +210,11 @@ while (check_sorted() == -1) { - STM_PUSH_ROOT(stm_thread_local, (uintptr_t)(2 * loops + 1)); - STM_PUSH_ROOT(stm_thread_local, NULL); + STM_PUSH_MARKER(stm_thread_local, 2 * loops + 1, NULL); bubble_run(); - STM_POP_ROOT_RET(stm_thread_local); - STM_POP_ROOT_RET(stm_thread_local); + STM_POP_MARKER(stm_thread_local); loops++; } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -274,6 +274,19 @@ #define STM_STACK_MARKER_NEW 2 #define STM_STACK_MARKER_OLD 6 +#define STM_PUSH_MARKER(tl, odd_num, p) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + STM_PUSH_ROOT(tl, _odd_num); \ + STM_PUSH_ROOT(tl, p); \ +} while (0) + +#define STM_POP_MARKER(tl) ({ \ + object_t *_popped = STM_POP_ROOT_RET(tl); \ + STM_POP_ROOT_RET(tl); \ + _popped; \ +}) + /* Every thread needs to have a corresponding stm_thread_local_t structure. It may be a "__thread" global variable or something else. From noreply at buildbot.pypy.org Fri Apr 18 13:24:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 18 Apr 2014 13:24:31 +0200 (CEST) Subject: [pypy-commit] pypy default: "flushing forward"? Message-ID: <20140418112431.F0FC91C048F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70753:17a4ad744f46 Date: 2014-04-18 13:23 +0200 http://bitbucket.org/pypy/pypy/changeset/17a4ad744f46/ Log: "flushing forward"? diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -40,7 +40,7 @@ ``pypy-stm`` project is to improve what is so far the state-of-the-art for using multiple CPUs, which for cases where separate processes don't work is done by writing explicitly multi-threaded programs. Instead, -``pypy-stm`` is flushing forward an approach to *hide* the threads, as +``pypy-stm`` is pushing forward an approach to *hide* the threads, as described below in `atomic sections`_. 
From noreply at buildbot.pypy.org Fri Apr 18 14:36:04 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 18 Apr 2014 14:36:04 +0200 (CEST) Subject: [pypy-commit] pypy numpy-searchsorted: do error checking, add app-level searchsort, which needs getitem(). Message-ID: <20140418123604.675011C03B3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-searchsorted Changeset: r70754:3ec459c1d040 Date: 2014-04-18 15:34 +0300 http://bitbucket.org/pypy/pypy/changeset/3ec459c1d040/ Log: do error checking, add app-level searchsort, which needs getitem(). diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -22,3 +22,34 @@ arr[j] = i i += step return arr + +# How to call this from descr_searchsorted?? +def searchsort(space, arr, v, side, result): + def left_find_index(a, val): + imin = 0 + imax = a.size + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if a[imid] <= val: + imin = imid +1 + else: + imax = imid + return imin + def right_find_index(a, val): + imin = 0 + imax = a.size + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if a[imid] < val: + imin = imid +1 + else: + imax = imid + return imin + if side == 'l': + func = left_find_index + else: + func = right_find_index + for i in range(v.get_size()): + result[i] = func(self, v[i]) + return result + diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -693,9 +693,32 @@ loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out - def descr_searchsorted(self, space, w_v, w_side='left'): - raise OperationError(space.w_NotImplementedError, space.wrap( - "searchsorted not implemented yet")) + @unwrap_spec(side=str, w_sorter=WrappedDefault(None)) + def descr_searchsorted(self, space, w_v, side='left', w_sorter=None): + from 
pypy.module.micronumpy.sort import searchsort + if not space.is_none(w_sorter): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'sorter not supported in searchsort')) + if not side or len(side) < 1: + raise OperationError(space.w_ValueError, space.wrap( + "expected nonempty string for keyword 'side'")) + elif side[0] == 'l' or side[0] == 'L': + side = 'l' + elif side[0] == 'r' or side[0] == 'R': + side = 'r' + else: + raise oefmt(space.w_ValueError, + "'%s' is an invalid value for keyword 'side'", side) + ret = W_NDimArray.from_shape(space, self.get_shape(), + descriptor.get_dtype_cache(space).w_longdtype) + if len(self.get_shape()) > 1: + raise OperationError(space.w_ValueError, space.wrap( + "a must be a 1-d array")) + v = convert_to_array(space, w_v) + if len(v.get_shape()) >1: + raise OperationError(space.w_ValueError, space.wrap( + "v must be a 1-d array-like")) + return searchsort(self, space, v, side, ret) def descr_setasflat(self, space, w_v): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -1351,6 +1374,7 @@ dot = interp2app(W_NDimArray.descr_dot), var = interp2app(W_NDimArray.descr_var), std = interp2app(W_NDimArray.descr_std), + searchsorted = interp2app(W_NDimArray.descr_searchsorted), cumsum = interp2app(W_NDimArray.descr_cumsum), cumprod = interp2app(W_NDimArray.descr_cumprod), From noreply at buildbot.pypy.org Fri Apr 18 18:45:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 18:45:33 +0200 (CEST) Subject: [pypy-commit] pypy default: fix validation of missing r/w op_flag to nditer Message-ID: <20140418164533.7C4B71C073C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70755:9f3d775f07b7 Date: 2014-04-18 11:58 -0400 http://bitbucket.org/pypy/pypy/changeset/9f3d775f07b7/ Log: fix validation of missing r/w op_flag to nditer diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ 
b/pypy/module/micronumpy/nditer.py @@ -80,7 +80,7 @@ class OpFlag(object): def __init__(self): - self.rw = 'r' + self.rw = '' self.broadcast = True self.force_contig = False self.force_align = False @@ -145,7 +145,11 @@ else: raise OperationError(space.w_ValueError, space.wrap( 'op_flags must be a tuple or array of per-op flag-tuples')) - if op_flag.rw == 'r': + if op_flag.rw == '': + raise oefmt(space.w_ValueError, + "None of the iterator flags READWRITE, READONLY, or " + "WRITEONLY were specified for an operand") + elif op_flag.rw == 'r': op_flag.get_it_item = (get_readonly_item, get_readonly_slice) elif op_flag.rw == 'rw': op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -143,21 +143,17 @@ a = arange(6).reshape(2,3) - 3 exc = raises(TypeError, nditer, a, op_dtypes=['complex']) assert str(exc.value).startswith("Iterator operand required copying or buffering") + exc = raises(ValueError, nditer, a, op_flags=['copy'], op_dtypes=['complex128']) + assert str(exc.value) == "None of the iterator flags READWRITE, READONLY, or WRITEONLY were specified for an operand" r = [] for x in nditer(a, op_flags=['readonly','copy'], op_dtypes=['complex128']): r.append(sqrt(x)) assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, - 1+0j, 1.41421356237+0j]).sum()) < 1e-5 - r = [] - for x in nditer(a, op_flags=['copy'], - op_dtypes=['complex128']): - r.append(sqrt(x)) - assert abs((array(r) - [1.73205080757j, 1.41421356237j, 1j, 0j, - 1+0j, 1.41421356237+0j]).sum()) < 1e-5 + 1+0j, 1.41421356237+0j]).sum()) < 1e-5 multi = nditer([None, array([2, 3], dtype='int64'), array(2., dtype='double')], - op_dtypes = ['int64', 'int64', 'float64'], - op_flags = [['writeonly', 'allocate'], ['readonly'], ['readonly']]) + op_dtypes=['int64', 'int64', 'float64'], + 
op_flags=[['writeonly', 'allocate'], ['readonly'], ['readonly']]) for a, b, c in multi: a[...] = b * c assert (multi.operands[0] == [4, 6]).all() From noreply at buildbot.pypy.org Fri Apr 18 18:45:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 18:45:34 +0200 (CEST) Subject: [pypy-commit] pypy default: fix nditer getitem return types Message-ID: <20140418164534.ADE8C1C073C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70756:f2b222d84fb1 Date: 2014-04-18 12:29 -0400 http://bitbucket.org/pypy/pypy/changeset/f2b222d84fb1/ Log: fix nditer getitem return types diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -2,9 +2,8 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError, oefmt -from pypy.module.micronumpy import ufuncs, support +from pypy.module.micronumpy import ufuncs, support, concrete from pypy.module.micronumpy.base import W_NDimArray, convert_to_array -from pypy.module.micronumpy.concrete import SliceArray from pypy.module.micronumpy.descriptor import decode_w_dtype from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator from pypy.module.micronumpy.strides import (calculate_broadcast_strides, @@ -25,7 +24,8 @@ class IteratorMixin(object): _mixin_ = True - def __init__(self, it, op_flags): + def __init__(self, nditer, it, op_flags): + self.nditer = nditer self.it = it self.st = it.reset() self.op_flags = op_flags @@ -37,7 +37,7 @@ self.st = self.it.next(self.st) def getitem(self, space, array): - return self.op_flags.get_it_item[self.index](space, array, self.it, self.st) + return self.op_flags.get_it_item[self.index](space, self.nditer, self.it, self.st) def setitem(self, space, array, val): xxx @@ -90,14 +90,17 @@ self.get_it_item = 
(get_readonly_item, get_readonly_slice) -def get_readonly_item(space, array, it, st): - return space.wrap(it.getitem(st)) +def get_readonly_item(space, nditer, it, st): + res = concrete.ConcreteNonWritableArrayWithBase( + [], it.array.dtype, it.array.order, [], [], it.array.storage, nditer) + res.start = st.offset + return W_NDimArray(res) -def get_readwrite_item(space, array, it, st): - #create a single-value view (since scalars are not views) - res = SliceArray(it.array.start + st.offset, [0], [0], [1], it.array, array) - #it.dtype.setitem(res, 0, it.getitem()) +def get_readwrite_item(space, nditer, it, st): + res = concrete.ConcreteArrayWithBase( + [], it.array.dtype, it.array.order, [], [], it.array.storage, nditer) + res.start = st.offset return W_NDimArray(res) @@ -398,12 +401,14 @@ if self.external_loop: for i in range(len(self.seq)): self.iters.append(ExternalLoopIterator( + self, get_external_loop_iter( space, self.order, self.seq[i], iter_shape), self.op_flags[i])) else: for i in range(len(self.seq)): self.iters.append(BoxIterator( + self, get_iter( space, self.order, self.seq[i], iter_shape, self.dtypes[i]), self.op_flags[i])) diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -4,14 +4,20 @@ class AppTestNDIter(BaseNumpyAppTest): def test_basic(self): - from numpy import arange, nditer + from numpy import arange, nditer, ndarray a = arange(6).reshape(2,3) + i = nditer(a) r = [] - for x in nditer(a): + for x in i: + assert type(x) is ndarray + assert x.base is i + assert x.shape == () + assert x.strides == () + exc = raises(ValueError, "x[()] = 42") + assert str(exc.value) == 'assignment destination is read-only' r.append(x) assert r == [0, 1, 2, 3, 4, 5] r = [] - for x in nditer(a.T): r.append(x) assert r == [0, 1, 2, 3, 4, 5] @@ -29,9 +35,14 @@ assert r == [0, 3, 1, 4, 2, 5] def test_readwrite(self): - 
from numpy import arange, nditer + from numpy import arange, nditer, ndarray a = arange(6).reshape(2,3) - for x in nditer(a, op_flags=['readwrite']): + i = nditer(a, op_flags=['readwrite']) + for x in i: + assert type(x) is ndarray + assert x.base is i + assert x.shape == () + assert x.strides == () x[...] = 2 * x assert (a == [[0, 2, 4], [6, 8, 10]]).all() From noreply at buildbot.pypy.org Fri Apr 18 18:45:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 18:45:35 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify Message-ID: <20140418164535.C03AB1C073C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70757:aba89d1880eb Date: 2014-04-18 12:35 -0400 http://bitbucket.org/pypy/pypy/changeset/aba89d1880eb/ Log: simplify diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -10,20 +10,7 @@ shape_agreement, shape_agreement_multiple) -class AbstractIterator(object): - def done(self): - raise NotImplementedError("Abstract Class") - - def next(self): - raise NotImplementedError("Abstract Class") - - def getitem(self, space, array): - raise NotImplementedError("Abstract Class") - - -class IteratorMixin(object): - _mixin_ = True - +class BaseIterator(object): def __init__(self, nditer, it, op_flags): self.nditer = nditer self.it = it @@ -43,11 +30,11 @@ xxx -class BoxIterator(IteratorMixin, AbstractIterator): +class BoxIterator(BaseIterator): index = 0 -class ExternalLoopIterator(IteratorMixin, AbstractIterator): +class ExternalLoopIterator(BaseIterator): index = 1 From noreply at buildbot.pypy.org Fri Apr 18 18:45:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 18:45:36 +0200 (CEST) Subject: [pypy-commit] pypy default: more nditer cleanups Message-ID: <20140418164536.DEDA51C073C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70758:9afd3cb41ad2 Date: 2014-04-18 
12:44 -0400 http://bitbucket.org/pypy/pypy/changeset/9afd3cb41ad2/ Log: more nditer cleanups diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -10,9 +10,10 @@ shape_agreement, shape_agreement_multiple) -class BaseIterator(object): - def __init__(self, nditer, it, op_flags): +class Iterator(object): + def __init__(self, nditer, index, it, op_flags): self.nditer = nditer + self.index = index self.it = it self.st = it.reset() self.op_flags = op_flags @@ -30,14 +31,6 @@ xxx -class BoxIterator(BaseIterator): - index = 0 - - -class ExternalLoopIterator(BaseIterator): - index = 1 - - def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): ret = [] if space.is_w(w_op_flags, space.w_None): @@ -243,15 +236,6 @@ return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) -def convert_to_array_or_none(space, w_elem): - ''' - None will be passed through, all others will be converted - ''' - if space.is_none(w_elem): - return None - return convert_to_array(space, w_elem) - - class IndexIterator(object): def __init__(self, shape, backward=False): self.shape = shape @@ -301,7 +285,9 @@ if space.isinstance_w(w_seq, space.w_tuple) or \ space.isinstance_w(w_seq, space.w_list): w_seq_as_list = space.listview(w_seq) - self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + self.seq = [convert_to_array(space, w_elem) + if not space.is_none(w_elem) else None + for w_elem in w_seq_as_list] else: self.seq = [convert_to_array(space, w_seq)] @@ -375,8 +361,9 @@ self.dtypes[i] = seq_d elif selfd != seq_d: if not 'r' in self.op_flags[i].tmp_copy: - raise OperationError(space.w_TypeError, space.wrap( - "Iterator operand required copying or buffering for operand %d" % i)) + raise oefmt(space.w_TypeError, + "Iterator operand required copying or " + "buffering for operand %d", i) impl = self.seq[i].implementation new_impl 
= impl.astype(space, selfd) self.seq[i] = W_NDimArray(new_impl) @@ -387,22 +374,23 @@ # create an iterator for each operand if self.external_loop: for i in range(len(self.seq)): - self.iters.append(ExternalLoopIterator( - self, + self.iters.append(Iterator( + self, 1, get_external_loop_iter( space, self.order, self.seq[i], iter_shape), self.op_flags[i])) else: for i in range(len(self.seq)): - self.iters.append(BoxIterator( - self, + self.iters.append(Iterator( + self, 0, get_iter( space, self.order, self.seq[i], iter_shape, self.dtypes[i]), self.op_flags[i])) def set_op_axes(self, space, w_op_axes): if space.len_w(w_op_axes) != len(self.seq): - raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + raise oefmt(space.w_ValueError, + "op_axes must be a tuple/list matching the number of ops") op_axes = space.listview(w_op_axes) l = -1 for w_axis in op_axes: @@ -411,10 +399,14 @@ if l == -1: l = axis_len elif axis_len != l: - raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) - self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + raise oefmt(space.w_ValueError, + "Each entry of op_axes must have the same size") + self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 + for x in space.listview(w_axis)]) if l == -1: - raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise oefmt(space.w_ValueError, + "If op_axes is provided, at least one list of axes " + "must be contained within it") raise Exception('xxx TODO') # Check that values make sense: # - in bounds for each operand @@ -430,12 +422,12 @@ try: ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) except IndexError: - raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) + raise 
oefmt(space.w_IndexError, + "Iterator operand index %d is out of bounds", idx) return ret def descr_setitem(self, space, w_idx, w_value): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_len(self, space): space.wrap(len(self.iters)) @@ -477,29 +469,23 @@ return space.wrap(self.iternext()) def descr_copy(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_debug_print(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_enable_external_loop(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") @unwrap_spec(axis=int) def descr_remove_axis(self, space, axis): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_remove_multi_index(self, space, w_multi_index): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_reset(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_operands(self, space): l_w = [] @@ -517,17 +503,16 @@ return space.wrap(self.done) def descr_get_has_delayed_bufalloc(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_has_index(self, space): return space.wrap(self.tracked_index in ["C", "F"]) 
def descr_get_index(self, space): if not self.tracked_index in ["C", "F"]: - raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + raise oefmt(space.w_ValueError, "Iterator does not have an index") if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + raise oefmt(space.w_ValueError, "Iterator is past the end") return space.wrap(self.index_iter.getvalue()) def descr_get_has_multi_index(self, space): @@ -535,41 +520,34 @@ def descr_get_multi_index(self, space): if not self.tracked_index == "multi": - raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a multi-index")) + raise oefmt(space.w_ValueError, "Iterator is not tracking a multi-index") if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + raise oefmt(space.w_ValueError, "Iterator is past the end") return space.newtuple([space.wrap(x) for x in self.index_iter.index]) def descr_get_iterationneedsapi(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_iterindex(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_itersize(self, space): return space.wrap(support.product(self.shape)) def descr_get_itviews(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_ndim(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_nop(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise 
oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_shape(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_value(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") @unwrap_spec(w_flags=WrappedDefault(None), w_op_flags=WrappedDefault(None), From noreply at buildbot.pypy.org Fri Apr 18 20:14:16 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 20:14:16 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: we now need bytes_w too Message-ID: <20140418181416.609331C02FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70759:9953e025bce7 Date: 2014-04-18 11:13 -0700 http://bitbucket.org/pypy/pypy/changeset/9953e025bce7/ Log: we now need bytes_w too diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1079,6 +1079,10 @@ assert isinstance(string, str) return string + def bytes_w(self, string): + assert isinstance(string, str) + return string + def unicode_w(self, string): assert isinstance(string, unicode) return string From noreply at buildbot.pypy.org Fri Apr 18 20:14:17 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 18 Apr 2014 20:14:17 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: fix BytesStrategy iter to wrapbytes Message-ID: <20140418181417.8D8001C02FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70760:724161bbeca1 Date: 2014-04-18 11:13 -0700 http://bitbucket.org/pypy/pypy/changeset/724161bbeca1/ Log: fix BytesStrategy iter to wrapbytes diff --git a/pypy/objspace/std/setobject.py 
b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1439,7 +1439,7 @@ def next_entry(self): for key in self.iterator: - return self.space.wrap(key) + return self.space.wrapbytes(key) else: return None diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -89,6 +89,7 @@ from pypy.objspace.std.floatobject import W_FloatObject w = self.space.wrap + wb = self.space.wrapbytes intstr = self.space.fromcache(IntegerSetStrategy) tmp_func = intstr.get_storage_from_list # test if get_storage_from_list is no longer used @@ -100,10 +101,10 @@ assert w_set.strategy is intstr assert intstr.unerase(w_set.sstorage) == {1:None, 2:None, 3:None} - w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) + w_list = W_ListObject(self.space, [wb("1"), wb("2"), wb("3")]) w_set = W_SetObject(self.space) _initialize_set(self.space, w_set, w_list) - assert w_set.strategy is self.space.fromcache(UnicodeSetStrategy) + assert w_set.strategy is self.space.fromcache(BytesSetStrategy) assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} w_list = self.space.iter(W_ListObject(self.space, [w(u"1"), w(u"2"), w(u"3")])) @@ -1005,6 +1006,13 @@ # gives us 1, but 1 is not in the set any longer. 
raises(RuntimeError, list, it) + def test_iter_bytes_strategy(self): + l = [b'a', b'b'] + s = set(l) + n = next(iter(s)) + assert type(n) is bytes + assert n in l + def test_unicodestrategy(self): s = 'àèìòù' myset = set([s]) From noreply at buildbot.pypy.org Fri Apr 18 21:13:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 21:13:09 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify nditer Message-ID: <20140418191309.EED291C02FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70761:667ad75d7ce9 Date: 2014-04-18 14:51 -0400 http://bitbucket.org/pypy/pypy/changeset/667ad75d7ce9/ Log: simplify nditer diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -164,38 +164,6 @@ self.array.setitem(state.offset, elem) -class SliceIterator(ArrayIter): - def __init__(self, arr, strides, backstrides, shape, order="C", - backward=False, dtype=None): - if dtype is None: - dtype = arr.implementation.dtype - self.dtype = dtype - self.arr = arr - if backward: - self.slicesize = shape[0] - self.gap = [support.product(shape[1:]) * dtype.elsize] - strides = strides[1:] - backstrides = backstrides[1:] - shape = shape[1:] - strides.reverse() - backstrides.reverse() - shape.reverse() - size = support.product(shape) - else: - shape = [support.product(shape)] - strides, backstrides = calc_strides(shape, dtype, order) - size = 1 - self.slicesize = support.product(shape) - self.gap = strides - ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) - - def getslice(self): - from pypy.module.micronumpy.concrete import SliceArray - return SliceArray(self.offset, self.gap, self.backstrides, - [self.slicesize], self.arr.implementation, - self.arr, self.dtype) - - def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() backstrides = array.get_backstrides() diff --git 
a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -5,56 +5,33 @@ from pypy.module.micronumpy import ufuncs, support, concrete from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.descriptor import decode_w_dtype -from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator +from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (calculate_broadcast_strides, shape_agreement, shape_agreement_multiple) -class Iterator(object): - def __init__(self, nditer, index, it, op_flags): - self.nditer = nditer - self.index = index - self.it = it - self.st = it.reset() - self.op_flags = op_flags - - def done(self): - return self.it.done(self.st) - - def next(self): - self.st = self.it.next(self.st) - - def getitem(self, space, array): - return self.op_flags.get_it_item[self.index](space, self.nditer, self.it, self.st) - - def setitem(self, space, array, val): - xxx - - def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): - ret = [] if space.is_w(w_op_flags, space.w_None): - for i in range(n): - ret.append(OpFlag()) - elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + w_op_flags = space.newtuple([space.wrap('readonly')]) + if not space.isinstance_w(w_op_flags, space.w_tuple) and not \ space.isinstance_w(w_op_flags, space.w_list): raise oefmt(space.w_ValueError, '%s must be a tuple or array of per-op flag-tuples', name) + ret = [] + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise oefmt(space.w_ValueError, + '%s must be a tuple or array of per-op flag-tuples', + name) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) else: - w_lst = space.listview(w_op_flags) - if space.isinstance_w(w_lst[0], space.w_tuple) or \ - 
space.isinstance_w(w_lst[0], space.w_list): - if len(w_lst) != n: - raise oefmt(space.w_ValueError, - '%s must be a tuple or array of per-op flag-tuples', - name) - for item in w_lst: - ret.append(parse_one_arg(space, space.listview(item))) - else: - op_flag = parse_one_arg(space, w_lst) - for i in range(n): - ret.append(op_flag) + op_flag = parse_one_arg(space, w_lst) + for i in range(n): + ret.append(op_flag) return ret @@ -67,29 +44,6 @@ self.native_byte_order = False self.tmp_copy = '' self.allocate = False - self.get_it_item = (get_readonly_item, get_readonly_slice) - - -def get_readonly_item(space, nditer, it, st): - res = concrete.ConcreteNonWritableArrayWithBase( - [], it.array.dtype, it.array.order, [], [], it.array.storage, nditer) - res.start = st.offset - return W_NDimArray(res) - - -def get_readwrite_item(space, nditer, it, st): - res = concrete.ConcreteArrayWithBase( - [], it.array.dtype, it.array.order, [], [], it.array.storage, nditer) - res.start = st.offset - return W_NDimArray(res) - - -def get_readonly_slice(space, array, it): - return W_NDimArray(it.getslice().readonly()) - - -def get_readwrite_slice(space, array, it): - return W_NDimArray(it.getslice()) def parse_op_flag(space, lst): @@ -128,17 +82,10 @@ else: raise OperationError(space.w_ValueError, space.wrap( 'op_flags must be a tuple or array of per-op flag-tuples')) - if op_flag.rw == '': - raise oefmt(space.w_ValueError, - "None of the iterator flags READWRITE, READONLY, or " - "WRITEONLY were specified for an operand") - elif op_flag.rw == 'r': - op_flag.get_it_item = (get_readonly_item, get_readonly_slice) - elif op_flag.rw == 'rw': - op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) - elif op_flag.rw == 'w': - # XXX Extra logic needed to make sure writeonly - op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + if op_flag.rw == '': + raise oefmt(space.w_ValueError, + "None of the iterator flags READWRITE, READONLY, or " + "WRITEONLY were specified for an 
operand") return op_flag @@ -230,12 +177,6 @@ return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) -def get_external_loop_iter(space, order, arr, shape): - imp = arr.implementation - backward = is_backward(imp, order) - return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) - - class IndexIterator(object): def __init__(self, shape, backward=False): self.shape = shape @@ -326,8 +267,6 @@ out_dtype = None for i in range(len(self.seq)): if self.seq[i] is None: - self.op_flags[i].get_it_item = (get_readwrite_item, - get_readwrite_slice) self.op_flags[i].allocate = True continue if self.op_flags[i].rw == 'w': @@ -372,20 +311,9 @@ self.dtypes = [s.get_dtype() for s in self.seq] # create an iterator for each operand - if self.external_loop: - for i in range(len(self.seq)): - self.iters.append(Iterator( - self, 1, - get_external_loop_iter( - space, self.order, self.seq[i], iter_shape), - self.op_flags[i])) - else: - for i in range(len(self.seq)): - self.iters.append(Iterator( - self, 0, - get_iter( - space, self.order, self.seq[i], iter_shape, self.dtypes[i]), - self.op_flags[i])) + for i in range(len(self.seq)): + it = get_iter(space, self.order, self.seq[i], iter_shape, self.dtypes[i]) + self.iters.append((it, it.reset())) def set_op_axes(self, space, w_op_axes): if space.len_w(w_op_axes) != len(self.seq): @@ -417,14 +345,24 @@ def descr_iter(self, space): return space.wrap(self) + def getitem(self, it, st, op_flags): + if op_flags.rw == 'r': + impl = concrete.ConcreteNonWritableArrayWithBase + else: + impl = concrete.ConcreteArrayWithBase + res = impl([], it.array.dtype, it.array.order, [], [], + it.array.storage, self) + res.start = st.offset + return W_NDimArray(res) + def descr_getitem(self, space, w_idx): idx = space.int_w(w_idx) try: - ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + it, st = self.iters[idx] except IndexError: raise oefmt(space.w_IndexError, "Iterator operand index %d is out of bounds", 
idx) - return ret + return self.getitem(it, st, self.op_flags[idx]) def descr_setitem(self, space, w_idx, w_value): raise oefmt(space.w_NotImplementedError, "not implemented yet") @@ -433,8 +371,8 @@ space.wrap(len(self.iters)) def descr_next(self, space): - for it in self.iters: - if not it.done(): + for it, st in self.iters: + if not it.done(st): break else: self.done = True @@ -445,9 +383,9 @@ self.index_iter.next() else: self.first_next = False - for i in range(len(self.iters)): - res.append(self.iters[i].getitem(space, self.seq[i])) - self.iters[i].next() + for i, (it, st) in enumerate(self.iters): + res.append(self.getitem(it, st, self.op_flags[i])) + self.iters[i] = (it, it.next(st)) if len(res) < 2: return res[0] return space.newtuple(res) @@ -455,10 +393,10 @@ def iternext(self): if self.index_iter: self.index_iter.next() - for i in range(len(self.iters)): - self.iters[i].next() - for it in self.iters: - if not it.done(): + for i, (it, st) in enumerate(self.iters): + self.iters[i] = (it, it.next(st)) + for it, st in self.iters: + if not it.done(st): break else: self.done = True From noreply at buildbot.pypy.org Fri Apr 18 23:09:40 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 23:09:40 +0200 (CEST) Subject: [pypy-commit] pypy default: small cleanups for flatiter Message-ID: <20140418210940.2EFD11C01F4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70762:acdd090d79d7 Date: 2014-04-18 16:28 -0400 http://bitbucket.org/pypy/pypy/changeset/acdd090d79d7/ Log: small cleanups for flatiter diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -35,7 +35,7 @@ self.iter, self.state = self.base.create_iter() def descr_len(self, space): - return space.wrap(self.base.get_size()) + return space.wrap(self.iter.size) def descr_next(self, space): if self.iter.done(self.state): @@ -48,8 +48,7 @@ return 
space.wrap(self.state.index) def descr_coords(self, space): - coords = self.base.to_coords(space, space.wrap(self.state.index)) - return space.newtuple([space.wrap(c) for c in coords]) + return space.newtuple([space.wrap(c) for c in self.state.indices]) def descr_getitem(self, space, w_idx): if not (space.isinstance_w(w_idx, space.w_int) or From noreply at buildbot.pypy.org Fri Apr 18 23:09:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 18 Apr 2014 23:09:41 +0200 (CEST) Subject: [pypy-commit] pypy default: support more variations of ndarray.item() Message-ID: <20140418210941.70DBD1C01F4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70763:142661a32d5b Date: 2014-04-18 17:05 -0400 http://bitbucket.org/pypy/pypy/changeset/142661a32d5b/ Log: support more variations of ndarray.item() diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -18,7 +18,7 @@ multi_axis_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.flatiter import W_FlatIterator -from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ +from pypy.module.micronumpy.strides import get_shape_from_iterable, \ shape_agreement, shape_agreement_multiple @@ -469,29 +469,33 @@ def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) - def to_coords(self, space, w_index): - coords, _, _ = to_coords(space, self.get_shape(), - self.get_size(), self.get_order(), - w_index) - return coords - - def descr_item(self, space, w_arg=None): - if space.is_none(w_arg): + def descr_item(self, space, __args__): + args_w, kw_w = __args__.unpack() + if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): + args_w = space.fixedview(args_w[0]) + shape = self.get_shape() + coords = [0] * len(shape) + if len(args_w) == 0: if self.get_size() == 1: w_obj = self.get_scalar_value() assert 
isinstance(w_obj, boxes.W_GenericBox) return w_obj.item(space) raise oefmt(space.w_ValueError, "can only convert an array of size 1 to a Python scalar") - if space.isinstance_w(w_arg, space.w_int): - if self.is_scalar(): - raise oefmt(space.w_IndexError, "index out of bounds") - i = self.to_coords(space, w_arg) - item = self.getitem(space, i) - assert isinstance(item, boxes.W_GenericBox) - return item.item(space) - raise OperationError(space.w_NotImplementedError, space.wrap( - "non-int arg not supported")) + elif len(args_w) == 1 and len(shape) != 1: + value = support.index_w(space, args_w[0]) + value = support.check_and_adjust_index(space, value, self.get_size(), -1) + for idim in range(len(shape) - 1, -1, -1): + coords[idim] = value % shape[idim] + value //= shape[idim] + elif len(args_w) == len(shape): + for idim in range(len(shape)): + coords[idim] = support.index_w(space, args_w[idim]) + else: + raise oefmt(space.w_ValueError, "incorrect number of indices for array") + item = self.getitem(space, coords) + assert isinstance(item, boxes.W_GenericBox) + return item.item(space) def descr_itemset(self, space, args_w): if len(args_w) == 0: diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -233,30 +233,6 @@ return dtype -def to_coords(space, shape, size, order, w_item_or_slice): - '''Returns a start coord, step, and length. 
- ''' - start = lngth = step = 0 - if not (space.isinstance_w(w_item_or_slice, space.w_int) or - space.isinstance_w(w_item_or_slice, space.w_slice)): - raise OperationError(space.w_IndexError, - space.wrap('unsupported iterator index')) - - start, stop, step, lngth = space.decode_index4(w_item_or_slice, size) - - coords = [0] * len(shape) - i = start - if order == 'C': - for s in range(len(shape) -1, -1, -1): - coords[s] = i % shape[s] - i //= shape[s] - else: - for s in range(len(shape)): - coords[s] = i % shape[s] - i //= shape[s] - return coords, step, lngth - - @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -25,3 +25,18 @@ for x in s: i *= x return i + + +def check_and_adjust_index(space, index, size, axis): + if index < -size or index >= size: + if axis >= 0: + raise oefmt(space.w_IndexError, + "index %d is out of bounds for axis %d with size %d", + index, axis, size) + else: + raise oefmt(space.w_IndexError, + "index %d is out of bounds for size %d", + index, size) + if index < 0: + index += size + return index diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -164,24 +164,6 @@ assert calc_new_strides([1, 1, 105, 1, 1], [7, 15], [1, 7],'F') == \ [1, 1, 1, 105, 105] - def test_to_coords(self): - from pypy.module.micronumpy.strides import to_coords - - def _to_coords(index, order): - return to_coords(self.space, [2, 3, 4], 24, order, - self.space.wrap(index))[0] - - assert _to_coords(0, 'C') == [0, 0, 0] - assert _to_coords(1, 'C') == [0, 0, 1] - assert _to_coords(-1, 'C') == [1, 2, 3] - assert _to_coords(5, 'C') == [0, 1, 1] - assert _to_coords(13, 'C') == [1, 0, 1] - assert _to_coords(0, 
'F') == [0, 0, 0] - assert _to_coords(1, 'F') == [1, 0, 0] - assert _to_coords(-1, 'F') == [1, 2, 3] - assert _to_coords(5, 'F') == [1, 2, 0] - assert _to_coords(13, 'F') == [1, 0, 2] - def test_find_shape(self): from pypy.module.micronumpy.strides import find_shape_and_elems @@ -2988,12 +2970,14 @@ raises((IndexError, ValueError), "a.compress([1] * 100)") def test_item(self): + import numpy as np from numpypy import array assert array(3).item() == 3 assert type(array(3).item()) is int assert type(array(True).item()) is bool assert type(array(3.5).item()) is float - raises(IndexError, "array(3).item(15)") + exc = raises(IndexError, "array(3).item(15)") + assert str(exc.value) == 'index 15 is out of bounds for size 1' raises(ValueError, "array([1, 2, 3]).item()") assert array([3]).item(0) == 3 assert type(array([3]).item(0)) is int @@ -3012,6 +2996,11 @@ assert type(b[1]) is str assert b[0] == 1 assert b[1] == 'ab' + a = np.arange(24).reshape(2, 4, 3) + assert a.item(1, 1, 1) == 16 + assert a.item((1, 1, 1)) == 16 + exc = raises(ValueError, a.item, 1, 1, 1, 1) + assert str(exc.value) == "incorrect number of indices for array" def test_itemset(self): import numpy as np From noreply at buildbot.pypy.org Sat Apr 19 03:03:51 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 19 Apr 2014 03:03:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Persist the file descriptor used by os.urandom, which speeds up individual calls to it Message-ID: <20140419010351.E661E1C01F4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70764:6810f401d08e Date: 2014-04-18 18:03 -0700 http://bitbucket.org/pypy/pypy/changeset/6810f401d08e/ Log: Persist the file descriptor used by os.urandom, which speeds up individual calls to it diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -5,11 +5,13 @@ import os, sys import errno +from rpython.rtyper.lltypesystem import lltype, rffi + + 
if sys.platform == 'win32': from rpython.rlib import rwin32 from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.tool import rffi_platform - from rpython.rtyper.lltypesystem import lltype, rffi eci = ExternalCompilationInfo( includes = ['windows.h', 'wincrypt.h'], @@ -81,25 +83,28 @@ return buf.str(n) else: # Posix implementation def init_urandom(): - pass + """NOT_RPYTHON + Return an array of one int, initialized to 0. + It is filled automatically the first time urandom() is called. + """ + return lltype.malloc(rffi.CArray(lltype.Signed), 1, + immortal=True, zero=True) def urandom(context, n): "Read n bytes from /dev/urandom." result = '' if n == 0: return result - fd = os.open("/dev/urandom", os.O_RDONLY, 0777) - try: - while n > 0: - try: - data = os.read(fd, n) - except OSError, e: - if e.errno != errno.EINTR: - raise - data = '' - result += data - n -= len(data) - finally: - os.close(fd) + if not context[0]: + context[0] = os.open("/dev/urandom", os.O_RDONLY, 0777) + while n > 0: + try: + data = os.read(context[0], n) + except OSError, e: + if e.errno != errno.EINTR: + raise + data = '' + result += data + n -= len(data) return result From noreply at buildbot.pypy.org Sat Apr 19 04:39:00 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 19 Apr 2014 04:39:00 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: rejig test skipping and use of dummy backend Message-ID: <20140419023900.832FE1C073C@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70766:2a3916aaa1b8 Date: 2014-04-18 19:38 -0700 http://bitbucket.org/pypy/pypy/changeset/2a3916aaa1b8/ Log: rejig test skipping and use of dummy backend diff --git a/pypy/module/cppyy/test/Makefile b/pypy/module/cppyy/test/Makefile --- a/pypy/module/cppyy/test/Makefile +++ b/pypy/module/cppyy/test/Makefile @@ -5,8 +5,10 @@ ifneq (${REFLEXHOME},) ROOTSYS := ${REFLEXHOME} +else ifneq (${ROOTSYS},) + ROOTSYS := ${ROOTSYS} else - ROOTSYS := 
${ROOTSYS} + DUMMY := t endif ifeq ($(DUMMY),t) diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -2,32 +2,38 @@ @py.test.mark.tryfirst def pytest_runtest_setup(item): - print item if py.path.local.sysfind('genreflex') is None: - #py.test.skip("genreflex is not installed") - - # build the dummy CAPI - - import os - from rpython.translator.tool.cbuild import ExternalCompilationInfo - from rpython.translator import platform - - from rpython.rtyper.lltypesystem import rffi - - pkgpath = py.path.local(__file__).dirpath().join(os.pardir) - srcpath = pkgpath.join('src') - incpath = pkgpath.join('include') - - eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join('dummy_backend.cxx')], - include_dirs=[incpath], - use_cpp_linker=True, - ) - - soname = platform.platform.compile( - [], eci, - outputfilename='libcppyy_backend', - standalone=False) + if not item.location[0] in ['test_helper.py', 'test_cppyy.py'] or \ + (item.location[0] == 'test_cppyy.py' and not 'TestCPPYYImplementation' in item.location[2]): + py.test.skip("genreflex is not installed") import pypy.module.cppyy.capi.loadable_capi as lcapi - lcapi.reflection_library = str(soname) + try: + import ctypes + ctypes.CDLL(lcapi.reflection_library) + except Exception, e: + import os + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.translator import platform + + from rpython.rtyper.lltypesystem import rffi + + pkgpath = py.path.local(__file__).dirpath().join(os.pardir) + srcpath = pkgpath.join('src') + incpath = pkgpath.join('include') + + eci = ExternalCompilationInfo( + separate_module_files=[srcpath.join('dummy_backend.cxx')], + include_dirs=[incpath], + use_cpp_linker=True, + ) + + soname = platform.platform.compile( + [], eci, + outputfilename='libcppyy_backend', + standalone=False) + + lcapi.reflection_library = str(soname) + + lcapi.isdummy = 
True + diff --git a/pypy/module/cppyy/test/test_aclassloader.py b/pypy/module/cppyy/test/test_aclassloader.py --- a/pypy/module/cppyy/test/test_aclassloader.py +++ b/pypy/module/cppyy/test/test_aclassloader.py @@ -1,7 +1,5 @@ import py, os, sys -if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") currpath = py.path.local(__file__).dirpath() diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -1,8 +1,5 @@ import py, os, sys -if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") - from pypy.module.cppyy import capi diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -1,9 +1,5 @@ import py, os, sys -isdummy = '' -if py.path.local.sysfind('genreflex') is None: - isdummy = 'DUMMY=t' - from pypy.module.cppyy import interp_cppyy, executor @@ -13,7 +9,8 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") - err = os.system("cd '%s' && make %s example01Dict.so" % (currpath, isdummy)) + import pypy.module.cppyy.capi.loadable_capi as lcapi + err = os.system("cd '%s' && make example01Dict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") @@ -36,9 +33,6 @@ spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) def setup_class(cls): - if isdummy: - py.test.skip('skipping further tests in dummy mode') - cls.w_example01, cls.w_payload = cls.space.unpackiterable(cls.space.appexec([], """(): import cppyy cppyy.load_reflection_info(%r) diff --git a/pypy/module/cppyy/test/test_crossing.py b/pypy/module/cppyy/test/test_crossing.py --- a/pypy/module/cppyy/test/test_crossing.py +++ b/pypy/module/cppyy/test/test_crossing.py @@ -1,8 +1,5 @@ import py, os, sys -if 
py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") - from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator import platform diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -1,7 +1,5 @@ import py, os, sys -if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("datatypesDict.so")) diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -1,8 +1,5 @@ import py, os, sys -if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") - from pypy.module.cppyy import capi diff --git a/pypy/module/cppyy/test/test_operators.py b/pypy/module/cppyy/test/test_operators.py --- a/pypy/module/cppyy/test/test_operators.py +++ b/pypy/module/cppyy/test/test_operators.py @@ -1,7 +1,5 @@ import py, os, sys -if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("operatorsDict.so")) diff --git a/pypy/module/cppyy/test/test_overloads.py b/pypy/module/cppyy/test/test_overloads.py --- a/pypy/module/cppyy/test/test_overloads.py +++ b/pypy/module/cppyy/test/test_overloads.py @@ -1,7 +1,5 @@ import py, os, sys -if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("overloadsDict.so")) diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ 
b/pypy/module/cppyy/test/test_pythonify.py @@ -1,8 +1,5 @@ import py, os, sys -if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") - from pypy.module.cppyy import interp_cppyy, executor diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -1,8 +1,5 @@ import py, os, sys -if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") - currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("stltypesDict.so")) diff --git a/pypy/module/cppyy/test/test_streams.py b/pypy/module/cppyy/test/test_streams.py --- a/pypy/module/cppyy/test/test_streams.py +++ b/pypy/module/cppyy/test/test_streams.py @@ -1,8 +1,5 @@ import py, os, sys -if py.path.local.sysfind('genreflex') is None: - py.test.skip("genreflex is not installed") - currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("std_streamsDict.so")) From noreply at buildbot.pypy.org Sat Apr 19 10:25:15 2014 From: noreply at buildbot.pypy.org (Jason Myers) Date: Sat, 19 Apr 2014 10:25:15 +0200 (CEST) Subject: [pypy-commit] cffi jasonandersonmyers/fix-broken-link-to-weave-1397835057617: Fix broken link to Weave Message-ID: <20140419082515.A07491D23CD@cobra.cs.uni-duesseldorf.de> Author: Jason Myers Branch: jasonandersonmyers/fix-broken-link-to-weave-1397835057617 Changeset: r1501:6f5361f432f1 Date: 2014-04-18 15:31 +0000 http://bitbucket.org/cffi/cffi/changeset/6f5361f432f1/ Log: Fix broken link to Weave diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,7 @@ .. _`CPython native C extensions`: http://docs.python.org/extending/extending.html .. _`native C extensions`: http://docs.python.org/extending/extending.html .. _`ctypes`: http://docs.python.org/library/ctypes.html -.. _`Weave`: http://www.scipy.org/Weave +.. 
_`Weave`: http://wiki.scipy.org/Weave .. _`manually wrap`: `The verification step`_ From noreply at buildbot.pypy.org Sat Apr 19 10:25:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 10:25:16 +0200 (CEST) Subject: [pypy-commit] cffi default: Merged in jasonandersonmyers/cffi/jasonandersonmyers/fix-broken-link-to-weave-1397835057617 (pull request #32) Message-ID: <20140419082516.B9D281D23CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1502:5a6929f0ccf0 Date: 2014-04-19 10:25 +0200 http://bitbucket.org/cffi/cffi/changeset/5a6929f0ccf0/ Log: Merged in jasonandersonmyers/cffi/jasonandersonmyers/fix-broken- link-to-weave-1397835057617 (pull request #32) Fix broken link to Weave diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,7 @@ .. _`CPython native C extensions`: http://docs.python.org/extending/extending.html .. _`native C extensions`: http://docs.python.org/extending/extending.html .. _`ctypes`: http://docs.python.org/library/ctypes.html -.. _`Weave`: http://www.scipy.org/Weave +.. _`Weave`: http://wiki.scipy.org/Weave .. 
_`manually wrap`: `The verification step`_ From noreply at buildbot.pypy.org Sat Apr 19 12:31:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 12:31:35 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Another convenient marker macro, and test for all three macros Message-ID: <20140419103135.CC92A1C3512@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1167:3b0945ccae76 Date: 2014-04-19 12:31 +0200 http://bitbucket.org/pypy/stmgc/changeset/3b0945ccae76/ Log: Another convenient marker macro, and test for all three macros diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -287,6 +287,17 @@ _popped; \ }) +#define STM_UPDATE_MARKER_NUM(tl, odd_num) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + struct stm_shadowentry_s *_ss = (tl).shadowstack - 2; \ + while (!(((uintptr_t)(_ss->ss)) & 1)) { \ + _ss--; \ + assert(_ss >= (tl).shadowstack_base); \ + } \ + _ss->ss = (object_t *)_odd_num; \ +} while (0) + /* Every thread needs to have a corresponding stm_thread_local_t structure. It may be a "__thread" global variable or something else. 
diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -125,6 +125,10 @@ void (*stmcb_expand_marker)(uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); + +void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); +void stm_update_marker_num(stm_thread_local_t *, uintptr_t); +void stm_pop_marker(stm_thread_local_t *); """) @@ -279,6 +283,21 @@ } } +void stm_push_marker(stm_thread_local_t *tl, uintptr_t onum, object_t *ob) +{ + STM_PUSH_MARKER(*tl, onum, ob); +} + +void stm_update_marker_num(stm_thread_local_t *tl, uintptr_t onum) +{ + STM_UPDATE_MARKER_NUM(*tl, onum); +} + +void stm_pop_marker(stm_thread_local_t *tl) +{ + STM_POP_MARKER(*tl); +} + ''', sources=source_files, define_macros=[('STM_TESTS', '1'), ('STM_LARGEMALLOC_TEST', '1'), diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -80,3 +80,37 @@ assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER assert 0.099 <= tl.longest_marker_time <= 0.9 assert ffi.string(tl.longest_marker_self) == '29 %r' % (p,) + + def test_macros(self): + self.start_transaction() + p = stm_allocate(16) + tl = self.get_stm_thread_local() + lib.stm_push_marker(tl, 29, p) + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == ffi.cast("object_t *", 29) + py.test.raises(EmptyStack, self.pop_root) + # + lib.stm_push_marker(tl, 29, p) + lib.stm_update_marker_num(tl, 27) + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == ffi.cast("object_t *", 27) + py.test.raises(EmptyStack, self.pop_root) + # + lib.stm_push_marker(tl, 29, p) + self.push_root(p) + lib.stm_update_marker_num(tl, 27) + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == ffi.cast("object_t *", 27) + py.test.raises(EmptyStack, self.pop_root) + # + lib.stm_push_marker(tl, 29, p) + 
lib.stm_pop_marker(tl) + py.test.raises(EmptyStack, self.pop_root) From noreply at buildbot.pypy.org Sat Apr 19 12:39:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 12:39:10 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Move the macros. Add a function, for tests mostly Message-ID: <20140419103910.6FD741C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1168:035e1c7879be Date: 2014-04-19 12:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/035e1c7879be/ Log: Move the macros. Add a function, for tests mostly diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -34,6 +34,15 @@ } } +char *_stm_expand_marker(void) +{ + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + pseg->marker_self[0] = 0; + marker_fetch_expand(pseg); + return pseg->marker_self; +} + static void marker_copy(stm_thread_local_t *tl, struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -274,30 +274,6 @@ #define STM_STACK_MARKER_NEW 2 #define STM_STACK_MARKER_OLD 6 -#define STM_PUSH_MARKER(tl, odd_num, p) do { \ - uintptr_t _odd_num = (odd_num); \ - assert(_odd_num & 1); \ - STM_PUSH_ROOT(tl, _odd_num); \ - STM_PUSH_ROOT(tl, p); \ -} while (0) - -#define STM_POP_MARKER(tl) ({ \ - object_t *_popped = STM_POP_ROOT_RET(tl); \ - STM_POP_ROOT_RET(tl); \ - _popped; \ -}) - -#define STM_UPDATE_MARKER_NUM(tl, odd_num) do { \ - uintptr_t _odd_num = (odd_num); \ - assert(_odd_num & 1); \ - struct stm_shadowentry_s *_ss = (tl).shadowstack - 2; \ - while (!(((uintptr_t)(_ss->ss)) & 1)) { \ - _ss--; \ - assert(_ss >= (tl).shadowstack_base); \ - } \ - _ss->ss = (object_t *)_odd_num; \ -} while (0) - /* Every thread needs to have a corresponding stm_thread_local_t structure. It may be a "__thread" global variable or something else. 
@@ -406,6 +382,33 @@ object_t *following_object, char *outputbuf, size_t outputbufsize); +/* Conventience macros to push the markers into the shadowstack */ +#define STM_PUSH_MARKER(tl, odd_num, p) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + STM_PUSH_ROOT(tl, _odd_num); \ + STM_PUSH_ROOT(tl, p); \ +} while (0) + +#define STM_POP_MARKER(tl) ({ \ + object_t *_popped = STM_POP_ROOT_RET(tl); \ + STM_POP_ROOT_RET(tl); \ + _popped; \ +}) + +#define STM_UPDATE_MARKER_NUM(tl, odd_num) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + struct stm_shadowentry_s *_ss = (tl).shadowstack - 2; \ + while (!(((uintptr_t)(_ss->ss)) & 1)) { \ + _ss--; \ + assert(_ss >= (tl).shadowstack_base); \ + } \ + _ss->ss = (object_t *)_odd_num; \ +} while (0) + +char *_stm_expand_marker(void); + /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -129,6 +129,7 @@ void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); void stm_update_marker_num(stm_thread_local_t *, uintptr_t); void stm_pop_marker(stm_thread_local_t *); +char *_stm_expand_marker(void); """) diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -114,3 +114,19 @@ lib.stm_push_marker(tl, 29, p) lib.stm_pop_marker(tl) py.test.raises(EmptyStack, self.pop_root) + + def test_stm_expand_marker(self): + @ffi.callback("void(uintptr_t, object_t *, char *, size_t)") + def expand_marker(number, ptr, outbuf, outbufsize): + s = '%d %r\x00' % (number, ptr) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + self.push_root(stm_allocate(32)) + self.push_root(stm_allocate(16)) + raw = lib._stm_expand_marker() + assert ffi.string(raw) == '29 %r' % (p,) From 
noreply at buildbot.pypy.org Sat Apr 19 14:05:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 14:05:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/035e1c7879be (from branch "marker") Message-ID: <20140419120559.831471D2411@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70767:89b063cce2ab Date: 2014-04-19 12:39 +0200 http://bitbucket.org/pypy/pypy/changeset/89b063cce2ab/ Log: import stmgc/035e1c7879be (from branch "marker") diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -918b1901b1f9 +035e1c7879be diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -621,6 +621,9 @@ (int)pseg->transaction_state); } + /* look up and preserve the marker information as a string */ + marker_fetch_expand(pseg); + /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -631,6 +634,7 @@ value before the transaction start */ stm_thread_local_t *tl = pseg->pub.running_thread; assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); + pseg->shadowstack_at_abort = tl->shadowstack; tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -153,11 +153,15 @@ 'thread_local_obj' field. 
*/ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; + struct stm_shadowentry_s *shadowstack_at_abort; /* For debugging */ #ifndef NDEBUG pthread_t running_pthread; #endif + + /* Temporarily stores the marker information */ + char marker_self[_STM_MARKER_LEN]; }; enum /* safe_point */ { diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -380,7 +380,7 @@ struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - if (((uintptr_t)current->ss) > STM_STACK_MARKER_OLD) + if ((((uintptr_t)current->ss) & 3) == 0) mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -0,0 +1,65 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +void (*stmcb_expand_marker)(uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); + + +static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) +{ + if (pseg->marker_self[0] != 0) + return; /* already collected an entry */ + + if (stmcb_expand_marker != NULL) { + stm_thread_local_t *tl = pseg->pub.running_thread; + struct stm_shadowentry_s *current = tl->shadowstack - 1; + struct stm_shadowentry_s *base = tl->shadowstack_base; + while (--current >= base) { + uintptr_t x = (uintptr_t)current->ss; + if (x & 1) { + /* the stack entry is an odd number */ + stmcb_expand_marker(x, current[1].ss, + pseg->marker_self, _STM_MARKER_LEN); + + if (pseg->marker_self[0] == 0) { + pseg->marker_self[0] = 
'?'; + pseg->marker_self[1] = 0; + } + break; + } + } + } +} + +char *_stm_expand_marker(void) +{ + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + pseg->marker_self[0] = 0; + marker_fetch_expand(pseg); + return pseg->marker_self; +} + +static void marker_copy(stm_thread_local_t *tl, + struct stm_priv_segment_info_s *pseg, + enum stm_time_e attribute_to, double time) +{ + /* Copies the marker information from pseg to tl. This is called + indirectly from abort_with_mutex(), but only if the lost time is + greater than that of the previous recorded marker. By contrast, + pseg->marker_self has been filled already in all cases. The + reason for the two steps is that we must fill pseg->marker_self + earlier than now (some objects may be GCed), but we only know + here the total time it gets attributed. + */ + if (time * 0.99 > tl->longest_marker_time) { + tl->longest_marker_state = attribute_to; + tl->longest_marker_time = time; + memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); + } + pseg->marker_self[0] = 0; +} diff --git a/rpython/translator/stm/src_stm/stm/marker.h b/rpython/translator/stm/src_stm/stm/marker.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/marker.h @@ -0,0 +1,6 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ + +static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg); +static void marker_copy(stm_thread_local_t *tl, + struct stm_priv_segment_info_s *pseg, + enum stm_time_e attribute_to, double time); diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -161,28 +161,26 @@ --current; OPT_ASSERT(current >= base); - switch ((uintptr_t)current->ss) { + uintptr_t x = (uintptr_t)current->ss; - case 0: /* NULL */ - continue; - - case STM_STACK_MARKER_NEW: + if ((x & 3) == 0) { + /* the stack 
entry is a regular pointer (possibly NULL) */ + minor_trace_if_young(¤t->ss); + } + else if (x == STM_STACK_MARKER_NEW) { /* the marker was not already seen: mark it as seen, but continue looking more deeply in the shadowstack */ current->ss = (object_t *)STM_STACK_MARKER_OLD; - continue; - - case STM_STACK_MARKER_OLD: + } + else if (x == STM_STACK_MARKER_OLD) { /* the marker was already seen: we can stop the root stack tracing at this point */ - goto interrupt; - - default: - /* the stack entry is a regular pointer */ - minor_trace_if_young(¤t->ss); + break; + } + else { + /* it is an odd-valued marker, ignore */ } } - interrupt: minor_trace_if_young(&tl->thread_local_obj); } diff --git a/rpython/translator/stm/src_stm/stm/timing.c b/rpython/translator/stm/src_stm/stm/timing.c --- a/rpython/translator/stm/src_stm/stm/timing.c +++ b/rpython/translator/stm/src_stm/stm/timing.c @@ -36,8 +36,15 @@ { stm_thread_local_t *tl = STM_SEGMENT->running_thread; TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); - add_timing(tl, attribute_to, tl->timing[STM_TIME_RUN_CURRENT]); + double time_this_transaction = tl->timing[STM_TIME_RUN_CURRENT]; + add_timing(tl, attribute_to, time_this_transaction); tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; + + if (attribute_to != STM_TIME_RUN_COMMITTED) { + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + marker_copy(tl, pseg, attribute_to, time_this_transaction); + } } static const char *timer_names[] = { @@ -74,6 +81,10 @@ fprintf(stderr, " %-24s %9u %8.3f s\n", timer_names[i], tl->events[i], (double)tl->timing[i]); } + fprintf(stderr, " %-24s %6s %11.6f s\n", + "longest recorded marker", "", tl->longest_marker_time); + fprintf(stderr, " \"%.*s\"\n", + (int)_STM_MARKER_LEN, tl->longest_marker_self); s_mutex_unlock(); } } diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -15,6 
+15,7 @@ #include "stm/fprintcolor.h" #include "stm/weakref.h" #include "stm/timing.h" +#include "stm/marker.h" #include "stm/misc.c" #include "stm/list.c" @@ -34,3 +35,4 @@ #include "stm/fprintcolor.c" #include "stm/weakref.c" #include "stm/timing.c" +#include "stm/marker.c" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -74,6 +74,8 @@ _STM_TIME_N }; +#define _STM_MARKER_LEN 80 + typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -91,6 +93,11 @@ float timing[_STM_TIME_N]; double _timing_cur_start; enum stm_time_e _timing_cur_state; + /* the marker with the longest associated time so far */ + enum stm_time_e longest_marker_state; + double longest_marker_time; + char longest_marker_self[_STM_MARKER_LEN]; + char longest_marker_other[_STM_MARKER_LEN]; /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; @@ -265,8 +272,8 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) -#define STM_STACK_MARKER_NEW 1 -#define STM_STACK_MARKER_OLD 2 +#define STM_STACK_MARKER_NEW 2 +#define STM_STACK_MARKER_OLD 6 /* Every thread needs to have a corresponding stm_thread_local_t @@ -369,6 +376,41 @@ void stm_flush_timing(stm_thread_local_t *tl, int verbose); +/* The markers pushed in the shadowstack are an odd number followed by a + regular pointer. When needed, this library invokes this callback to + turn this pair into a human-readable explanation. 
*/ +extern void (*stmcb_expand_marker)(uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); + +/* Conventience macros to push the markers into the shadowstack */ +#define STM_PUSH_MARKER(tl, odd_num, p) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + STM_PUSH_ROOT(tl, _odd_num); \ + STM_PUSH_ROOT(tl, p); \ +} while (0) + +#define STM_POP_MARKER(tl) ({ \ + object_t *_popped = STM_POP_ROOT_RET(tl); \ + STM_POP_ROOT_RET(tl); \ + _popped; \ +}) + +#define STM_UPDATE_MARKER_NUM(tl, odd_num) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + struct stm_shadowentry_s *_ss = (tl).shadowstack - 2; \ + while (!(((uintptr_t)(_ss->ss)) & 1)) { \ + _ss--; \ + assert(_ss >= (tl).shadowstack_base); \ + } \ + _ss->ss = (object_t *)_odd_num; \ +} while (0) + +char *_stm_expand_marker(void); + + /* ==================== END ==================== */ #endif From noreply at buildbot.pypy.org Sat Apr 19 14:06:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 14:06:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140419120600.C31941D2411@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70768:a76fc199431f Date: 2014-04-19 13:48 +0200 http://bitbucket.org/pypy/pypy/changeset/a76fc199431f/ Log: in-progress diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -44,9 +44,19 @@ return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname) -stmonly_jitdriver = jit.JitDriver(greens=[], reds=['next_instr', 'ec', - 'self', 'co_code'], - stm_do_transaction_breaks=True) +# ____________________________________________________________ + +class PyPyJitDriver(jit.JitDriver): + reds = ['frame', 'ec'] + greens = ['next_instr', 'is_being_profiled', 'pycode'] + virtualizables = ['frame'] + stm_do_transaction_breaks = True + is_main_for_pypy = 
True # XXX temporary: turning 'greens' into a string + # is hard-coded in C code. Don't change 'greens' + +stmonly_jitdriver = PyPyJitDriver() + +# ____________________________________________________________ opcodedesc = bytecode_spec.opcodedesc HAVE_ARGUMENT = bytecode_spec.HAVE_ARGUMENT @@ -61,6 +71,7 @@ # For the sequel, force 'next_instr' to be unsigned for performance next_instr = r_uint(next_instr) co_code = pycode.co_code + rstm.push_marker(intmask(next_instr) * 2 + 1, pycode) try: while True: @@ -71,8 +82,11 @@ self=self, co_code=co_code, next_instr=next_instr, ec=ec) next_instr = self.handle_bytecode(co_code, next_instr, ec) + rstm.update_marker_num(intmask(next_instr) * 2 + 1) except ExitFrame: return self.popvalue() + finally: + rstm.pop_marker() def handle_bytecode(self, co_code, next_instr, ec): try: @@ -467,6 +481,8 @@ opcodedesc.LOAD_FAST.index): return next_instr + rstm.update_marker_num(intmask(next_instr) * 2 + 1) + @jit.unroll_safe def unrollstack(self, unroller_kind): while self.blockstack_non_empty(): diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,6 +12,7 @@ from pypy.interpreter.pycode import CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield +from pypy.interpreter.pyopcode import PyPyJitDriver from opcode import opmap @@ -36,16 +37,10 @@ def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 -class PyPyJitDriver(JitDriver): - reds = ['frame', 'ec'] - greens = ['next_instr', 'is_being_profiled', 'pycode'] - virtualizables = ['frame'] - pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, should_unroll_one_iteration = should_unroll_one_iteration, - name='pypyjit', - stm_do_transaction_breaks=True) + name='pypyjit') class __extend__(PyFrame): diff --git 
a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -490,7 +490,7 @@ get_printable_location=None, confirm_enter_jit=None, can_never_inline=None, should_unroll_one_iteration=None, name='jitdriver', check_untranslated=True, - stm_do_transaction_breaks=False): + stm_do_transaction_breaks=None): if greens is not None: self.greens = greens self.name = name @@ -526,7 +526,8 @@ self.can_never_inline = can_never_inline self.should_unroll_one_iteration = should_unroll_one_iteration self.check_untranslated = check_untranslated - self.stm_do_transaction_breaks = stm_do_transaction_breaks + if stm_do_transaction_breaks is not None: + self.stm_do_transaction_breaks = stm_do_transaction_breaks def _freeze_(self): return True diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -122,6 +122,16 @@ invoke_around_extcall(before_external_call, after_external_call, enter_callback_call, leave_callback_call) + at specialize.argtype(1) +def push_marker(odd_num, object): + llop.stm_push_marker(lltype.Void, odd_num, object) + +def update_marker_num(odd_num): + llop.stm_update_marker_num(lltype.Void, odd_num) + +def pop_marker(): + llop.stm_pop_marker(lltype.Void) + # ____________________________________________________________ def make_perform_transaction(func, CONTAINERP): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -451,6 +451,12 @@ 'stm_ignored_start': LLOp(canrun=True), 'stm_ignored_stop': LLOp(canrun=True), + 'stm_push_marker': LLOp(canrun=True), + 'stm_update_marker_num': LLOp(canrun=True), + 'stm_pop_marker': LLOp(canrun=True), + 'stm_expand_marker': LLOp(), + 'stm_setup_expand_marker_for_pypy': LLOp(), + ## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), ## 'stm_become_inevitable': 
LLOp(canmallocgc=True), ## 'stm_stop_all_other_threads': LLOp(canmallocgc=True), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -680,8 +680,14 @@ def op_stm_ignored_stop(): pass -def op_stm_ptr_eq(x, y): - return op_ptr_eq(x, y) +def op_stm_push_marker(odd_num, object): + pass + +def op_stm_update_marker_num(odd_num): + pass + +def op_stm_pop_marker(): + pass def op_stm_get_tid(x): raise NotImplementedError diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -881,46 +881,10 @@ print >> f, '\t%d,' % (i,) print >> f, '\t-1' print >> f, '};' - print >> f, ''' -void pypy_stm_setup_prebuilt(void) -{ - object_t **pp = rpy_prebuilt; - long *ph = rpy_prebuilt_hashes; - int i = 0; - int *wri = weakref_indices; - for ( ; *pp; pp++, ph++, i++) { - if (i == *wri) { - *pp = stm_setup_prebuilt_weakref(*pp); - wri++; - } - else { - *pp = stm_setup_prebuilt(*pp); - } - stm_set_prebuilt_identityhash(*pp, *ph); - } - - object_t ***cur = (object_t ***) - pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_static_root_start; - object_t ***end = (object_t ***) - pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_static_root_nongcend; - for ( ; cur != end; cur++) { - **cur = stm_setup_prebuilt(**cur); - } -} - -void pypy_stm_register_thread_local(void) -{ - stm_register_thread_local(&stm_thread_local); - stm_thread_local.mem_clear_on_abort = (char *)&pypy_g_ExcData; - stm_thread_local.mem_bytes_to_clear_on_abort = sizeof(pypy_g_ExcData); -} - -void pypy_stm_unregister_thread_local(void) -{ - stm_flush_timing(&stm_thread_local, 1); // XXX temporary - stm_unregister_thread_local(&stm_thread_local); -} -''' + print >> f + print >> f, '#include "preimpl.h"' + print >> f, '#include "src/rtyper.h"' + print >> f, '#include "src_stm/extracode.h"' def commondefs(defines): 
from rpython.rlib.rarithmetic import LONG_BIT, LONGLONG_BIT diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -207,126 +207,30 @@ return '%s = (%s)&stm_thread_local.shadowstack;' % ( result, cdecl(funcgen.lltypename(op.result), '')) +def stm_push_marker(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + arg1 = funcgen.expr(op.args[1]) + return 'STM_PUSH_MARKER(stm_thread_local, %s, %s);' % (arg0, arg1) -##def stm_initialize(funcgen, op): -## return '''stm_initialize(); -## stm_clear_on_abort(&pypy_g_ExcData, sizeof(pypy_g_ExcData)); -## ''' +def stm_update_marker_num(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + return 'STM_UPDATE_MARKER_NUM(stm_thread_local, %s);' % (arg0,) -##def stm_finalize(funcgen, op): -## return 'stm_finalize();' +def stm_pop_marker(funcgen, op): + return 'STM_POP_MARKER(stm_thread_local);' -##def stm_barrier(funcgen, op): -## category_change = op.args[0].value -## # XXX: how to unify the stm_barrier llop generation in -## # writebarrier.py and threadlocalref.py? 
-## if isinstance(category_change, str): -## frm, middle, to = category_change -## else: # rstr -## frm, middle, to = (category_change.chars[0], -## category_change.chars[1], -## category_change.chars[2]) -## assert middle == '2' -## assert frm < to -## if to == 'W': -## if frm >= 'V': -## funcname = 'stm_repeat_write_barrier' -## else: -## funcname = 'stm_write_barrier' -## elif to == 'V': -## funcname = 'stm_write_barrier_noptr' -## elif to == 'R': -## if frm >= 'Q': -## funcname = 'stm_repeat_read_barrier' -## else: -## funcname = 'stm_read_barrier' -## elif to == 'I': -## funcname = 'stm_immut_read_barrier' -## else: -## raise AssertionError(category_change) -## assert op.args[1].concretetype == op.result.concretetype -## arg = funcgen.expr(op.args[1]) -## result = funcgen.expr(op.result) -## return '%s = (%s)%s((gcptr)%s);' % ( -## result, cdecl(funcgen.lltypename(op.result), ''), -## funcname, arg) +def stm_expand_marker(funcgen, op): + result = funcgen.expr(op.result) + return '%s = _stm_expand_marker();' % (result,) -##def stm_ptr_eq(funcgen, op): -## args = [funcgen.expr(v) for v in op.args] -## result = funcgen.expr(op.result) -## # check for prebuilt arguments -## for i, j in [(0, 1), (1, 0)]: -## if isinstance(op.args[j], Constant): -## if op.args[j].value: # non-NULL -## return ('%s = stm_pointer_equal_prebuilt((gcptr)%s, (gcptr)%s);' -## % (result, args[i], args[j])) -## else: -## # this case might be unreachable, but better safe than sorry -## return '%s = (%s == NULL);' % (result, args[i]) -## # -## return '%s = stm_pointer_equal((gcptr)%s, (gcptr)%s);' % ( -## result, args[0], args[1]) - -##def stm_stop_all_other_threads(funcgen, op): -## return 'stm_stop_all_other_threads();' - -##def stm_partial_commit_and_resume_other_threads(funcgen, op): -## return 'stm_partial_commit_and_resume_other_threads();' - -##def stm_get_adr_of_nursery_current(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = (%s)&stm_nursery_current;' % ( -## 
result, cdecl(funcgen.lltypename(op.result), '')) - -##def stm_get_adr_of_nursery_nextlimit(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = (%s)&stm_nursery_nextlimit;' % ( -## result, cdecl(funcgen.lltypename(op.result), '')) - -##def stm_get_adr_of_active(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = (%s)&stm_active;' % ( -## result, cdecl(funcgen.lltypename(op.result), '')) - -##def stm_get_adr_of_private_rev_num(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = (%s)&stm_private_rev_num;' % ( -## result, cdecl(funcgen.lltypename(op.result), '')) - -##def stm_get_adr_of_read_barrier_cache(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = (%s)&stm_read_barrier_cache;' % ( -## result, cdecl(funcgen.lltypename(op.result), '')) - - -##def stm_weakref_allocate(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## arg1 = funcgen.expr(op.args[1]) -## arg2 = funcgen.expr(op.args[2]) -## result = funcgen.expr(op.result) -## return '%s = stm_weakref_allocate(%s, %s, %s);' % (result, arg0, -## arg1, arg2) - -##def stm_allocate_nonmovable_int_adr(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## result = funcgen.expr(op.result) -## return '%s = stm_allocate_public_integer_address(%s);' % (result, arg0) - -##def stm_get_tid(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## result = funcgen.expr(op.result) -## return '%s = ((struct rpyobj_s*)%s)->tid;' % (result, arg0) - -##def stm_enter_callback_call(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = stm_enter_callback_call();' % (result,) - -##def stm_leave_callback_call(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## return 'stm_leave_callback_call(%s);' % (arg0,) - -##def stm_minor_collect(funcgen, op): -## return 'stm_minor_collect();' - -##def stm_major_collect(funcgen, op): -## return 'stm_major_collect();' +def stm_setup_expand_marker_for_pypy(funcgen, op): + # hack hack hack + node = 
funcgen.db.gettypedefnode(op.args[0].concretetype.TO) + typename = funcgen.db.gettype(op.args[0].concretetype.TO) + names = [''.join(arg.value.chars) for arg in op.args[1:]] + names = [node.c_struct_field_name('inst_' + name) for name in names] + offsets = ['offsetof(%s, %s)' % (cdecl(typename, ''), name) + for name in names] + assert len(offsets) == 4 + return 'pypy_stm_setup_expand_marker(%s, %s, %s, %s);' % ( + offsets[0], offsets[1], offsets[2], offsets[3]) diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/extracode.h @@ -0,0 +1,115 @@ + +void pypy_stm_setup_prebuilt(void) +{ + object_t **pp = rpy_prebuilt; + long *ph = rpy_prebuilt_hashes; + int i = 0; + int *wri = weakref_indices; + for ( ; *pp; pp++, ph++, i++) { + if (i == *wri) { + *pp = stm_setup_prebuilt_weakref(*pp); + wri++; + } + else { + *pp = stm_setup_prebuilt(*pp); + } + stm_set_prebuilt_identityhash(*pp, *ph); + } + + object_t ***cur = (object_t ***) + pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_static_root_start; + object_t ***end = (object_t ***) + pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_static_root_nongcend; + for ( ; cur != end; cur++) { + **cur = stm_setup_prebuilt(**cur); + } +} + +void pypy_stm_register_thread_local(void) +{ + stm_register_thread_local(&stm_thread_local); + stm_thread_local.mem_clear_on_abort = (char *)&pypy_g_ExcData; + stm_thread_local.mem_bytes_to_clear_on_abort = sizeof(pypy_g_ExcData); +} + +void pypy_stm_unregister_thread_local(void) +{ + stm_flush_timing(&stm_thread_local, 1); // XXX temporary + stm_unregister_thread_local(&stm_thread_local); +} + + +/************************************************************/ +/*** HACK: hard-coded logic to expand the marker into ***/ +/*** a string, suitable for running in PyPy ***/ + +static long g_co_filename_ofs; +static long g_co_name_ofs; +static long g_co_firstlineno_ofs; +static long 
g_co_lnotab_ofs; + +static char *_RPyString_AsString_Real(RPyString *obj) +{ + stm_char *src = _RPyString_AsString(obj); + return STM_SEGMENT->segment_base + (uintptr_t)src; +} + +static void _stm_expand_marker_for_pypy(uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize) +{ + RPyString *co_filename = + *(RPyString **)(((char *)following_object) + g_co_filename_ofs); + RPyString *co_name = + *(RPyString **)(((char *)following_object) + g_co_name_ofs); + long co_firstlineno = + *(long *)(((char *)following_object) + g_co_firstlineno_ofs); + RPyString *co_lnotab = + *(RPyString **)(((char *)following_object) + g_co_lnotab_ofs); + + char *ntrunc = "", *fntrunc = ""; + + long remaining = outputbufsize - 32; + long nlen = RPyString_Size(co_name); + char *name = _RPyString_AsString_Real(co_name); + if (nlen > remaining / 2) { + nlen = remaining / 2; + ntrunc = "..."; + } + remaining -= nlen; + + long fnlen = RPyString_Size(co_filename); + char *fn = _RPyString_AsString_Real(co_filename); + if (fnlen > remaining) { + fn += (fnlen - remaining); + fnlen = remaining; + fntrunc = "..."; + } + + long tablen = RPyString_Size(co_lnotab); + char *tab = _RPyString_AsString_Real(co_lnotab); + uintptr_t next_instr = odd_number >> 1; + long line = co_firstlineno; + uintptr_t i, addr = 0; + for (i = 0; i < tablen; i += 2) { + addr += ((unsigned char *)tab)[i]; + if (addr > next_instr) + break; + line += ((unsigned char *)tab)[i + 1]; + } + + snprintf(outputbuf, outputbufsize, "File \"%s%.*s\", line %ld, in %.*s%s", + fntrunc, (int)fnlen, fn, line, (int)nlen, name, ntrunc); +} + +void pypy_stm_setup_expand_marker(long co_filename_ofs, + long co_name_ofs, + long co_firstlineno_ofs, + long co_lnotab_ofs) +{ + g_co_filename_ofs = co_filename_ofs; + g_co_name_ofs = co_name_ofs; + g_co_firstlineno_ofs = co_firstlineno_ofs; + g_co_lnotab_ofs = co_lnotab_ofs; + stmcb_expand_marker = _stm_expand_marker_for_pypy; +} diff --git 
a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -26,6 +26,11 @@ void _pypy_stm_become_inevitable(const char *); void pypy_stm_become_globally_unique_transaction(void); +void pypy_stm_setup_expand_marker(long co_filename_ofs, + long co_name_ofs, + long co_firstlineno_ofs, + long co_lnotab_ofs); + static inline void pypy_stm_become_inevitable(const char *msg) { diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -240,6 +240,7 @@ assert 'ok\n' in data def test_abort_info(self): + py.test.skip("goes away") class Parent(object): pass class Foobar(Parent): @@ -492,3 +493,60 @@ data = cbuilder.cmdexec('') assert '-84\n' in data assert '-1298\n' in data + + def test_pypy_marker(self): + class PyCode(object): + def __init__(self, co_filename, co_name, + co_firstlineno, co_lnotab): + self.co_filename = co_filename + self.co_name = co_name + self.co_firstlineno = co_firstlineno + self.co_lnotab = co_lnotab + + def run_interpreter(pycode): + print 'starting', pycode.co_name + rstm.push_marker(1, pycode) + for i in range(10): + p = llop.stm_expand_marker(rffi.CCHARP) + print rffi.charp2str(p) + rstm.update_marker_num(i * 2 + 1) + rstm.pop_marker() + print 'stopping', pycode.co_name + + def main(argv): + pycode1 = PyCode("/tmp/foobar.py", "baz", 40, "\x00\x01\x05\x01") + pycode2 = PyCode("/tmp/foobaz.py", "bar", 70, "\x00\x01\x04\x02") + llop.stm_setup_expand_marker_for_pypy( + lltype.Void, pycode1, + "co_filename", "co_name", "co_firstlineno", "co_lnotab") + + run_interpreter(pycode1) + run_interpreter(pycode2) + return 0 + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert ('starting baz\n' + 'File "/tmp/foobar.py", line 41, in baz\n' + 'File 
"/tmp/foobar.py", line 41, in baz\n' + 'File "/tmp/foobar.py", line 41, in baz\n' + 'File "/tmp/foobar.py", line 41, in baz\n' + 'File "/tmp/foobar.py", line 41, in baz\n' + 'File "/tmp/foobar.py", line 42, in baz\n' + 'File "/tmp/foobar.py", line 42, in baz\n' + 'File "/tmp/foobar.py", line 42, in baz\n' + 'File "/tmp/foobar.py", line 42, in baz\n' + 'File "/tmp/foobar.py", line 42, in baz\n' + 'stopping baz\n') in data + assert ('starting bar\n' + 'File "/tmp/foobaz.py", line 71, in bar\n' + 'File "/tmp/foobaz.py", line 71, in bar\n' + 'File "/tmp/foobaz.py", line 71, in bar\n' + 'File "/tmp/foobaz.py", line 71, in bar\n' + 'File "/tmp/foobaz.py", line 73, in bar\n' + 'File "/tmp/foobaz.py", line 73, in bar\n' + 'File "/tmp/foobaz.py", line 73, in bar\n' + 'File "/tmp/foobaz.py", line 73, in bar\n' + 'File "/tmp/foobaz.py", line 73, in bar\n' + 'File "/tmp/foobaz.py", line 73, in bar\n' + 'stopping bar\n') in data From noreply at buildbot.pypy.org Sat Apr 19 14:06:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 14:06:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fixes Message-ID: <20140419120601.E22A41D2411@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70769:1969f1e4875a Date: 2014-04-19 14:05 +0200 http://bitbucket.org/pypy/pypy/changeset/1969f1e4875a/ Log: Fixes diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -43,35 +43,46 @@ /*** HACK: hard-coded logic to expand the marker into ***/ /*** a string, suitable for running in PyPy ***/ +typedef struct pypy_rpy_string0 RPyStringSpace0; + static long g_co_filename_ofs; static long g_co_name_ofs; static long g_co_firstlineno_ofs; static long g_co_lnotab_ofs; -static char *_RPyString_AsString_Real(RPyString *obj) +static long _fetch_lngspace0(object_t *base, long ofs) { - stm_char *src = 
_RPyString_AsString(obj); - return STM_SEGMENT->segment_base + (uintptr_t)src; + char *src = STM_SEGMENT->segment_base + (uintptr_t)base; + return *(long *)(src + ofs); +} + +static RPyStringSpace0 *_fetch_rpyspace0(object_t *base, long ofs) +{ + char *src = STM_SEGMENT->segment_base + (uintptr_t)base; + char *str = *(char **)(src + ofs); + char *str0 = STM_SEGMENT->segment_base + (uintptr_t)str; + return (RPyStringSpace0 *)str0; } static void _stm_expand_marker_for_pypy(uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize) { - RPyString *co_filename = - *(RPyString **)(((char *)following_object) + g_co_filename_ofs); - RPyString *co_name = - *(RPyString **)(((char *)following_object) + g_co_name_ofs); - long co_firstlineno = - *(long *)(((char *)following_object) + g_co_firstlineno_ofs); - RPyString *co_lnotab = - *(RPyString **)(((char *)following_object) + g_co_lnotab_ofs); + long co_firstlineno; + RPyStringSpace0 *co_filename; + RPyStringSpace0 *co_name; + RPyStringSpace0 *co_lnotab; + + co_filename = _fetch_rpyspace0(following_object, g_co_filename_ofs); + co_name = _fetch_rpyspace0(following_object, g_co_name_ofs); + co_firstlineno = _fetch_lngspace0(following_object, g_co_firstlineno_ofs); + co_lnotab = _fetch_rpyspace0(following_object, g_co_lnotab_ofs); char *ntrunc = "", *fntrunc = ""; long remaining = outputbufsize - 32; long nlen = RPyString_Size(co_name); - char *name = _RPyString_AsString_Real(co_name); + char *name = _RPyString_AsString(co_name); if (nlen > remaining / 2) { nlen = remaining / 2; ntrunc = "..."; @@ -79,7 +90,7 @@ remaining -= nlen; long fnlen = RPyString_Size(co_filename); - char *fn = _RPyString_AsString_Real(co_filename); + char *fn = _RPyString_AsString(co_filename); if (fnlen > remaining) { fn += (fnlen - remaining); fnlen = remaining; @@ -87,7 +98,7 @@ } long tablen = RPyString_Size(co_lnotab); - char *tab = _RPyString_AsString_Real(co_lnotab); + char *tab = _RPyString_AsString(co_lnotab); 
uintptr_t next_instr = odd_number >> 1; long line = co_firstlineno; uintptr_t i, addr = 0; diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -509,19 +509,24 @@ for i in range(10): p = llop.stm_expand_marker(rffi.CCHARP) print rffi.charp2str(p) - rstm.update_marker_num(i * 2 + 1) + rstm.update_marker_num((i+1) * 2 + 1) rstm.pop_marker() print 'stopping', pycode.co_name def main(argv): pycode1 = PyCode("/tmp/foobar.py", "baz", 40, "\x00\x01\x05\x01") pycode2 = PyCode("/tmp/foobaz.py", "bar", 70, "\x00\x01\x04\x02") + pycode3 = PyCode( + "/tmp/some/where/very/very/long/path/bla/br/project/foobaz.py", + "some_extremely_longish_and_boring_function_name", + 80, "\x00\x01\x04\x02") llop.stm_setup_expand_marker_for_pypy( lltype.Void, pycode1, "co_filename", "co_name", "co_firstlineno", "co_lnotab") run_interpreter(pycode1) run_interpreter(pycode2) + run_interpreter(pycode3) return 0 t, cbuilder = self.compile(main) @@ -550,3 +555,6 @@ 'File "/tmp/foobaz.py", line 73, in bar\n' 'File "/tmp/foobaz.py", line 73, in bar\n' 'stopping bar\n') in data + assert ('starting some_extremely_longish_and_boring_function_name\n' + 'File "...bla/br/project/foobaz.py", line 81,' + ' in some_extremely_longish_a...\n') in data From noreply at buildbot.pypy.org Sat Apr 19 14:50:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 14:50:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Add llop.stm_setup_expand_marker_for_pypy here Message-ID: <20140419125044.01A9E1C02D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70770:7f57612f9a81 Date: 2014-04-19 14:15 +0200 http://bitbucket.org/pypy/pypy/changeset/7f57612f9a81/ Log: Add llop.stm_setup_expand_marker_for_pypy here diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- 
a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -387,6 +387,16 @@ if isinstance(w_mod, Module) and not w_mod.startup_called: w_mod.init(self) + if self.config.translation.stm: + from rpython.rtyper.lltypesystem import lltype + from rpython.rtyper.lltypesystem.lloperation import llop + from rpython.rlib.objectmodel import instantiate + from pypy.interpreter.pycode import PyCode + # + llop.stm_setup_expand_marker_for_pypy( + lltype.Void, instantiate(PyCode), + "co_filename", "co_name", "co_firstlineno", "co_lnotab") + def finish(self): self.wait_for_thread_shutdown() w_exitfunc = self.sys.getdictvalue(self, 'exitfunc') From noreply at buildbot.pypy.org Sat Apr 19 14:50:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 14:50:45 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20140419125045.3D4051C02D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70771:27d58254e679 Date: 2014-04-19 14:50 +0200 http://bitbucket.org/pypy/pypy/changeset/27d58254e679/ Log: fix diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -79,8 +79,9 @@ # only used for no-jit. 
The jit-jitdriver is # in interp_jit.py stmonly_jitdriver.jit_merge_point( - self=self, co_code=co_code, - next_instr=next_instr, ec=ec) + frame=self, pycode=co_code, + next_instr=next_instr, ec=ec, + is_being_profiled=self.is_being_profiled) next_instr = self.handle_bytecode(co_code, next_instr, ec) rstm.update_marker_num(intmask(next_instr) * 2 + 1) except ExitFrame: From noreply at buildbot.pypy.org Sat Apr 19 15:55:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 15:55:39 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: ignore these for the jit Message-ID: <20140419135539.864661C02D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70772:6a40ee3af5b3 Date: 2014-04-19 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/6a40ee3af5b3/ Log: ignore these for the jit diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1887,6 +1887,13 @@ None) return [op0, op1] + def rewrite_op_stm_push_marker(self, op): + return [] + def rewrite_op_stm_update_marker_num(self, op): + return [] + def rewrite_op_stm_pop_marker(self, op): + return [] + # ____________________________________________________________ class NotSupported(Exception): From noreply at buildbot.pypy.org Sat Apr 19 16:28:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 19 Apr 2014 16:28:28 +0200 (CEST) Subject: [pypy-commit] pypy default: unused imports Message-ID: <20140419142828.99C931C0721@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70773:89a3b31cd64e Date: 2014-04-19 10:19 -0400 http://bitbucket.org/pypy/pypy/changeset/89a3b31cd64e/ Log: unused imports diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from 
rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop, ufuncs +from pypy.module.micronumpy import descriptor, loop from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,7 +42,6 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support -from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray From noreply at buildbot.pypy.org Sat Apr 19 18:34:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 18:34:19 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Test and fix Message-ID: <20140419163419.DD8D11C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70774:a546ea9be6e4 Date: 2014-04-19 17:47 +0200 http://bitbucket.org/pypy/pypy/changeset/a546ea9be6e4/ Log: Test and fix diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -225,12 +225,17 @@ def stm_setup_expand_marker_for_pypy(funcgen, op): # hack hack hack - node = funcgen.db.gettypedefnode(op.args[0].concretetype.TO) - typename = funcgen.db.gettype(op.args[0].concretetype.TO) - names = [''.join(arg.value.chars) for arg in op.args[1:]] - names = [node.c_struct_field_name('inst_' + name) for name in names] - offsets = ['offsetof(%s, %s)' % (cdecl(typename, ''), name) - for name in names] + offsets = [] + for arg in op.args[1:]: + name = 'inst_' + ''.join(arg.value.chars) + S = op.args[0].concretetype.TO + while True: + node = funcgen.db.gettypedefnode(S) + if name in node.fieldnames: + break + S = S.super + name = 
node.c_struct_field_name(name) + offsets.append('offsetof(struct %s, %s)' % (node.name, name)) assert len(offsets) == 4 return 'pypy_stm_setup_expand_marker(%s, %s, %s, %s);' % ( offsets[0], offsets[1], offsets[2], offsets[3]) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -495,7 +495,9 @@ assert '-1298\n' in data def test_pypy_marker(self): - class PyCode(object): + class Code(object): + pass + class PyCode(Code): def __init__(self, co_filename, co_name, co_firstlineno, co_lnotab): self.co_filename = co_filename @@ -520,6 +522,7 @@ "/tmp/some/where/very/very/long/path/bla/br/project/foobaz.py", "some_extremely_longish_and_boring_function_name", 80, "\x00\x01\x04\x02") + Code().co_name = "moved up" llop.stm_setup_expand_marker_for_pypy( lltype.Void, pycode1, "co_filename", "co_name", "co_firstlineno", "co_lnotab") From noreply at buildbot.pypy.org Sat Apr 19 18:57:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 18:57:06 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/8bd21f95eb8a (branch "marker") Message-ID: <20140419165706.A73371C362D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70775:db460ef20c68 Date: 2014-04-19 18:42 +0200 http://bitbucket.org/pypy/pypy/changeset/db460ef20c68/ Log: import stmgc/8bd21f95eb8a (branch "marker") diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -035e1c7879be +8bd21f95eb8a diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -4,7 +4,7 @@ #endif -void (*stmcb_expand_marker)(uintptr_t 
odd_number, +void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); @@ -22,7 +22,7 @@ uintptr_t x = (uintptr_t)current->ss; if (x & 1) { /* the stack entry is an odd number */ - stmcb_expand_marker(x, current[1].ss, + stmcb_expand_marker(pseg->pub.segment_base, x, current[1].ss, pseg->marker_self, _STM_MARKER_LEN); if (pseg->marker_self[0] == 0) { diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -379,7 +379,7 @@ /* The markers pushed in the shadowstack are an odd number followed by a regular pointer. When needed, this library invokes this callback to turn this pair into a human-readable explanation. */ -extern void (*stmcb_expand_marker)(uintptr_t odd_number, +extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); From noreply at buildbot.pypy.org Sat Apr 19 18:57:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 18:57:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Use the passed-in segment_base, instead of relying on STM_SEGMENT. Message-ID: <20140419165707.BF61C1C362D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70776:766be3ebb6ba Date: 2014-04-19 18:45 +0200 http://bitbucket.org/pypy/pypy/changeset/766be3ebb6ba/ Log: Use the passed-in segment_base, instead of relying on STM_SEGMENT. 
diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -50,33 +50,34 @@ static long g_co_firstlineno_ofs; static long g_co_lnotab_ofs; -static long _fetch_lngspace0(object_t *base, long ofs) +static long _fetch_lngspace0(char *seg_base, object_t *base, long ofs) { - char *src = STM_SEGMENT->segment_base + (uintptr_t)base; + char *src = seg_base + (uintptr_t)base; return *(long *)(src + ofs); } -static RPyStringSpace0 *_fetch_rpyspace0(object_t *base, long ofs) +static RPyStringSpace0 *_fetch_rpyspace0(char *seg_base, object_t *base, + long ofs) { - char *src = STM_SEGMENT->segment_base + (uintptr_t)base; + char *src = seg_base + (uintptr_t)base; char *str = *(char **)(src + ofs); - char *str0 = STM_SEGMENT->segment_base + (uintptr_t)str; + char *str0 = seg_base + (uintptr_t)str; return (RPyStringSpace0 *)str0; } -static void _stm_expand_marker_for_pypy(uintptr_t odd_number, - object_t *following_object, - char *outputbuf, size_t outputbufsize) +static void _stm_expand_marker_for_pypy( + char *segment_base, uintptr_t odd_number, object_t *o, + char *outputbuf, size_t outputbufsize) { long co_firstlineno; RPyStringSpace0 *co_filename; RPyStringSpace0 *co_name; RPyStringSpace0 *co_lnotab; - co_filename = _fetch_rpyspace0(following_object, g_co_filename_ofs); - co_name = _fetch_rpyspace0(following_object, g_co_name_ofs); - co_firstlineno = _fetch_lngspace0(following_object, g_co_firstlineno_ofs); - co_lnotab = _fetch_rpyspace0(following_object, g_co_lnotab_ofs); + co_filename = _fetch_rpyspace0(segment_base, o, g_co_filename_ofs); + co_name = _fetch_rpyspace0(segment_base, o, g_co_name_ofs); + co_firstlineno = _fetch_lngspace0(segment_base, o, g_co_firstlineno_ofs); + co_lnotab = _fetch_rpyspace0(segment_base, o, g_co_lnotab_ofs); char *ntrunc = "", *fntrunc = ""; From noreply at buildbot.pypy.org Sat Apr 19 18:57:08 2014 
From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 18:57:08 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Bah? term.h contains "#define tab ..." Message-ID: <20140419165708.EABE31C362D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70777:2305dd958f77 Date: 2014-04-19 18:47 +0200 http://bitbucket.org/pypy/pypy/changeset/2305dd958f77/ Log: Bah? term.h contains "#define tab ..." diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -98,16 +98,16 @@ fntrunc = "..."; } - long tablen = RPyString_Size(co_lnotab); - char *tab = _RPyString_AsString(co_lnotab); + long lnotablen = RPyString_Size(co_lnotab); + char *lnotab = _RPyString_AsString(co_lnotab); uintptr_t next_instr = odd_number >> 1; long line = co_firstlineno; uintptr_t i, addr = 0; - for (i = 0; i < tablen; i += 2) { - addr += ((unsigned char *)tab)[i]; + for (i = 0; i < lnotablen; i += 2) { + addr += ((unsigned char *)lnotab)[i]; if (addr > next_instr) break; - line += ((unsigned char *)tab)[i + 1]; + line += ((unsigned char *)lnotab)[i + 1]; } snprintf(outputbuf, outputbufsize, "File \"%s%.*s\", line %ld, in %.*s%s", From noreply at buildbot.pypy.org Sat Apr 19 18:57:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 18:57:10 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: merge heads Message-ID: <20140419165710.0EE9F1C362D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70778:75b8ab07c126 Date: 2014-04-19 18:47 +0200 http://bitbucket.org/pypy/pypy/changeset/75b8ab07c126/ Log: merge heads diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -79,8 +79,9 @@ # only used for no-jit. 
The jit-jitdriver is # in interp_jit.py stmonly_jitdriver.jit_merge_point( - self=self, co_code=co_code, - next_instr=next_instr, ec=ec) + frame=self, pycode=co_code, + next_instr=next_instr, ec=ec, + is_being_profiled=self.is_being_profiled) next_instr = self.handle_bytecode(co_code, next_instr, ec) rstm.update_marker_num(intmask(next_instr) * 2 + 1) except ExitFrame: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1887,6 +1887,13 @@ None) return [op0, op1] + def rewrite_op_stm_push_marker(self, op): + return [] + def rewrite_op_stm_update_marker_num(self, op): + return [] + def rewrite_op_stm_pop_marker(self, op): + return [] + # ____________________________________________________________ class NotSupported(Exception): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -225,12 +225,17 @@ def stm_setup_expand_marker_for_pypy(funcgen, op): # hack hack hack - node = funcgen.db.gettypedefnode(op.args[0].concretetype.TO) - typename = funcgen.db.gettype(op.args[0].concretetype.TO) - names = [''.join(arg.value.chars) for arg in op.args[1:]] - names = [node.c_struct_field_name('inst_' + name) for name in names] - offsets = ['offsetof(%s, %s)' % (cdecl(typename, ''), name) - for name in names] + offsets = [] + for arg in op.args[1:]: + name = 'inst_' + ''.join(arg.value.chars) + S = op.args[0].concretetype.TO + while True: + node = funcgen.db.gettypedefnode(S) + if name in node.fieldnames: + break + S = S.super + name = node.c_struct_field_name(name) + offsets.append('offsetof(struct %s, %s)' % (node.name, name)) assert len(offsets) == 4 return 'pypy_stm_setup_expand_marker(%s, %s, %s, %s);' % ( offsets[0], offsets[1], offsets[2], offsets[3]) diff --git a/rpython/translator/stm/test/test_ztranslated.py 
b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -495,7 +495,9 @@ assert '-1298\n' in data def test_pypy_marker(self): - class PyCode(object): + class Code(object): + pass + class PyCode(Code): def __init__(self, co_filename, co_name, co_firstlineno, co_lnotab): self.co_filename = co_filename @@ -520,6 +522,7 @@ "/tmp/some/where/very/very/long/path/bla/br/project/foobaz.py", "some_extremely_longish_and_boring_function_name", 80, "\x00\x01\x04\x02") + Code().co_name = "moved up" llop.stm_setup_expand_marker_for_pypy( lltype.Void, pycode1, "co_filename", "co_name", "co_firstlineno", "co_lnotab") From noreply at buildbot.pypy.org Sat Apr 19 19:01:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 19:01:32 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Add the markers here too Message-ID: <20140419170132.5B5C81D23CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70779:929f41fc2aad Date: 2014-04-19 19:00 +0200 http://bitbucket.org/pypy/pypy/changeset/929f41fc2aad/ Log: Add the markers here too diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -48,6 +48,7 @@ self = hint(self, access_directly=True) next_instr = r_uint(next_instr) is_being_profiled = self.is_being_profiled + rstm.push_marker(intmask(next_instr) * 2 + 1, pycode) try: while True: pypyjitdriver.jit_merge_point(ec=ec, @@ -59,6 +60,7 @@ co_code = pycode.co_code self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) + rstm.update_marker_num(intmask(next_instr) * 2 + 1) is_being_profiled = self.is_being_profiled except Yield: self.last_exception = None @@ -68,6 +70,8 @@ except ExitFrame: self.last_exception = None return self.popvalue() + finally: + rstm.pop_marker() 
def jump_absolute(self, jumpto, ec): if we_are_jitted(): From noreply at buildbot.pypy.org Sat Apr 19 19:47:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 19:47:24 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix: putting push/pop_marker here avoids duplicating them, Message-ID: <20140419174724.A01991C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70780:885b1d74996a Date: 2014-04-19 19:46 +0200 http://bitbucket.org/pypy/pypy/changeset/885b1d74996a/ Log: Fix: putting push/pop_marker here avoids duplicating them, and fixes an issue with jitdriver.py rewriting the function in some indirect way diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -1,7 +1,7 @@ """ PyFrame class implementation with the interpreter main loop. """ -from rpython.rlib import jit +from rpython.rlib import jit, rstm from rpython.rlib.debug import make_sure_not_resized, check_nonneg from rpython.rlib.jit import hint from rpython.rlib.objectmodel import we_are_translated, instantiate @@ -210,12 +210,15 @@ if next_instr != 0: self.pushvalue(w_inputvalue) # + rstm.push_marker(intmask(next_instr) * 2 + 1, self.pycode) try: w_exitvalue = self.dispatch(self.pycode, next_instr, executioncontext) except Exception: + rstm.pop_marker() executioncontext.return_trace(self, self.space.w_None) raise + rstm.pop_marker() executioncontext.return_trace(self, w_exitvalue) # it used to say self.last_exception = None # this is now done by the code in pypyjit module diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -71,8 +71,6 @@ # For the sequel, force 'next_instr' to be unsigned for performance next_instr = r_uint(next_instr) co_code = pycode.co_code - rstm.push_marker(intmask(next_instr) * 2 + 1, pycode) - try: while True: if self.space.config.translation.stm: @@ 
-86,8 +84,6 @@ rstm.update_marker_num(intmask(next_instr) * 2 + 1) except ExitFrame: return self.popvalue() - finally: - rstm.pop_marker() def handle_bytecode(self, co_code, next_instr, ec): try: diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -48,7 +48,6 @@ self = hint(self, access_directly=True) next_instr = r_uint(next_instr) is_being_profiled = self.is_being_profiled - rstm.push_marker(intmask(next_instr) * 2 + 1, pycode) try: while True: pypyjitdriver.jit_merge_point(ec=ec, @@ -70,8 +69,6 @@ except ExitFrame: self.last_exception = None return self.popvalue() - finally: - rstm.pop_marker() def jump_absolute(self, jumpto, ec): if we_are_jitted(): From noreply at buildbot.pypy.org Sat Apr 19 20:48:59 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 19 Apr 2014 20:48:59 +0200 (CEST) Subject: [pypy-commit] pypy default: ensure pypy_setup_home succeeds Message-ID: <20140419184859.088BF1D23CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70781:6f93f63d3bc1 Date: 2014-04-19 11:44 -0700 http://bitbucket.org/pypy/pypy/changeset/6f93f63d3bc1/ Log: ensure pypy_setup_home succeeds diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -26,5 +26,7 @@ # did not crash - the same globals pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) - pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + assert lltype.typeOf(res) == rffi.LONG + assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') From noreply at buildbot.pypy.org Sat Apr 19 20:49:01 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 19 Apr 2014 20:49:01 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: 
<20140419184901.395231D23CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70782:a576218ed693 Date: 2014-04-19 11:45 -0700 http://bitbucket.org/pypy/pypy/changeset/a576218ed693/ Log: merge default diff too long, truncating to 2000 out of 2376 lines diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -40,7 +40,7 @@ ``pypy-stm`` project is to improve what is so far the state-of-the-art for using multiple CPUs, which for cases where separate processes don't work is done by writing explicitly multi-threaded programs. Instead, -``pypy-stm`` is flushing forward an approach to *hide* the threads, as +``pypy-stm`` is pushing forward an approach to *hide* the threads, as described below in `atomic sections`_. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -140,3 +140,6 @@ .. branch: numpypy-nditer Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. 
branch: numpy-speed +Separate iterator from its state so jit can optimize better diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -26,5 +26,7 @@ # did not crash - the same globals pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) - pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + assert lltype.typeOf(res) == rffi.LONG + assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -284,9 +284,11 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return ArrayIter(self, support.product(shape), shape, r[0], r[1]) - return ArrayIter(self, self.get_size(), self.shape, - self.strides, self.backstrides) + i = ArrayIter(self, support.product(shape), shape, r[0], r[1]) + else: + i = ArrayIter(self, self.get_size(), self.shape, + self.strides, self.backstrides) + return i, i.reset() def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop, ufuncs +from pypy.module.micronumpy import descriptor, loop from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter @@ -156,10 +156,10 @@ "string is smaller than requested size")) a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) - ai = 
a.create_iter() + ai, state = a.create_iter() for val in items: - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) return space.wrap(a) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -32,24 +32,23 @@ self.reset() def reset(self): - self.iter = self.base.create_iter() + self.iter, self.state = self.base.create_iter() def descr_len(self, space): - return space.wrap(self.base.get_size()) + return space.wrap(self.iter.size) def descr_next(self, space): - if self.iter.done(): + if self.iter.done(self.state): raise OperationError(space.w_StopIteration, space.w_None) - w_res = self.iter.getitem() - self.iter.next() + w_res = self.iter.getitem(self.state) + self.state = self.iter.next(self.state) return w_res def descr_index(self, space): - return space.wrap(self.iter.index) + return space.wrap(self.state.index) def descr_coords(self, space): - coords = self.base.to_coords(space, space.wrap(self.iter.index)) - return space.newtuple([space.wrap(c) for c in coords]) + return space.newtuple([space.wrap(c) for c in self.state.indices]) def descr_getitem(self, space, w_idx): if not (space.isinstance_w(w_idx, space.w_int) or @@ -58,13 +57,13 @@ self.reset() base = self.base start, stop, step, length = space.decode_index4(w_idx, base.get_size()) - base_iter = base.create_iter() - base_iter.next_skip_x(start) + base_iter, base_state = base.create_iter() + base_state = base_iter.next_skip_x(base_state, start) if length == 1: - return base_iter.getitem() + return base_iter.getitem(base_state) res = W_NDimArray.from_shape(space, [length], base.get_dtype(), base.get_order(), w_instance=base) - return loop.flatiter_getitem(res, base_iter, step) + return loop.flatiter_getitem(res, base_iter, base_state, step) def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or diff --git 
a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,7 +42,6 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support -from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -52,19 +51,20 @@ self.shapelen = len(shape) self.indexes = [0] * len(shape) self._done = False - self.idx_w = [None] * len(idx_w) + self.idx_w_i = [None] * len(idx_w) + self.idx_w_s = [None] * len(idx_w) for i, w_idx in enumerate(idx_w): if isinstance(w_idx, W_NDimArray): - self.idx_w[i] = w_idx.create_iter(shape) + self.idx_w_i[i], self.idx_w_s[i] = w_idx.create_iter(shape) def done(self): return self._done @jit.unroll_safe def next(self): - for w_idx in self.idx_w: - if w_idx is not None: - w_idx.next() + for i, idx_w_i in enumerate(self.idx_w_i): + if idx_w_i is not None: + self.idx_w_s[i] = idx_w_i.next(self.idx_w_s[i]) for i in range(self.shapelen - 1, -1, -1): if self.indexes[i] < self.shape[i] - 1: self.indexes[i] += 1 @@ -79,6 +79,16 @@ return [space.wrap(self.indexes[i]) for i in range(shapelen)] +class IterState(object): + _immutable_fields_ = ['iterator', 'index', 'indices[*]', 'offset'] + + def __init__(self, iterator, index, indices, offset): + self.iterator = iterator + self.index = index + self.indices = indices + self.offset = offset + + class ArrayIter(object): _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', 'strides[*]', 'backstrides[*]'] @@ -92,94 +102,65 @@ self.strides = strides self.backstrides = backstrides - self.index = 0 - self.indices = [0] * len(shape) - self.offset = array.start + def reset(self): + return IterState(self, 0, [0] * len(self.shape_m1), self.array.start) @jit.unroll_safe - def reset(self): - self.index = 0 + def next(self, state): + assert state.iterator is self + index = state.index + 1 + indices = state.indices + offset = state.offset for i in 
xrange(self.ndim_m1, -1, -1): - self.indices[i] = 0 - self.offset = self.array.start + idx = indices[i] + if idx < self.shape_m1[i]: + indices[i] = idx + 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + return IterState(self, index, indices, offset) @jit.unroll_safe - def next(self): - self.index += 1 + def next_skip_x(self, state, step): + assert state.iterator is self + assert step >= 0 + if step == 0: + return state + index = state.index + step + indices = state.indices + offset = state.offset for i in xrange(self.ndim_m1, -1, -1): - idx = self.indices[i] - if idx < self.shape_m1[i]: - self.indices[i] = idx + 1 - self.offset += self.strides[i] + idx = indices[i] + if idx < (self.shape_m1[i] + 1) - step: + indices[i] = idx + step + offset += self.strides[i] * step break else: - self.indices[i] = 0 - self.offset -= self.backstrides[i] - - @jit.unroll_safe - def next_skip_x(self, step): - assert step >= 0 - if step == 0: - return - self.index += step - for i in xrange(self.ndim_m1, -1, -1): - idx = self.indices[i] - if idx < (self.shape_m1[i] + 1) - step: - self.indices[i] = idx + step - self.offset += self.strides[i] * step - break - else: - rem_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) + rem_step = (idx + step) // (self.shape_m1[i] + 1) cur_step = step - rem_step * (self.shape_m1[i] + 1) - self.indices[i] += cur_step - self.offset += self.strides[i] * cur_step + indices[i] = idx + cur_step + offset += self.strides[i] * cur_step step = rem_step assert step > 0 + return IterState(self, index, indices, offset) - def done(self): - return self.index >= self.size + def done(self, state): + assert state.iterator is self + return state.index >= self.size - def getitem(self): - return self.array.getitem(self.offset) + def getitem(self, state): + assert state.iterator is self + return self.array.getitem(state.offset) - def getitem_bool(self): - return self.array.getitem_bool(self.offset) + def 
getitem_bool(self, state): + assert state.iterator is self + return self.array.getitem_bool(state.offset) - def setitem(self, elem): - self.array.setitem(self.offset, elem) - - -class SliceIterator(ArrayIter): - def __init__(self, arr, strides, backstrides, shape, order="C", - backward=False, dtype=None): - if dtype is None: - dtype = arr.implementation.dtype - self.dtype = dtype - self.arr = arr - if backward: - self.slicesize = shape[0] - self.gap = [support.product(shape[1:]) * dtype.elsize] - strides = strides[1:] - backstrides = backstrides[1:] - shape = shape[1:] - strides.reverse() - backstrides.reverse() - shape.reverse() - size = support.product(shape) - else: - shape = [support.product(shape)] - strides, backstrides = calc_strides(shape, dtype, order) - size = 1 - self.slicesize = support.product(shape) - self.gap = strides - - ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) - - def getslice(self): - from pypy.module.micronumpy.concrete import SliceArray - retVal = SliceArray(self.offset, self.gap, self.backstrides, - [self.slicesize], self.arr.implementation, self.arr, self.dtype) - return retVal + def setitem(self, state, elem): + assert state.iterator is self + self.array.setitem(state.offset, elem) def AxisIter(array, shape, axis, cumulative): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -12,11 +12,10 @@ AllButAxisIter -call2_driver = jit.JitDriver(name='numpy_call2', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_lhs', 'w_rhs', 'out', - 'left_iter', 'right_iter', 'out_iter']) +call2_driver = jit.JitDriver( + name='numpy_call2', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority @@ -46,47 +45,40 @@ if out is None: out = W_NDimArray.from_shape(space, shape, 
res_dtype, w_instance=lhs_for_subtype) - left_iter = w_lhs.create_iter(shape) - right_iter = w_rhs.create_iter(shape) - out_iter = out.create_iter(shape) + left_iter, left_state = w_lhs.create_iter(shape) + right_iter, right_state = w_rhs.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): call2_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_lhs=w_lhs, w_rhs=w_rhs, - out=out, - left_iter=left_iter, right_iter=right_iter, - out_iter=out_iter) - w_left = left_iter.getitem().convert_to(space, calc_dtype) - w_right = right_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(func(calc_dtype, w_left, w_right).convert_to( + calc_dtype=calc_dtype, res_dtype=res_dtype) + w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) + w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( space, res_dtype)) - left_iter.next() - right_iter.next() - out_iter.next() + left_state = left_iter.next(left_state) + right_state = right_iter.next(right_state) + out_state = out_iter.next(out_state) return out -call1_driver = jit.JitDriver(name='numpy_call1', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_obj', 'out', 'obj_iter', - 'out_iter']) +call1_driver = jit.JitDriver( + name='numpy_call1', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) - obj_iter = w_obj.create_iter(shape) - out_iter = out.create_iter(shape) + obj_iter, obj_state = w_obj.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): 
call1_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_obj=w_obj, out=out, - obj_iter=obj_iter, out_iter=out_iter) - elem = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(func(calc_dtype, elem).convert_to(space, res_dtype)) - out_iter.next() - obj_iter.next() + calc_dtype=calc_dtype, res_dtype=res_dtype) + elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype)) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) return out setslice_driver = jit.JitDriver(name='numpy_setslice', @@ -96,18 +88,20 @@ def setslice(space, shape, target, source): # note that unlike everything else, target and source here are # array implementations, not arrays - target_iter = target.create_iter(shape) - source_iter = source.create_iter(shape) + target_iter, target_state = target.create_iter(shape) + source_iter, source_state = source.create_iter(shape) dtype = target.dtype shapelen = len(shape) - while not target_iter.done(): + while not target_iter.done(target_state): setslice_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + val = source_iter.getitem(source_state) if dtype.is_str_or_unicode(): - target_iter.setitem(dtype.coerce(space, source_iter.getitem())) + val = dtype.coerce(space, val) else: - target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) - target_iter.next() - source_iter.next() + val = val.convert_to(space, dtype) + target_iter.setitem(target_state, val) + target_state = target_iter.next(target_state) + source_state = source_iter.next(source_state) return target reduce_driver = jit.JitDriver(name='numpy_reduce', @@ -116,22 +110,22 @@ reds = 'auto') def compute_reduce(space, obj, calc_dtype, func, done_func, identity): - obj_iter = obj.create_iter() + obj_iter, obj_state = obj.create_iter() if identity is None: - cur_value = 
obj_iter.getitem().convert_to(space, calc_dtype) - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + obj_state = obj_iter.next(obj_state) else: cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, calc_dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval cur_value = func(calc_dtype, cur_value, rval) - obj_iter.next() + obj_state = obj_iter.next(obj_state) return cur_value reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', @@ -139,69 +133,76 @@ reds = 'auto') def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity): - obj_iter = obj.create_iter() - out_iter = out.create_iter() + obj_iter, obj_state = obj.create_iter() + out_iter, out_state = out.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) else: cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = 
obj_iter.next(obj_state) def fill(arr, box): - arr_iter = arr.create_iter() - while not arr_iter.done(): - arr_iter.setitem(box) - arr_iter.next() + arr_iter, arr_state = arr.create_iter() + while not arr_iter.done(arr_state): + arr_iter.setitem(arr_state, box) + arr_state = arr_iter.next(arr_state) def assign(space, arr, seq): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() for item in seq: - arr_iter.setitem(arr_dtype.coerce(space, item)) - arr_iter.next() + arr_iter.setitem(arr_state, arr_dtype.coerce(space, item)) + arr_state = arr_iter.next(arr_state) where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') def where(space, out, shape, arr, x, y, dtype): - out_iter = out.create_iter(shape) - arr_iter = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) arr_dtype = arr.get_dtype() - x_iter = x.create_iter(shape) - y_iter = y.create_iter(shape) + x_iter, x_state = x.create_iter(shape) + y_iter, y_state = y.create_iter(shape) if x.is_scalar(): if y.is_scalar(): - iter = arr_iter + iter, state = arr_iter, arr_state else: - iter = y_iter + iter, state = y_iter, y_state else: - iter = x_iter + iter, state = x_iter, x_state shapelen = len(shape) - while not iter.done(): + while not iter.done(state): where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, arr_dtype=arr_dtype) - w_cond = arr_iter.getitem() + w_cond = arr_iter.getitem(arr_state) if arr_dtype.itemtype.bool(w_cond): - w_val = x_iter.getitem().convert_to(space, dtype) + w_val = x_iter.getitem(x_state).convert_to(space, dtype) else: - w_val = y_iter.getitem().convert_to(space, dtype) - out_iter.setitem(w_val) - out_iter.next() - arr_iter.next() - x_iter.next() - y_iter.next() + w_val = y_iter.getitem(y_state).convert_to(space, dtype) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) + arr_state = 
arr_iter.next(arr_state) + x_state = x_iter.next(x_state) + y_state = y_iter.next(y_state) + if x.is_scalar(): + if y.is_scalar(): + state = arr_state + else: + state = y_state + else: + state = x_state return out axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', @@ -212,31 +213,36 @@ def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): out_iter = AxisIter(out.implementation, arr.get_shape(), axis, cumulative) + out_state = out_iter.reset() if cumulative: temp_iter = AxisIter(temp.implementation, arr.get_shape(), axis, False) + temp_state = temp_iter.reset() else: - temp_iter = out_iter # hack - arr_iter = arr.create_iter() + temp_iter = out_iter # hack + temp_state = out_state + arr_iter, arr_state = arr.create_iter() if identity is not None: identity = identity.convert_to(space, dtype) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - assert not arr_iter.done() - w_val = arr_iter.getitem().convert_to(space, dtype) - if out_iter.indices[axis] == 0: + assert not arr_iter.done(arr_state) + w_val = arr_iter.getitem(arr_state).convert_to(space, dtype) + if out_state.indices[axis] == 0: if identity is not None: w_val = func(dtype, identity, w_val) else: - cur = temp_iter.getitem() + cur = temp_iter.getitem(temp_state) w_val = func(dtype, cur, w_val) - out_iter.setitem(w_val) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) if cumulative: - temp_iter.setitem(w_val) - temp_iter.next() - arr_iter.next() - out_iter.next() + temp_iter.setitem(temp_state, w_val) + temp_state = temp_iter.next(temp_state) + else: + temp_state = out_state + arr_state = arr_iter.next(arr_state) return out @@ -249,18 +255,18 @@ result = 0 idx = 1 dtype = arr.get_dtype() - iter = arr.create_iter() - cur_best = iter.getitem() - iter.next() + iter, state = arr.create_iter() + cur_best = iter.getitem(state) 
+ state = iter.next(state) shapelen = len(arr.get_shape()) - while not iter.done(): + while not iter.done(state): arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_val = iter.getitem() + w_val = iter.getitem(state) new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - iter.next() + state = iter.next(state) idx += 1 return result return argmin_argmax @@ -291,17 +297,19 @@ right_impl = right.implementation assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype - outi = result.create_iter() + outi, outs = result.create_iter() lefti = AllButAxisIter(left_impl, len(left_shape) - 1) righti = AllButAxisIter(right_impl, right_critical_dim) + lefts = lefti.reset() + rights = righti.reset() n = left_impl.shape[-1] s1 = left_impl.strides[-1] s2 = right_impl.strides[right_critical_dim] - while not lefti.done(): - while not righti.done(): - oval = outi.getitem() - i1 = lefti.offset - i2 = righti.offset + while not lefti.done(lefts): + while not righti.done(rights): + oval = outi.getitem(outs) + i1 = lefts.offset + i2 = rights.offset i = 0 while i < n: i += 1 @@ -311,11 +319,11 @@ oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) i1 += s1 i2 += s2 - outi.setitem(oval) - outi.next() - righti.next() - righti.reset() - lefti.next() + outi.setitem(outs, oval) + outs = outi.next(outs) + rights = righti.next(rights) + rights = righti.reset() + lefts = lefti.next(lefts) return result count_all_true_driver = jit.JitDriver(name = 'numpy_count', @@ -324,13 +332,13 @@ def count_all_true_concrete(impl): s = 0 - iter = impl.create_iter() + iter, state = impl.create_iter() shapelen = len(impl.shape) dtype = impl.dtype - while not iter.done(): + while not iter.done(state): count_all_true_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - s += iter.getitem_bool() - iter.next() + s += iter.getitem_bool(state) + state = iter.next(state) return s 
def count_all_true(arr): @@ -344,18 +352,18 @@ reds = 'auto') def nonzero(res, arr, box): - res_iter = res.create_iter() - arr_iter = arr.create_iter() + res_iter, res_state = res.create_iter() + arr_iter, arr_state = arr.create_iter() shapelen = len(arr.shape) dtype = arr.dtype dims = range(shapelen) - while not arr_iter.done(): + while not arr_iter.done(arr_state): nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) - if arr_iter.getitem_bool(): + if arr_iter.getitem_bool(arr_state): for d in dims: - res_iter.setitem(box(arr_iter.indices[d])) - res_iter.next() - arr_iter.next() + res_iter.setitem(res_state, box(arr_state.indices[d])) + res_state = res_iter.next(res_state) + arr_state = arr_iter.next(arr_state) return res @@ -365,26 +373,26 @@ reds = 'auto') def getitem_filter(res, arr, index): - res_iter = res.create_iter() + res_iter, res_state = res.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() - arr_iter = arr.create_iter() + index_iter, index_state = index.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() index_dtype = index.get_dtype() # XXX length of shape of index as well? 
- while not index_iter.done(): + while not index_iter.done(index_state): getitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - res_iter.setitem(arr_iter.getitem()) - res_iter.next() - index_iter.next() - arr_iter.next() + if index_iter.getitem_bool(index_state): + res_iter.setitem(res_state, arr_iter.getitem(arr_state)) + res_state = res_iter.next(res_state) + index_state = index_iter.next(index_state) + arr_state = arr_iter.next(arr_state) return res setitem_filter_driver = jit.JitDriver(name = 'numpy_setitem_bool', @@ -393,41 +401,42 @@ reds = 'auto') def setitem_filter(space, arr, index, value): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() + index_iter, index_state = index.create_iter() if value.get_size() == 1: - value_iter = value.create_iter(arr.get_shape()) + value_iter, value_state = value.create_iter(arr.get_shape()) else: - value_iter = value.create_iter() + value_iter, value_state = value.create_iter() index_dtype = index.get_dtype() arr_dtype = arr.get_dtype() - while not index_iter.done(): + while not index_iter.done(index_state): setitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - arr_iter.setitem(arr_dtype.coerce(space, value_iter.getitem())) - value_iter.next() - arr_iter.next() - index_iter.next() + if index_iter.getitem_bool(index_state): + val = arr_dtype.coerce(space, value_iter.getitem(value_state)) + value_state = value_iter.next(value_state) + arr_iter.setitem(arr_state, val) + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) 
flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem', greens = ['dtype'], reds = 'auto') -def flatiter_getitem(res, base_iter, step): - ri = res.create_iter() +def flatiter_getitem(res, base_iter, base_state, step): + ri, rs = res.create_iter() dtype = res.get_dtype() - while not ri.done(): + while not ri.done(rs): flatiter_getitem_driver.jit_merge_point(dtype=dtype) - ri.setitem(base_iter.getitem()) - base_iter.next_skip_x(step) - ri.next() + ri.setitem(rs, base_iter.getitem(base_state)) + base_state = base_iter.next_skip_x(base_state, step) + rs = ri.next(rs) return res flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', @@ -436,19 +445,21 @@ def flatiter_setitem(space, arr, val, start, step, length): dtype = arr.get_dtype() - arr_iter = arr.create_iter() - val_iter = val.create_iter() - arr_iter.next_skip_x(start) + arr_iter, arr_state = arr.create_iter() + val_iter, val_state = val.create_iter() + arr_state = arr_iter.next_skip_x(arr_state, start) while length > 0: flatiter_setitem_driver.jit_merge_point(dtype=dtype) + val = val_iter.getitem(val_state) if dtype.is_str_or_unicode(): - arr_iter.setitem(dtype.coerce(space, val_iter.getitem())) + val = dtype.coerce(space, val) else: - arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) + val = val.convert_to(space, dtype) + arr_iter.setitem(arr_state, val) # need to repeat i_nput values until all assignments are done - arr_iter.next_skip_x(step) + arr_state = arr_iter.next_skip_x(arr_state, step) + val_state = val_iter.next(val_state) length -= 1 - val_iter.next() fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', greens = ['itemsize', 'dtype'], @@ -456,30 +467,30 @@ def fromstring_loop(space, a, dtype, itemsize, s): i = 0 - ai = a.create_iter() - while not ai.done(): + ai, state = a.create_iter() + while not ai.done(state): fromstring_driver.jit_merge_point(dtype=dtype, itemsize=itemsize) sub = s[i*itemsize:i*itemsize + itemsize] if 
dtype.is_str_or_unicode(): val = dtype.coerce(space, space.wrap(sub)) else: val = dtype.itemtype.runpack_str(space, sub) - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) i += 1 def tostring(space, arr): builder = StringBuilder() - iter = arr.create_iter() + iter, state = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().elsize res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), w_res_str.implementation.get_storage_as_int(space)) - while not iter.done(): - w_res_str.implementation.setitem(0, iter.getitem()) + while not iter.done(state): + w_res_str.implementation.setitem(0, iter.getitem(state)) for i in range(itemsize): builder.append(res_str_casted[i]) - iter.next() + state = iter.next(state) return builder.build() getitem_int_driver = jit.JitDriver(name = 'numpy_getitem_int', @@ -500,8 +511,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] res.descr_setitem(space, space.newtuple(prefix_w[:prefixlen] + @@ -528,8 +539,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] w_idx = space.newtuple(prefix_w[:prefixlen] + iter.get_index(space, @@ -547,13 +558,14 @@ def byteswap(from_, to): dtype = from_.dtype - from_iter = from_.create_iter() - to_iter = to.create_iter() - while not from_iter.done(): + from_iter, from_state = from_.create_iter() + to_iter, to_state = to.create_iter() + while not from_iter.done(from_state): byteswap_driver.jit_merge_point(dtype=dtype) - to_iter.setitem(dtype.itemtype.byteswap(from_iter.getitem())) - 
to_iter.next() - from_iter.next() + val = dtype.itemtype.byteswap(from_iter.getitem(from_state)) + to_iter.setitem(to_state, val) + to_state = to_iter.next(to_state) + from_state = from_iter.next(from_state) choose_driver = jit.JitDriver(name='numpy_choose_driver', greens = ['shapelen', 'mode', 'dtype'], @@ -561,13 +573,15 @@ def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) - iterators = [a.create_iter(shape) for a in choices] - arr_iter = arr.create_iter(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + pairs = [a.create_iter(shape) for a in choices] + iterators = [i[0] for i in pairs] + states = [i[1] for i in pairs] + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): choose_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, mode=mode) - index = support.index_w(space, arr_iter.getitem()) + index = support.index_w(space, arr_iter.getitem(arr_state)) if index < 0 or index >= len(iterators): if mode == NPY.RAISE: raise OperationError(space.w_ValueError, space.wrap( @@ -580,72 +594,73 @@ index = 0 else: index = len(iterators) - 1 - out_iter.setitem(iterators[index].getitem().convert_to(space, dtype)) - for iter in iterators: - iter.next() - out_iter.next() - arr_iter.next() + val = iterators[index].getitem(states[index]).convert_to(space, dtype) + out_iter.setitem(out_state, val) + for i in range(len(iterators)): + states[i] = iterators[i].next(states[i]) + out_state = out_iter.next(out_state) + arr_state = arr_iter.next(arr_state) clip_driver = jit.JitDriver(name='numpy_clip_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def clip(space, arr, shape, min, max, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) dtype = out.get_dtype() shapelen = len(shape) - min_iter = min.create_iter(shape) - max_iter = max.create_iter(shape) - out_iter = out.create_iter(shape) - while not 
arr_iter.done(): + min_iter, min_state = min.create_iter(shape) + max_iter, max_state = max.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(space, dtype) - w_min = min_iter.getitem().convert_to(space, dtype) - w_max = max_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) + w_min = min_iter.getitem(min_state).convert_to(space, dtype) + w_max = max_iter.getitem(max_state).convert_to(space, dtype) if dtype.itemtype.lt(w_v, w_min): w_v = w_min elif dtype.itemtype.gt(w_v, w_max): w_v = w_max - out_iter.setitem(w_v) - arr_iter.next() - max_iter.next() - out_iter.next() - min_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + min_state = min_iter.next(min_state) + max_state = max_iter.next(max_state) + out_state = out_iter.next(out_state) round_driver = jit.JitDriver(name='numpy_round_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def round(space, arr, dtype, shape, decimals, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + while not arr_iter.done(arr_state): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) w_v = dtype.itemtype.round(w_v, decimals) - out_iter.setitem(w_v) - arr_iter.next() - out_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + out_state = out_iter.next(out_state) diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): - out_iter = out.create_iter() 
+ out_iter, out_state = out.create_iter() i = 0 index = [0] * 2 while i < size: diagonal_simple_driver.jit_merge_point(axis1=axis1, axis2=axis2) index[axis1] = i index[axis2] = i + offset - out_iter.setitem(arr.getitem_index(space, index)) + out_iter.setitem(out_state, arr.getitem_index(space, index)) i += 1 - out_iter.next() + out_state = out_iter.next(out_state) def diagonal_array(space, arr, out, offset, axis1, axis2, shape): - out_iter = out.create_iter() + out_iter, out_state = out.create_iter() iter = PureShapeIter(shape, []) shapelen_minus_1 = len(shape) - 1 assert shapelen_minus_1 >= 0 @@ -667,6 +682,6 @@ indexes = (iter.indexes[:a] + [last_index + offset] + iter.indexes[a:b] + [last_index] + iter.indexes[b:shapelen_minus_1]) - out_iter.setitem(arr.getitem_index(space, indexes)) + out_iter.setitem(out_state, arr.getitem_index(space, indexes)) iter.next() - out_iter.next() + out_state = out_iter.next(out_state) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -18,7 +18,7 @@ multi_axis_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.flatiter import W_FlatIterator -from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ +from pypy.module.micronumpy.strides import get_shape_from_iterable, \ shape_agreement, shape_agreement_multiple @@ -260,24 +260,24 @@ return space.call_function(cache.w_array_str, self) def dump_data(self, prefix='array(', separator=',', suffix=')'): - i = self.create_iter() + i, state = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() s.append(prefix) if not self.is_scalar(): s.append('[') - while not i.done(): + while not i.done(state): if first: first = False else: s.append(separator) s.append(' ') if self.is_scalar() and dtype.is_str(): - s.append(dtype.itemtype.to_str(i.getitem())) + 
s.append(dtype.itemtype.to_str(i.getitem(state))) else: - s.append(dtype.itemtype.str_format(i.getitem())) - i.next() + s.append(dtype.itemtype.str_format(i.getitem(state))) + state = i.next(state) if not self.is_scalar(): s.append(']') s.append(suffix) @@ -469,29 +469,33 @@ def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) - def to_coords(self, space, w_index): - coords, _, _ = to_coords(space, self.get_shape(), - self.get_size(), self.get_order(), - w_index) - return coords - - def descr_item(self, space, w_arg=None): - if space.is_none(w_arg): + def descr_item(self, space, __args__): + args_w, kw_w = __args__.unpack() + if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): + args_w = space.fixedview(args_w[0]) + shape = self.get_shape() + coords = [0] * len(shape) + if len(args_w) == 0: if self.get_size() == 1: w_obj = self.get_scalar_value() assert isinstance(w_obj, boxes.W_GenericBox) return w_obj.item(space) raise oefmt(space.w_ValueError, "can only convert an array of size 1 to a Python scalar") - if space.isinstance_w(w_arg, space.w_int): - if self.is_scalar(): - raise oefmt(space.w_IndexError, "index out of bounds") - i = self.to_coords(space, w_arg) - item = self.getitem(space, i) - assert isinstance(item, boxes.W_GenericBox) - return item.item(space) - raise OperationError(space.w_NotImplementedError, space.wrap( - "non-int arg not supported")) + elif len(args_w) == 1 and len(shape) != 1: + value = support.index_w(space, args_w[0]) + value = support.check_and_adjust_index(space, value, self.get_size(), -1) + for idim in range(len(shape) - 1, -1, -1): + coords[idim] = value % shape[idim] + value //= shape[idim] + elif len(args_w) == len(shape): + for idim in range(len(shape)): + coords[idim] = support.index_w(space, args_w[idim]) + else: + raise oefmt(space.w_ValueError, "incorrect number of indices for array") + item = self.getitem(space, coords) + assert isinstance(item, boxes.W_GenericBox) + return 
item.item(space) def descr_itemset(self, space, args_w): if len(args_w) == 0: @@ -818,8 +822,8 @@ if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - iter = self.create_iter() - return space.wrap(space.is_true(iter.getitem())) + iter, state = self.create_iter() + return space.wrap(space.is_true(iter.getitem(state))) def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): @@ -1085,11 +1089,11 @@ builder = StringBuilder() if isinstance(self.implementation, SliceArray): - iter = self.implementation.create_iter() - while not iter.done(): - box = iter.getitem() + iter, state = self.implementation.create_iter() + while not iter.done(state): + box = iter.getitem(state) builder.append(box.raw_str()) - iter.next() + state = iter.next(state) else: builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -1,99 +1,50 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt +from pypy.module.micronumpy import ufuncs, support, concrete from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.descriptor import decode_w_dtype +from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (calculate_broadcast_strides, - shape_agreement, shape_agreement_multiple) -from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator -from pypy.module.micronumpy.concrete import SliceArray -from pypy.module.micronumpy.descriptor 
import decode_w_dtype -from pypy.module.micronumpy import ufuncs, support + shape_agreement, shape_agreement_multiple) -class AbstractIterator(object): - def done(self): - raise NotImplementedError("Abstract Class") - - def next(self): - raise NotImplementedError("Abstract Class") - - def getitem(self, space, array): - raise NotImplementedError("Abstract Class") - -class IteratorMixin(object): - _mixin_ = True - def __init__(self, it, op_flags): - self.it = it - self.op_flags = op_flags - - def done(self): - return self.it.done() - - def next(self): - self.it.next() - - def getitem(self, space, array): - return self.op_flags.get_it_item[self.index](space, array, self.it) - - def setitem(self, space, array, val): - xxx - -class BoxIterator(IteratorMixin, AbstractIterator): - index = 0 - -class ExternalLoopIterator(IteratorMixin, AbstractIterator): - index = 1 - def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): + if space.is_w(w_op_flags, space.w_None): + w_op_flags = space.newtuple([space.wrap('readonly')]) + if not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise oefmt(space.w_ValueError, + '%s must be a tuple or array of per-op flag-tuples', + name) ret = [] - if space.is_w(w_op_flags, space.w_None): + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise oefmt(space.w_ValueError, + '%s must be a tuple or array of per-op flag-tuples', + name) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) for i in range(n): - ret.append(OpFlag()) - elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ - space.isinstance_w(w_op_flags, space.w_list): - raise OperationError(space.w_ValueError, space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) - else: - w_lst = space.listview(w_op_flags) - 
if space.isinstance_w(w_lst[0], space.w_tuple) or \ - space.isinstance_w(w_lst[0], space.w_list): - if len(w_lst) != n: - raise OperationError(space.w_ValueError, space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) - for item in w_lst: - ret.append(parse_one_arg(space, space.listview(item))) - else: - op_flag = parse_one_arg(space, w_lst) - for i in range(n): - ret.append(op_flag) + ret.append(op_flag) return ret + class OpFlag(object): def __init__(self): - self.rw = 'r' + self.rw = '' self.broadcast = True self.force_contig = False self.force_align = False self.native_byte_order = False self.tmp_copy = '' self.allocate = False - self.get_it_item = (get_readonly_item, get_readonly_slice) -def get_readonly_item(space, array, it): - return space.wrap(it.getitem()) - -def get_readwrite_item(space, array, it): - #create a single-value view (since scalars are not views) - res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) - #it.dtype.setitem(res, 0, it.getitem()) - return W_NDimArray(res) - -def get_readonly_slice(space, array, it): - return W_NDimArray(it.getslice().readonly()) - -def get_readwrite_slice(space, array, it): - return W_NDimArray(it.getslice()) def parse_op_flag(space, lst): op_flag = OpFlag() @@ -121,39 +72,38 @@ op_flag.allocate = True elif item == 'no_subtype': raise OperationError(space.w_NotImplementedError, space.wrap( - '"no_subtype" op_flag not implemented yet')) + '"no_subtype" op_flag not implemented yet')) elif item == 'arraymask': raise OperationError(space.w_NotImplementedError, space.wrap( - '"arraymask" op_flag not implemented yet')) + '"arraymask" op_flag not implemented yet')) elif item == 'writemask': raise OperationError(space.w_NotImplementedError, space.wrap( - '"writemask" op_flag not implemented yet')) + '"writemask" op_flag not implemented yet')) else: raise OperationError(space.w_ValueError, space.wrap( - 'op_flags must be a tuple or array of per-op flag-tuples')) - if op_flag.rw 
== 'r': - op_flag.get_it_item = (get_readonly_item, get_readonly_slice) - elif op_flag.rw == 'rw': - op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) - elif op_flag.rw == 'w': - # XXX Extra logic needed to make sure writeonly - op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == '': + raise oefmt(space.w_ValueError, + "None of the iterator flags READWRITE, READONLY, or " + "WRITEONLY were specified for an operand") return op_flag + def parse_func_flags(space, nditer, w_flags): if space.is_w(w_flags, space.w_None): return elif not space.isinstance_w(w_flags, space.w_tuple) and not \ - space.isinstance_w(w_flags, space.w_list): + space.isinstance_w(w_flags, space.w_list): raise OperationError(space.w_ValueError, space.wrap( - 'Iter global flags must be a list or tuple of strings')) + 'Iter global flags must be a list or tuple of strings')) lst = space.listview(w_flags) for w_item in lst: if not space.isinstance_w(w_item, space.w_str) and not \ - space.isinstance_w(w_item, space.w_unicode): + space.isinstance_w(w_item, space.w_unicode): typename = space.type(w_item).getname(space) - raise OperationError(space.w_TypeError, space.wrap( - 'expected string or Unicode object, %s found' % typename)) + raise oefmt(space.w_TypeError, + 'expected string or Unicode object, %s found', + typename) item = space.str_w(w_item) if item == 'external_loop': raise OperationError(space.w_NotImplementedError, space.wrap( @@ -187,21 +137,24 @@ elif item == 'zerosize_ok': nditer.zerosize_ok = True else: - raise OperationError(space.w_ValueError, space.wrap( - 'Unexpected iterator global flag "%s"' % item)) + raise oefmt(space.w_ValueError, + 'Unexpected iterator global flag "%s"', + item) if nditer.tracked_index and nditer.external_loop: - raise OperationError(space.w_ValueError, space.wrap( - 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' - 'multi-index is being 
tracked')) + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + def is_backward(imp, order): if order == 'K' or (order == 'C' and imp.order == 'C'): return False - elif order =='F' and imp.order == 'C': + elif order == 'F' and imp.order == 'C': return True else: raise NotImplementedError('not implemented yet') + def get_iter(space, order, arr, shape, dtype): imp = arr.implementation backward = is_backward(imp, order) @@ -223,19 +176,6 @@ shape, backward) return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) -def get_external_loop_iter(space, order, arr, shape): - imp = arr.implementation - backward = is_backward(imp, order) - return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) - -def convert_to_array_or_none(space, w_elem): - ''' - None will be passed through, all others will be converted - ''' - if space.is_none(w_elem): - return None - return convert_to_array(space, w_elem) - class IndexIterator(object): def __init__(self, shape, backward=False): @@ -263,10 +203,10 @@ ret += self.index[i] * self.shape[i - 1] return ret + class W_NDIter(W_Root): - def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, - w_op_axes, w_itershape, w_buffersize, order): + w_op_axes, w_itershape, w_buffersize, order): self.order = order self.external_loop = False self.buffered = False @@ -286,9 +226,11 @@ if space.isinstance_w(w_seq, space.w_tuple) or \ space.isinstance_w(w_seq, space.w_list): w_seq_as_list = space.listview(w_seq) - self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + self.seq = [convert_to_array(space, w_elem) + if not space.is_none(w_elem) else None + for w_elem in w_seq_as_list] else: - self.seq =[convert_to_array(space, w_seq)] + self.seq = [convert_to_array(space, w_seq)] parse_func_flags(space, self, w_flags) self.op_flags = parse_op_arg(space, 'op_flags', 
w_op_flags, @@ -308,9 +250,9 @@ self.dtypes = [] # handle None or writable operands, calculate my shape - self.iters=[] - outargs = [i for i in range(len(self.seq)) \ - if self.seq[i] is None or self.op_flags[i].rw == 'w'] + self.iters = [] + outargs = [i for i in range(len(self.seq)) + if self.seq[i] is None or self.op_flags[i].rw == 'w'] if len(outargs) > 0: out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) else: @@ -325,14 +267,12 @@ out_dtype = None for i in range(len(self.seq)): if self.seq[i] is None: - self.op_flags[i].get_it_item = (get_readwrite_item, - get_readwrite_slice) self.op_flags[i].allocate = True continue if self.op_flags[i].rw == 'w': continue - out_dtype = ufuncs.find_binop_result_dtype(space, - self.seq[i].get_dtype(), out_dtype) + out_dtype = ufuncs.find_binop_result_dtype( + space, self.seq[i].get_dtype(), out_dtype) for i in outargs: if self.seq[i] is None: # XXX can we postpone allocation to later? @@ -360,8 +300,9 @@ self.dtypes[i] = seq_d elif selfd != seq_d: if not 'r' in self.op_flags[i].tmp_copy: - raise OperationError(space.w_TypeError, space.wrap( - "Iterator operand required copying or buffering for operand %d" % i)) + raise oefmt(space.w_TypeError, + "Iterator operand required copying or " + "buffering for operand %d", i) impl = self.seq[i].implementation new_impl = impl.astype(space, selfd) self.seq[i] = W_NDimArray(new_impl) @@ -370,18 +311,14 @@ self.dtypes = [s.get_dtype() for s in self.seq] # create an iterator for each operand - if self.external_loop: - for i in range(len(self.seq)): - self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, - self.seq[i], iter_shape), self.op_flags[i])) - else: - for i in range(len(self.seq)): - self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i], iter_shape, self.dtypes[i]), - self.op_flags[i])) + for i in range(len(self.seq)): + it = get_iter(space, self.order, self.seq[i], iter_shape, self.dtypes[i]) + 
self.iters.append((it, it.reset())) + def set_op_axes(self, space, w_op_axes): if space.len_w(w_op_axes) != len(self.seq): - raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + raise oefmt(space.w_ValueError, + "op_axes must be a tuple/list matching the number of ops") op_axes = space.listview(w_op_axes) l = -1 for w_axis in op_axes: @@ -390,10 +327,14 @@ if l == -1: l = axis_len elif axis_len != l: - raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) - self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + raise oefmt(space.w_ValueError, + "Each entry of op_axes must have the same size") + self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 + for x in space.listview(w_axis)]) if l == -1: - raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise oefmt(space.w_ValueError, + "If op_axes is provided, at least one list of axes " + "must be contained within it") raise Exception('xxx TODO') # Check that values make sense: # - in bounds for each operand @@ -404,24 +345,34 @@ def descr_iter(self, space): return space.wrap(self) + def getitem(self, it, st, op_flags): + if op_flags.rw == 'r': + impl = concrete.ConcreteNonWritableArrayWithBase + else: + impl = concrete.ConcreteArrayWithBase + res = impl([], it.array.dtype, it.array.order, [], [], + it.array.storage, self) + res.start = st.offset + return W_NDimArray(res) + def descr_getitem(self, space, w_idx): idx = space.int_w(w_idx) try: - ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + it, st = self.iters[idx] except IndexError: - raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) - return ret + raise oefmt(space.w_IndexError, + "Iterator operand index %d is out of bounds", idx) + return 
self.getitem(it, st, self.op_flags[idx]) def descr_setitem(self, space, w_idx, w_value): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_len(self, space): space.wrap(len(self.iters)) def descr_next(self, space): - for it in self.iters: - if not it.done(): + for it, st in self.iters: + if not it.done(st): break else: self.done = True @@ -432,20 +383,20 @@ self.index_iter.next() else: self.first_next = False - for i in range(len(self.iters)): - res.append(self.iters[i].getitem(space, self.seq[i])) - self.iters[i].next() - if len(res) <2: + for i, (it, st) in enumerate(self.iters): + res.append(self.getitem(it, st, self.op_flags[i])) + self.iters[i] = (it, it.next(st)) + if len(res) < 2: return res[0] return space.newtuple(res) def iternext(self): if self.index_iter: self.index_iter.next() - for i in range(len(self.iters)): - self.iters[i].next() - for it in self.iters: - if not it.done(): + for i, (it, st) in enumerate(self.iters): + self.iters[i] = (it, it.next(st)) + for it, st in self.iters: + if not it.done(st): break else: self.done = True @@ -456,29 +407,23 @@ return space.wrap(self.iternext()) def descr_copy(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_debug_print(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_enable_external_loop(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") @unwrap_spec(axis=int) def descr_remove_axis(self, space, axis): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise 
oefmt(space.w_NotImplementedError, "not implemented yet") def descr_remove_multi_index(self, space, w_multi_index): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_reset(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_operands(self, space): l_w = [] @@ -496,17 +441,16 @@ return space.wrap(self.done) def descr_get_has_delayed_bufalloc(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_has_index(self, space): return space.wrap(self.tracked_index in ["C", "F"]) def descr_get_index(self, space): if not self.tracked_index in ["C", "F"]: - raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + raise oefmt(space.w_ValueError, "Iterator does not have an index") if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + raise oefmt(space.w_ValueError, "Iterator is past the end") return space.wrap(self.index_iter.getvalue()) def descr_get_has_multi_index(self, space): @@ -514,51 +458,44 @@ def descr_get_multi_index(self, space): if not self.tracked_index == "multi": - raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a multi-index")) + raise oefmt(space.w_ValueError, "Iterator is not tracking a multi-index") if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + raise oefmt(space.w_ValueError, "Iterator is past the end") return space.newtuple([space.wrap(x) for x in self.index_iter.index]) def descr_get_iterationneedsapi(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise 
oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_iterindex(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_itersize(self, space): return space.wrap(support.product(self.shape)) def descr_get_itviews(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_ndim(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_nop(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_shape(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_value(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") - at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), - w_op_dtypes = WrappedDefault(None), order=str, + at unwrap_spec(w_flags=WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes=WrappedDefault(None), order=str, w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order='K'): + w_itershape, w_buffersize, order='K'): return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order) + w_itershape, w_buffersize, order) 
W_NDIter.typedef = TypeDef( 'nditer', diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -148,20 +148,22 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() index_impl = index_arr.implementation index_iter = AllButAxisIter(index_impl, axis) + index_state = index_iter.reset() stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): + while not arr_iter.done(arr_state): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + - index_iter.offset, i) + index_state.offset, i) r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_iter.offset, arr_iter.offset) + arr.get_storage(), storage, index_state.offset, arr_state.offset) ArgSort(r).sort() - arr_iter.next() - index_iter.next() + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) return index_arr return argsort @@ -292,12 +294,13 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() stride_size = arr.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): - r = Repr(stride_size, axis_size, arr.get_storage(), arr_iter.offset) + while not arr_iter.done(arr_state): + r = Repr(stride_size, axis_size, arr.get_storage(), arr_state.offset) ArgSort(r).sort() - arr_iter.next() + arr_state = arr_iter.next(arr_state) return sort diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -233,30 +233,6 @@ return dtype -def to_coords(space, shape, size, order, w_item_or_slice): - '''Returns a start coord, step, and length. 
- ''' - start = lngth = step = 0 - if not (space.isinstance_w(w_item_or_slice, space.w_int) or - space.isinstance_w(w_item_or_slice, space.w_slice)): - raise OperationError(space.w_IndexError, - space.wrap('unsupported iterator index')) - - start, stop, step, lngth = space.decode_index4(w_item_or_slice, size) - - coords = [0] * len(shape) - i = start - if order == 'C': - for s in range(len(shape) -1, -1, -1): - coords[s] = i % shape[s] - i //= shape[s] - else: - for s in range(len(shape)): - coords[s] = i % shape[s] - i //= shape[s] - return coords, step, lngth - - @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -25,3 +25,18 @@ for x in s: i *= x return i + + +def check_and_adjust_index(space, index, size, axis): + if index < -size or index >= size: + if axis >= 0: + raise oefmt(space.w_IndexError, + "index %d is out of bounds for axis %d with size %d", + index, axis, size) + else: + raise oefmt(space.w_IndexError, + "index %d is out of bounds for size %d", + index, size) + if index < 0: + index += size + return index diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -16,17 +16,18 @@ assert backstrides == [10, 4] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() - i.next() - assert i.offset == 3 - assert not i.done() - assert i.indices == [0,3] + s = i.reset() + s = i.next(s) + s = i.next(s) + s = i.next(s) + assert s.offset == 3 + assert not i.done(s) + assert s.indices == [0,3] #cause a dimension overflow - i.next() - i.next() - assert i.offset == 5 - assert i.indices == [1,0] + s = i.next(s) + s = i.next(s) + assert s.offset == 5 + 
assert s.indices == [1,0] #Now what happens if the array is transposed? strides[-1] != 1 # therefore layout is non-contiguous @@ -35,17 +36,18 @@ assert backstrides == [2, 12] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() - i.next() - assert i.offset == 9 - assert not i.done() - assert i.indices == [0,3] + s = i.reset() + s = i.next(s) + s = i.next(s) + s = i.next(s) + assert s.offset == 9 + assert not i.done(s) + assert s.indices == [0,3] #cause a dimension overflow - i.next() - i.next() - assert i.offset == 1 - assert i.indices == [1,0] + s = i.next(s) + s = i.next(s) + assert s.offset == 1 + assert s.indices == [1,0] def test_iterator_step(self): #iteration in C order with #contiguous layout => strides[-1] is 1 @@ -56,22 +58,23 @@ assert backstrides == [10, 4] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 6 - assert not i.done() - assert i.indices == [1,1] + s = i.reset() + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + assert s.offset == 6 + assert not i.done(s) + assert s.indices == [1,1] #And for some big skips - i.next_skip_x(5) - assert i.offset == 11 - assert i.indices == [2,1] - i.next_skip_x(5) + s = i.next_skip_x(s, 5) + assert s.offset == 11 + assert s.indices == [2,1] + s = i.next_skip_x(s, 5) # Note: the offset does not overflow but recycles, # this is good for broadcast - assert i.offset == 1 - assert i.indices == [0,1] - assert i.done() + assert s.offset == 1 + assert s.indices == [0,1] + assert i.done(s) #Now what happens if the array is transposed? 
strides[-1] != 1 # therefore layout is non-contiguous @@ -80,17 +83,18 @@ assert backstrides == [2, 12] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 4 - assert i.indices == [1,1] - assert not i.done() - i.next_skip_x(5) - assert i.offset == 5 - assert i.indices == [2,1] - assert not i.done() - i.next_skip_x(5) - assert i.indices == [0,1] - assert i.offset == 3 - assert i.done() + s = i.reset() + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + assert s.offset == 4 + assert s.indices == [1,1] + assert not i.done(s) + s = i.next_skip_x(s, 5) + assert s.offset == 5 + assert s.indices == [2,1] + assert not i.done(s) + s = i.next_skip_x(s, 5) + assert s.indices == [0,1] + assert s.offset == 3 + assert i.done(s) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -164,24 +164,6 @@ assert calc_new_strides([1, 1, 105, 1, 1], [7, 15], [1, 7],'F') == \ [1, 1, 1, 105, 105] - def test_to_coords(self): - from pypy.module.micronumpy.strides import to_coords - - def _to_coords(index, order): - return to_coords(self.space, [2, 3, 4], 24, order, - self.space.wrap(index))[0] - - assert _to_coords(0, 'C') == [0, 0, 0] - assert _to_coords(1, 'C') == [0, 0, 1] - assert _to_coords(-1, 'C') == [1, 2, 3] - assert _to_coords(5, 'C') == [0, 1, 1] - assert _to_coords(13, 'C') == [1, 0, 1] - assert _to_coords(0, 'F') == [0, 0, 0] - assert _to_coords(1, 'F') == [1, 0, 0] From noreply at buildbot.pypy.org Sat Apr 19 20:49:02 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 19 Apr 2014 20:49:02 +0200 (CEST) Subject: [pypy-commit] pypy py3k: __builtin__ -> builtins Message-ID: <20140419184902.5AF6D1D23CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: 
r70783:59e6a6bce81b Date: 2014-04-19 11:48 -0700 http://bitbucket.org/pypy/pypy/changeset/59e6a6bce81b/ Log: __builtin__ -> builtins diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -112,7 +112,7 @@ space.call_function(w_pathsetter, w_path) # import site try: - import_ = space.getattr(space.getbuiltinmodule('__builtin__'), + import_ = space.getattr(space.getbuiltinmodule('builtins'), space.wrap('__import__')) space.call_function(import_, space.wrap('site')) return 0 From noreply at buildbot.pypy.org Sat Apr 19 21:55:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 21:55:49 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Pass the segment base to stmcb_expand_marker Message-ID: <20140419195549.74E601D2CAC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1169:8bd21f95eb8a Date: 2014-04-19 18:41 +0200 http://bitbucket.org/pypy/stmgc/changeset/8bd21f95eb8a/ Log: Pass the segment base to stmcb_expand_marker diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -3,7 +3,7 @@ #endif -void (*stmcb_expand_marker)(uintptr_t odd_number, +void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); @@ -21,7 +21,7 @@ uintptr_t x = (uintptr_t)current->ss; if (x & 1) { /* the stack entry is an odd number */ - stmcb_expand_marker(x, current[1].ss, + stmcb_expand_marker(pseg->pub.segment_base, x, current[1].ss, pseg->marker_self, _STM_MARKER_LEN); if (pseg->marker_self[0] == 0) { diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -378,7 +378,7 @@ /* The markers pushed in the shadowstack are an odd number followed by a regular pointer. When needed, this library invokes this callback to turn this pair into a human-readable explanation. 
*/ -extern void (*stmcb_expand_marker)(uintptr_t odd_number, +extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -123,7 +123,8 @@ void stm_flush_timing(stm_thread_local_t *, int); -void (*stmcb_expand_marker)(uintptr_t odd_number, object_t *following_object, +void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, + object_t *following_object, char *outputbuf, size_t outputbufsize); void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -46,8 +46,8 @@ assert tl.longest_marker_other[0] == '\x00' def test_abort_marker_no_shadowstack_cb(self): - @ffi.callback("void(uintptr_t, object_t *, char *, size_t)") - def expand_marker(number, ptr, outbuf, outbufsize): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): seen.append(1) lib.stmcb_expand_marker = expand_marker seen = [] @@ -60,8 +60,8 @@ assert not seen def test_abort_marker_shadowstack_cb(self): - @ffi.callback("void(uintptr_t, object_t *, char *, size_t)") - def expand_marker(number, ptr, outbuf, outbufsize): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): s = '%d %r\x00' % (number, ptr) assert len(s) <= outbufsize outbuf[0:len(s)] = s @@ -116,8 +116,8 @@ py.test.raises(EmptyStack, self.pop_root) def test_stm_expand_marker(self): - @ffi.callback("void(uintptr_t, object_t *, char *, size_t)") - def expand_marker(number, ptr, outbuf, outbufsize): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): s = '%d %r\x00' % (number, ptr) assert 
len(s) <= outbufsize outbuf[0:len(s)] = s From noreply at buildbot.pypy.org Sat Apr 19 21:55:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 21:55:50 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Another hook Message-ID: <20140419195550.A69851D2CAC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1170:7660960de054 Date: 2014-04-19 21:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/7660960de054/ Log: Another hook diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -7,6 +7,9 @@ object_t *following_object, char *outputbuf, size_t outputbufsize); +void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); + static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) { @@ -55,6 +58,9 @@ earlier than now (some objects may be GCed), but we only know here the total time it gets attributed. */ + if (stmcb_debug_print) { + stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self); + } if (time * 0.99 > tl->longest_marker_time) { tl->longest_marker_state = attribute_to; tl->longest_marker_time = time; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -381,6 +381,8 @@ extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); +extern void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); /* Conventience macros to push the markers into the shadowstack */ #define STM_PUSH_MARKER(tl, odd_num, p) do { \ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -126,6 +126,8 @@ void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); +void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); 
void stm_update_marker_num(stm_thread_local_t *, uintptr_t); @@ -464,6 +466,7 @@ def teardown_method(self, meth): lib.stmcb_expand_marker = ffi.NULL + lib.stmcb_debug_print = ffi.NULL tl = self.tls[self.current_thread] if lib._stm_in_transaction(tl) and lib.stm_is_inevitable(): self.commit_transaction() # must succeed! diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -130,3 +130,26 @@ self.push_root(stm_allocate(16)) raw = lib._stm_expand_marker() assert ffi.string(raw) == '29 %r' % (p,) + + def test_stmcb_debug_print(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '<<<%d>>>\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + @ffi.callback("void(char *, double, char *)") + def debug_print(cause, time, marker): + if 0.0 < time < 1.0: + time = "time_ok" + seen.append((ffi.string(cause), time, ffi.string(marker))) + seen = [] + lib.stmcb_expand_marker = expand_marker + lib.stmcb_debug_print = debug_print + # + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + self.abort_transaction() + # + assert seen == [("run aborted other", "time_ok", "<<<29>>>")] From noreply at buildbot.pypy.org Sat Apr 19 21:56:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 21:56:44 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/7660960de054 (branch "marker") Message-ID: <20140419195644.DD2111D2CAC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70784:d8ac085e3922 Date: 2014-04-19 21:27 +0200 http://bitbucket.org/pypy/pypy/changeset/d8ac085e3922/ Log: import stmgc/7660960de054 (branch "marker") diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ 
b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -8bd21f95eb8a +7660960de054 diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -8,6 +8,9 @@ object_t *following_object, char *outputbuf, size_t outputbufsize); +void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); + static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) { @@ -56,6 +59,9 @@ earlier than now (some objects may be GCed), but we only know here the total time it gets attributed. */ + if (stmcb_debug_print) { + stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self); + } if (time * 0.99 > tl->longest_marker_time) { tl->longest_marker_state = attribute_to; tl->longest_marker_time = time; diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -382,6 +382,8 @@ extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize); +extern void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); /* Conventience macros to push the markers into the shadowstack */ #define STM_PUSH_MARKER(tl, odd_num, p) do { \ From noreply at buildbot.pypy.org Sat Apr 19 21:56:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Apr 2014 21:56:46 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: First step: reporting in the logs (PYPYLOG=stm-report:-) Message-ID: <20140419195646.0E1321D2CAC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70785:eb2840c9b83c Date: 2014-04-19 21:53 +0200 http://bitbucket.org/pypy/pypy/changeset/eb2840c9b83c/ Log: First step: reporting in the logs (PYPYLOG=stm-report:-) diff --git a/rpython/translator/c/genc.py 
b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -884,6 +884,7 @@ print >> f print >> f, '#include "preimpl.h"' print >> f, '#include "src/rtyper.h"' + print >> f, '#include "src/debug_print.h"' print >> f, '#include "src_stm/extracode.h"' def commondefs(defines): diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -73,47 +73,71 @@ RPyStringSpace0 *co_filename; RPyStringSpace0 *co_name; RPyStringSpace0 *co_lnotab; + char *ntrunc = "", *fntrunc = ""; + long fnlen, nlen, line; + char *fn, *name; - co_filename = _fetch_rpyspace0(segment_base, o, g_co_filename_ofs); - co_name = _fetch_rpyspace0(segment_base, o, g_co_name_ofs); - co_firstlineno = _fetch_lngspace0(segment_base, o, g_co_firstlineno_ofs); - co_lnotab = _fetch_rpyspace0(segment_base, o, g_co_lnotab_ofs); + if (o) { + co_filename =_fetch_rpyspace0(segment_base, o, g_co_filename_ofs); + co_name =_fetch_rpyspace0(segment_base, o, g_co_name_ofs); + co_firstlineno=_fetch_lngspace0(segment_base, o, g_co_firstlineno_ofs); + co_lnotab =_fetch_rpyspace0(segment_base, o, g_co_lnotab_ofs); - char *ntrunc = "", *fntrunc = ""; + long remaining = outputbufsize - 32; + nlen = RPyString_Size(co_name); + name = _RPyString_AsString(co_name); + if (nlen > remaining / 2) { + nlen = remaining / 2; + ntrunc = "..."; + } + remaining -= nlen; - long remaining = outputbufsize - 32; - long nlen = RPyString_Size(co_name); - char *name = _RPyString_AsString(co_name); - if (nlen > remaining / 2) { - nlen = remaining / 2; - ntrunc = "..."; + fnlen = RPyString_Size(co_filename); + fn = _RPyString_AsString(co_filename); + if (fnlen > remaining) { + fn += (fnlen - remaining); + fnlen = remaining; + fntrunc = "..."; + } + + long lnotablen = RPyString_Size(co_lnotab); + char *lnotab = _RPyString_AsString(co_lnotab); + uintptr_t next_instr = 
odd_number >> 1; + line = co_firstlineno; + uintptr_t i, addr = 0; + for (i = 0; i < lnotablen; i += 2) { + addr += ((unsigned char *)lnotab)[i]; + if (addr > next_instr) + break; + line += ((unsigned char *)lnotab)[i + 1]; + } } - remaining -= nlen; - - long fnlen = RPyString_Size(co_filename); - char *fn = _RPyString_AsString(co_filename); - if (fnlen > remaining) { - fn += (fnlen - remaining); - fnlen = remaining; - fntrunc = "..."; - } - - long lnotablen = RPyString_Size(co_lnotab); - char *lnotab = _RPyString_AsString(co_lnotab); - uintptr_t next_instr = odd_number >> 1; - long line = co_firstlineno; - uintptr_t i, addr = 0; - for (i = 0; i < lnotablen; i += 2) { - addr += ((unsigned char *)lnotab)[i]; - if (addr > next_instr) - break; - line += ((unsigned char *)lnotab)[i + 1]; + else { + fnlen = 1; + fn = "?"; + nlen = 1; + name = "?"; + line = 0; } snprintf(outputbuf, outputbufsize, "File \"%s%.*s\", line %ld, in %.*s%s", fntrunc, (int)fnlen, fn, line, (int)nlen, name, ntrunc); } +#define REPORT_MINIMUM_TIME 0.0001 /* 0.1 millisecond; xxx tweak */ + +static void _stm_cb_debug_print(const char *cause, double time, + const char *marker) +{ + if (time >= REPORT_MINIMUM_TIME) { + PYPY_DEBUG_START("stm-report"); + fprintf(PYPY_DEBUG_FILE, "%s %s\n%s %.6fs: %s\n", + pypy_debug_threadid, marker, + pypy_debug_threadid, time, cause); + PYPY_DEBUG_STOP("stm-report"); + } +} + void pypy_stm_setup_expand_marker(long co_filename_ofs, long co_name_ofs, long co_firstlineno_ofs, @@ -124,4 +148,10 @@ g_co_firstlineno_ofs = co_firstlineno_ofs; g_co_lnotab_ofs = co_lnotab_ofs; stmcb_expand_marker = _stm_expand_marker_for_pypy; + + PYPY_DEBUG_START("stm-report"); + if (PYPY_HAVE_DEBUG_PRINTS) { + stmcb_debug_print = _stm_cb_debug_print; + } + PYPY_DEBUG_STOP("stm-report"); } diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ 
b/rpython/translator/stm/test/test_ztranslated.py @@ -152,7 +152,6 @@ print '<', x.count, y.count, '>' return 0 # - perform_transaction = rstm.make_perform_transaction(check, PS) t, cbuilder = self.compile(entry_point, backendopt=True) data = cbuilder.cmdexec('a b c d') assert '< 5 1000 >' in data, "got: %r" % (data,) @@ -185,7 +184,6 @@ print bug2(1) return 0 # - perform_transaction = rstm.make_perform_transaction(check, PS) t, cbuilder = self.compile(entry_point, backendopt=True) data = cbuilder.cmdexec('') assert '12\n12\n' in data, "got: %r" % (data,) @@ -219,7 +217,6 @@ do_stuff() return 0 # - perform_transaction = rstm.make_perform_transaction(check, PS) t, cbuilder = self.compile(main) data = cbuilder.cmdexec('') assert '42\n' in data, "got: %r" % (data,) @@ -561,3 +558,40 @@ assert ('starting some_extremely_longish_and_boring_function_name\n' 'File "...bla/br/project/foobaz.py", line 81,' ' in some_extremely_longish_a...\n') in data + + def test_pypy_marker_2(self): + import time + class PyCode(object): + def __init__(self, co_filename, co_name, + co_firstlineno, co_lnotab): + self.co_filename = co_filename + self.co_name = co_name + self.co_firstlineno = co_firstlineno + self.co_lnotab = co_lnotab + # + def check(foobar, retry_counter): + if retry_counter <= 1: + rstm.push_marker(29, lltype.nullptr(rffi.CCHARP.TO)) + start = time.time() + while abs(time.time() - start) < 0.1: + pass + rstm.abort_and_retry() + return 0 + # + S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) + PS = lltype.Ptr(S) + perform_transaction = rstm.make_perform_transaction(check, PS) + def entry_point(argv): + pycode1 = PyCode("/tmp/foobar.py", "baz", 40, "\x00\x01\x05\x01") + llop.stm_setup_expand_marker_for_pypy( + lltype.Void, pycode1, + "co_filename", "co_name", "co_firstlineno", "co_lnotab") + perform_transaction(lltype.malloc(S)) + return 0 + # + t, cbuilder = self.compile(entry_point, backendopt=True) + data, err = cbuilder.cmdexec('a b c d', err=True, + env={'PYPYLOG': 
'stm-report:-'}) + assert '0# File "?", line 0, in ?\n' in err + assert '0# 0.1' in err + assert 's: run aborted other\n' in err From noreply at buildbot.pypy.org Sat Apr 19 23:53:30 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 19 Apr 2014 23:53:30 +0200 (CEST) Subject: [pypy-commit] pypy numpy-searchsorted: implement as applevel function Message-ID: <20140419215330.6DE8F1C02D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-searchsorted Changeset: r70786:2bc5f1fb800d Date: 2014-04-19 23:55 +0300 http://bitbucket.org/pypy/pypy/changeset/2bc5f1fb800d/ Log: implement as applevel function diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -24,7 +24,7 @@ return arr # How to call this from descr_searchsorted?? -def searchsort(space, arr, v, side, result): +def searchsort(arr, v, side, result): def left_find_index(a, val): imin = 0 imax = a.size @@ -50,6 +50,6 @@ else: func = right_find_index for i in range(v.get_size()): - result[i] = func(self, v[i]) + result[i] = func(arr, v[i]) return result diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -695,7 +695,6 @@ @unwrap_spec(side=str, w_sorter=WrappedDefault(None)) def descr_searchsorted(self, space, w_v, side='left', w_sorter=None): - from pypy.module.micronumpy.sort import searchsort if not space.is_none(w_sorter): raise OperationError(space.w_NotImplementedError, space.wrap( 'sorter not supported in searchsort')) @@ -718,7 +717,8 @@ if len(v.get_shape()) >1: raise OperationError(space.w_ValueError, space.wrap( "v must be a 1-d array-like")) - return searchsort(self, space, v, side, ret) + app_searchsort(space, self, v, space.wrap(side), ret) + return ret def descr_setasflat(self, space, w_v): raise OperationError(space.w_NotImplementedError, space.wrap( 
@@ -1271,6 +1271,37 @@ return res """, filename=__file__).interphook('ptp') +app_searchsort = applevel(r""" + def searchsort(arr, v, side, result): + def left_find_index(a, val): + imin = 0 + imax = a.size + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if a[imid] <= val: + imin = imid +1 + else: + imax = imid + return imin + def right_find_index(a, val): + imin = 0 + imax = a.size + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if a[imid] < val: + imin = imid +1 + else: + imax = imid + return imin + if side == 'l': + func = left_find_index + else: + func = right_find_index + for i in range(v.size): + result[i] = func(arr, v[i]) + return result +""", filename=__file__).interphook('searchsort') + W_NDimArray.typedef = TypeDef("ndarray", __module__ = "numpy", __new__ = interp2app(descr_new_array), From noreply at buildbot.pypy.org Sat Apr 19 23:53:31 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 19 Apr 2014 23:53:31 +0200 (CEST) Subject: [pypy-commit] pypy numpy-searchsorted: cleanup Message-ID: <20140419215331.C9B111C02D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-searchsorted Changeset: r70787:aeb6f15e5552 Date: 2014-04-19 23:56 +0300 http://bitbucket.org/pypy/pypy/changeset/aeb6f15e5552/ Log: cleanup diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -23,33 +23,3 @@ i += step return arr -# How to call this from descr_searchsorted?? 
-def searchsort(arr, v, side, result): - def left_find_index(a, val): - imin = 0 - imax = a.size - while imin < imax: - imid = imin + ((imax - imin) >> 1) - if a[imid] <= val: - imin = imid +1 - else: - imax = imid - return imin - def right_find_index(a, val): - imin = 0 - imax = a.size - while imin < imax: - imid = imin + ((imax - imin) >> 1) - if a[imid] < val: - imin = imid +1 - else: - imax = imid - return imin - if side == 'l': - func = left_find_index - else: - func = right_find_index - for i in range(v.get_size()): - result[i] = func(arr, v[i]) - return result - From noreply at buildbot.pypy.org Sat Apr 19 23:53:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 19 Apr 2014 23:53:33 +0200 (CEST) Subject: [pypy-commit] pypy numpy-searchsorted: merge default into branch Message-ID: <20140419215333.598211C02D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-searchsorted Changeset: r70788:e0560bcc6840 Date: 2014-04-19 23:56 +0300 http://bitbucket.org/pypy/pypy/changeset/e0560bcc6840/ Log: merge default into branch diff too long, truncating to 2000 out of 2685 lines diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -40,7 +40,7 @@ ``pypy-stm`` project is to improve what is so far the state-of-the-art for using multiple CPUs, which for cases where separate processes don't work is done by writing explicitly multi-threaded programs. Instead, -``pypy-stm`` is flushing forward an approach to *hide* the threads, as +``pypy-stm`` is pushing forward an approach to *hide* the threads, as described below in `atomic sections`_. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -140,3 +140,6 @@ .. branch: numpypy-nditer Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. 
branch: numpy-speed +Separate iterator from its state so jit can optimize better diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -284,9 +284,11 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return ArrayIter(self, support.product(shape), shape, r[0], r[1]) - return ArrayIter(self, self.get_size(), self.shape, - self.strides, self.backstrides) + i = ArrayIter(self, support.product(shape), shape, r[0], r[1]) + else: + i = ArrayIter(self, self.get_size(), self.shape, + self.strides, self.backstrides) + return i, i.reset() def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop, ufuncs +from pypy.module.micronumpy import descriptor, loop from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter @@ -156,10 +156,10 @@ "string is smaller than requested size")) a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) - ai = a.create_iter() + ai, state = a.create_iter() for val in items: - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) return space.wrap(a) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -32,24 +32,23 @@ self.reset() def reset(self): - self.iter = self.base.create_iter() + self.iter, self.state = self.base.create_iter() def descr_len(self, space): - return 
space.wrap(self.base.get_size()) + return space.wrap(self.iter.size) def descr_next(self, space): - if self.iter.done(): + if self.iter.done(self.state): raise OperationError(space.w_StopIteration, space.w_None) - w_res = self.iter.getitem() - self.iter.next() + w_res = self.iter.getitem(self.state) + self.state = self.iter.next(self.state) return w_res def descr_index(self, space): - return space.wrap(self.iter.index) + return space.wrap(self.state.index) def descr_coords(self, space): - coords = self.base.to_coords(space, space.wrap(self.iter.index)) - return space.newtuple([space.wrap(c) for c in coords]) + return space.newtuple([space.wrap(c) for c in self.state.indices]) def descr_getitem(self, space, w_idx): if not (space.isinstance_w(w_idx, space.w_int) or @@ -58,13 +57,13 @@ self.reset() base = self.base start, stop, step, length = space.decode_index4(w_idx, base.get_size()) - base_iter = base.create_iter() - base_iter.next_skip_x(start) + base_iter, base_state = base.create_iter() + base_state = base_iter.next_skip_x(base_state, start) if length == 1: - return base_iter.getitem() + return base_iter.getitem(base_state) res = W_NDimArray.from_shape(space, [length], base.get_dtype(), base.get_order(), w_instance=base) - return loop.flatiter_getitem(res, base_iter, step) + return loop.flatiter_getitem(res, base_iter, base_state, step) def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,7 +42,6 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support -from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -52,19 +51,20 @@ self.shapelen = len(shape) self.indexes = [0] * len(shape) self._done = False - self.idx_w = [None] * len(idx_w) + self.idx_w_i = [None] * 
len(idx_w) + self.idx_w_s = [None] * len(idx_w) for i, w_idx in enumerate(idx_w): if isinstance(w_idx, W_NDimArray): - self.idx_w[i] = w_idx.create_iter(shape) + self.idx_w_i[i], self.idx_w_s[i] = w_idx.create_iter(shape) def done(self): return self._done @jit.unroll_safe def next(self): - for w_idx in self.idx_w: - if w_idx is not None: - w_idx.next() + for i, idx_w_i in enumerate(self.idx_w_i): + if idx_w_i is not None: + self.idx_w_s[i] = idx_w_i.next(self.idx_w_s[i]) for i in range(self.shapelen - 1, -1, -1): if self.indexes[i] < self.shape[i] - 1: self.indexes[i] += 1 @@ -79,6 +79,16 @@ return [space.wrap(self.indexes[i]) for i in range(shapelen)] +class IterState(object): + _immutable_fields_ = ['iterator', 'index', 'indices[*]', 'offset'] + + def __init__(self, iterator, index, indices, offset): + self.iterator = iterator + self.index = index + self.indices = indices + self.offset = offset + + class ArrayIter(object): _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', 'strides[*]', 'backstrides[*]'] @@ -91,90 +101,66 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides - self.reset() def reset(self): - self.index = 0 - self.indices = [0] * len(self.shape_m1) - self.offset = self.array.start + return IterState(self, 0, [0] * len(self.shape_m1), self.array.start) @jit.unroll_safe - def next(self): - self.index += 1 + def next(self, state): + assert state.iterator is self + index = state.index + 1 + indices = state.indices + offset = state.offset for i in xrange(self.ndim_m1, -1, -1): - idx = self.indices[i] + idx = indices[i] if idx < self.shape_m1[i]: - self.indices[i] = idx + 1 - self.offset += self.strides[i] + indices[i] = idx + 1 + offset += self.strides[i] break else: - self.indices[i] = 0 - self.offset -= self.backstrides[i] + indices[i] = 0 + offset -= self.backstrides[i] + return IterState(self, index, indices, offset) @jit.unroll_safe - def next_skip_x(self, step): + def next_skip_x(self, 
state, step): + assert state.iterator is self assert step >= 0 if step == 0: - return - self.index += step + return state + index = state.index + step + indices = state.indices + offset = state.offset for i in xrange(self.ndim_m1, -1, -1): - idx = self.indices[i] + idx = indices[i] if idx < (self.shape_m1[i] + 1) - step: - self.indices[i] = idx + step - self.offset += self.strides[i] * step + indices[i] = idx + step + offset += self.strides[i] * step break else: - rem_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) + rem_step = (idx + step) // (self.shape_m1[i] + 1) cur_step = step - rem_step * (self.shape_m1[i] + 1) - self.indices[i] += cur_step - self.offset += self.strides[i] * cur_step + indices[i] = idx + cur_step + offset += self.strides[i] * cur_step step = rem_step assert step > 0 + return IterState(self, index, indices, offset) - def done(self): - return self.index >= self.size + def done(self, state): + assert state.iterator is self + return state.index >= self.size - def getitem(self): - return self.array.getitem(self.offset) + def getitem(self, state): + assert state.iterator is self + return self.array.getitem(state.offset) - def getitem_bool(self): - return self.array.getitem_bool(self.offset) + def getitem_bool(self, state): + assert state.iterator is self + return self.array.getitem_bool(state.offset) - def setitem(self, elem): - self.array.setitem(self.offset, elem) - - -class SliceIterator(ArrayIter): - def __init__(self, arr, strides, backstrides, shape, order="C", - backward=False, dtype=None): - if dtype is None: - dtype = arr.implementation.dtype - self.dtype = dtype - self.arr = arr - if backward: - self.slicesize = shape[0] - self.gap = [support.product(shape[1:]) * dtype.elsize] - strides = strides[1:] - backstrides = backstrides[1:] - shape = shape[1:] - strides.reverse() - backstrides.reverse() - shape.reverse() - size = support.product(shape) - else: - shape = [support.product(shape)] - strides, backstrides = 
calc_strides(shape, dtype, order) - size = 1 - self.slicesize = support.product(shape) - self.gap = strides - - ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) - - def getslice(self): - from pypy.module.micronumpy.concrete import SliceArray - retVal = SliceArray(self.offset, self.gap, self.backstrides, - [self.slicesize], self.arr.implementation, self.arr, self.dtype) - return retVal + def setitem(self, state, elem): + assert state.iterator is self + self.array.setitem(state.offset, elem) def AxisIter(array, shape, axis, cumulative): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -12,11 +12,10 @@ AllButAxisIter -call2_driver = jit.JitDriver(name='numpy_call2', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_lhs', 'w_rhs', 'out', - 'left_iter', 'right_iter', 'out_iter']) +call2_driver = jit.JitDriver( + name='numpy_call2', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority @@ -46,47 +45,40 @@ if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=lhs_for_subtype) - left_iter = w_lhs.create_iter(shape) - right_iter = w_rhs.create_iter(shape) - out_iter = out.create_iter(shape) + left_iter, left_state = w_lhs.create_iter(shape) + right_iter, right_state = w_rhs.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): call2_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_lhs=w_lhs, w_rhs=w_rhs, - out=out, - left_iter=left_iter, right_iter=right_iter, - out_iter=out_iter) - w_left = left_iter.getitem().convert_to(space, calc_dtype) - w_right = right_iter.getitem().convert_to(space, calc_dtype) 
- out_iter.setitem(func(calc_dtype, w_left, w_right).convert_to( + calc_dtype=calc_dtype, res_dtype=res_dtype) + w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) + w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( space, res_dtype)) - left_iter.next() - right_iter.next() - out_iter.next() + left_state = left_iter.next(left_state) + right_state = right_iter.next(right_state) + out_state = out_iter.next(out_state) return out -call1_driver = jit.JitDriver(name='numpy_call1', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_obj', 'out', 'obj_iter', - 'out_iter']) +call1_driver = jit.JitDriver( + name='numpy_call1', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) - obj_iter = w_obj.create_iter(shape) - out_iter = out.create_iter(shape) + obj_iter, obj_state = w_obj.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_obj=w_obj, out=out, - obj_iter=obj_iter, out_iter=out_iter) - elem = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(func(calc_dtype, elem).convert_to(space, res_dtype)) - out_iter.next() - obj_iter.next() + calc_dtype=calc_dtype, res_dtype=res_dtype) + elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype)) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) return out setslice_driver = jit.JitDriver(name='numpy_setslice', @@ -96,18 +88,20 @@ def setslice(space, shape, 
target, source): # note that unlike everything else, target and source here are # array implementations, not arrays - target_iter = target.create_iter(shape) - source_iter = source.create_iter(shape) + target_iter, target_state = target.create_iter(shape) + source_iter, source_state = source.create_iter(shape) dtype = target.dtype shapelen = len(shape) - while not target_iter.done(): + while not target_iter.done(target_state): setslice_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + val = source_iter.getitem(source_state) if dtype.is_str_or_unicode(): - target_iter.setitem(dtype.coerce(space, source_iter.getitem())) + val = dtype.coerce(space, val) else: - target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) - target_iter.next() - source_iter.next() + val = val.convert_to(space, dtype) + target_iter.setitem(target_state, val) + target_state = target_iter.next(target_state) + source_state = source_iter.next(source_state) return target reduce_driver = jit.JitDriver(name='numpy_reduce', @@ -116,22 +110,22 @@ reds = 'auto') def compute_reduce(space, obj, calc_dtype, func, done_func, identity): - obj_iter = obj.create_iter() + obj_iter, obj_state = obj.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(space, calc_dtype) - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + obj_state = obj_iter.next(obj_state) else: cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, calc_dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval cur_value = func(calc_dtype, cur_value, rval) - obj_iter.next() + obj_state = obj_iter.next(obj_state) return cur_value 
reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', @@ -139,69 +133,76 @@ reds = 'auto') def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity): - obj_iter = obj.create_iter() - out_iter = out.create_iter() + obj_iter, obj_state = obj.create_iter() + out_iter, out_state = out.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) else: cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) def fill(arr, box): - arr_iter = arr.create_iter() - while not arr_iter.done(): - arr_iter.setitem(box) - arr_iter.next() + arr_iter, arr_state = arr.create_iter() + while not arr_iter.done(arr_state): + arr_iter.setitem(arr_state, box) + arr_state = arr_iter.next(arr_state) def assign(space, arr, seq): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() for item in seq: - arr_iter.setitem(arr_dtype.coerce(space, item)) - arr_iter.next() + arr_iter.setitem(arr_state, arr_dtype.coerce(space, item)) + arr_state = arr_iter.next(arr_state) where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') def where(space, out, shape, arr, x, y, 
dtype): - out_iter = out.create_iter(shape) - arr_iter = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) arr_dtype = arr.get_dtype() - x_iter = x.create_iter(shape) - y_iter = y.create_iter(shape) + x_iter, x_state = x.create_iter(shape) + y_iter, y_state = y.create_iter(shape) if x.is_scalar(): if y.is_scalar(): - iter = arr_iter + iter, state = arr_iter, arr_state else: - iter = y_iter + iter, state = y_iter, y_state else: - iter = x_iter + iter, state = x_iter, x_state shapelen = len(shape) - while not iter.done(): + while not iter.done(state): where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, arr_dtype=arr_dtype) - w_cond = arr_iter.getitem() + w_cond = arr_iter.getitem(arr_state) if arr_dtype.itemtype.bool(w_cond): - w_val = x_iter.getitem().convert_to(space, dtype) + w_val = x_iter.getitem(x_state).convert_to(space, dtype) else: - w_val = y_iter.getitem().convert_to(space, dtype) - out_iter.setitem(w_val) - out_iter.next() - arr_iter.next() - x_iter.next() - y_iter.next() + w_val = y_iter.getitem(y_state).convert_to(space, dtype) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) + arr_state = arr_iter.next(arr_state) + x_state = x_iter.next(x_state) + y_state = y_iter.next(y_state) + if x.is_scalar(): + if y.is_scalar(): + state = arr_state + else: + state = y_state + else: + state = x_state return out axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', @@ -212,31 +213,36 @@ def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): out_iter = AxisIter(out.implementation, arr.get_shape(), axis, cumulative) + out_state = out_iter.reset() if cumulative: temp_iter = AxisIter(temp.implementation, arr.get_shape(), axis, False) + temp_state = temp_iter.reset() else: - temp_iter = out_iter # hack - arr_iter = arr.create_iter() + temp_iter = out_iter # hack + temp_state = out_state + arr_iter, arr_state = 
arr.create_iter() if identity is not None: identity = identity.convert_to(space, dtype) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - assert not arr_iter.done() - w_val = arr_iter.getitem().convert_to(space, dtype) - if out_iter.indices[axis] == 0: + assert not arr_iter.done(arr_state) + w_val = arr_iter.getitem(arr_state).convert_to(space, dtype) + if out_state.indices[axis] == 0: if identity is not None: w_val = func(dtype, identity, w_val) else: - cur = temp_iter.getitem() + cur = temp_iter.getitem(temp_state) w_val = func(dtype, cur, w_val) - out_iter.setitem(w_val) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) if cumulative: - temp_iter.setitem(w_val) - temp_iter.next() - arr_iter.next() - out_iter.next() + temp_iter.setitem(temp_state, w_val) + temp_state = temp_iter.next(temp_state) + else: + temp_state = out_state + arr_state = arr_iter.next(arr_state) return out @@ -249,18 +255,18 @@ result = 0 idx = 1 dtype = arr.get_dtype() - iter = arr.create_iter() - cur_best = iter.getitem() - iter.next() + iter, state = arr.create_iter() + cur_best = iter.getitem(state) + state = iter.next(state) shapelen = len(arr.get_shape()) - while not iter.done(): + while not iter.done(state): arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_val = iter.getitem() + w_val = iter.getitem(state) new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - iter.next() + state = iter.next(state) idx += 1 return result return argmin_argmax @@ -291,17 +297,19 @@ right_impl = right.implementation assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype - outi = result.create_iter() + outi, outs = result.create_iter() lefti = AllButAxisIter(left_impl, len(left_shape) - 1) righti = AllButAxisIter(right_impl, 
right_critical_dim) + lefts = lefti.reset() + rights = righti.reset() n = left_impl.shape[-1] s1 = left_impl.strides[-1] s2 = right_impl.strides[right_critical_dim] - while not lefti.done(): - while not righti.done(): - oval = outi.getitem() - i1 = lefti.offset - i2 = righti.offset + while not lefti.done(lefts): + while not righti.done(rights): + oval = outi.getitem(outs) + i1 = lefts.offset + i2 = rights.offset i = 0 while i < n: i += 1 @@ -311,11 +319,11 @@ oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) i1 += s1 i2 += s2 - outi.setitem(oval) - outi.next() - righti.next() - righti.reset() - lefti.next() + outi.setitem(outs, oval) + outs = outi.next(outs) + rights = righti.next(rights) + rights = righti.reset() + lefts = lefti.next(lefts) return result count_all_true_driver = jit.JitDriver(name = 'numpy_count', @@ -324,13 +332,13 @@ def count_all_true_concrete(impl): s = 0 - iter = impl.create_iter() + iter, state = impl.create_iter() shapelen = len(impl.shape) dtype = impl.dtype - while not iter.done(): + while not iter.done(state): count_all_true_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - s += iter.getitem_bool() - iter.next() + s += iter.getitem_bool(state) + state = iter.next(state) return s def count_all_true(arr): @@ -344,18 +352,18 @@ reds = 'auto') def nonzero(res, arr, box): - res_iter = res.create_iter() - arr_iter = arr.create_iter() + res_iter, res_state = res.create_iter() + arr_iter, arr_state = arr.create_iter() shapelen = len(arr.shape) dtype = arr.dtype dims = range(shapelen) - while not arr_iter.done(): + while not arr_iter.done(arr_state): nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) - if arr_iter.getitem_bool(): + if arr_iter.getitem_bool(arr_state): for d in dims: - res_iter.setitem(box(arr_iter.indices[d])) - res_iter.next() - arr_iter.next() + res_iter.setitem(res_state, box(arr_state.indices[d])) + res_state = res_iter.next(res_state) + arr_state = arr_iter.next(arr_state) return 
res @@ -365,26 +373,26 @@ reds = 'auto') def getitem_filter(res, arr, index): - res_iter = res.create_iter() + res_iter, res_state = res.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() - arr_iter = arr.create_iter() + index_iter, index_state = index.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() index_dtype = index.get_dtype() # XXX length of shape of index as well? - while not index_iter.done(): + while not index_iter.done(index_state): getitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - res_iter.setitem(arr_iter.getitem()) - res_iter.next() - index_iter.next() - arr_iter.next() + if index_iter.getitem_bool(index_state): + res_iter.setitem(res_state, arr_iter.getitem(arr_state)) + res_state = res_iter.next(res_state) + index_state = index_iter.next(index_state) + arr_state = arr_iter.next(arr_state) return res setitem_filter_driver = jit.JitDriver(name = 'numpy_setitem_bool', @@ -393,41 +401,42 @@ reds = 'auto') def setitem_filter(space, arr, index, value): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() + index_iter, index_state = index.create_iter() if value.get_size() == 1: - value_iter = value.create_iter(arr.get_shape()) + value_iter, value_state = value.create_iter(arr.get_shape()) else: - value_iter = value.create_iter() + value_iter, value_state = value.create_iter() index_dtype = 
index.get_dtype() arr_dtype = arr.get_dtype() - while not index_iter.done(): + while not index_iter.done(index_state): setitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - arr_iter.setitem(arr_dtype.coerce(space, value_iter.getitem())) - value_iter.next() - arr_iter.next() - index_iter.next() + if index_iter.getitem_bool(index_state): + val = arr_dtype.coerce(space, value_iter.getitem(value_state)) + value_state = value_iter.next(value_state) + arr_iter.setitem(arr_state, val) + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem', greens = ['dtype'], reds = 'auto') -def flatiter_getitem(res, base_iter, step): - ri = res.create_iter() +def flatiter_getitem(res, base_iter, base_state, step): + ri, rs = res.create_iter() dtype = res.get_dtype() - while not ri.done(): + while not ri.done(rs): flatiter_getitem_driver.jit_merge_point(dtype=dtype) - ri.setitem(base_iter.getitem()) - base_iter.next_skip_x(step) - ri.next() + ri.setitem(rs, base_iter.getitem(base_state)) + base_state = base_iter.next_skip_x(base_state, step) + rs = ri.next(rs) return res flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', @@ -436,19 +445,21 @@ def flatiter_setitem(space, arr, val, start, step, length): dtype = arr.get_dtype() - arr_iter = arr.create_iter() - val_iter = val.create_iter() - arr_iter.next_skip_x(start) + arr_iter, arr_state = arr.create_iter() + val_iter, val_state = val.create_iter() + arr_state = arr_iter.next_skip_x(arr_state, start) while length > 0: flatiter_setitem_driver.jit_merge_point(dtype=dtype) + val = val_iter.getitem(val_state) if dtype.is_str_or_unicode(): - arr_iter.setitem(dtype.coerce(space, val_iter.getitem())) + val = dtype.coerce(space, val) else: - arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) + val = val.convert_to(space, dtype) + 
arr_iter.setitem(arr_state, val) # need to repeat i_nput values until all assignments are done - arr_iter.next_skip_x(step) + arr_state = arr_iter.next_skip_x(arr_state, step) + val_state = val_iter.next(val_state) length -= 1 - val_iter.next() fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', greens = ['itemsize', 'dtype'], @@ -456,30 +467,30 @@ def fromstring_loop(space, a, dtype, itemsize, s): i = 0 - ai = a.create_iter() - while not ai.done(): + ai, state = a.create_iter() + while not ai.done(state): fromstring_driver.jit_merge_point(dtype=dtype, itemsize=itemsize) sub = s[i*itemsize:i*itemsize + itemsize] if dtype.is_str_or_unicode(): val = dtype.coerce(space, space.wrap(sub)) else: val = dtype.itemtype.runpack_str(space, sub) - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) i += 1 def tostring(space, arr): builder = StringBuilder() - iter = arr.create_iter() + iter, state = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().elsize res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), w_res_str.implementation.get_storage_as_int(space)) - while not iter.done(): - w_res_str.implementation.setitem(0, iter.getitem()) + while not iter.done(state): + w_res_str.implementation.setitem(0, iter.getitem(state)) for i in range(itemsize): builder.append(res_str_casted[i]) - iter.next() + state = iter.next(state) return builder.build() getitem_int_driver = jit.JitDriver(name = 'numpy_getitem_int', @@ -500,8 +511,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] res.descr_setitem(space, space.newtuple(prefix_w[:prefixlen] + @@ -528,8 +539,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not 
None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] w_idx = space.newtuple(prefix_w[:prefixlen] + iter.get_index(space, @@ -547,13 +558,14 @@ def byteswap(from_, to): dtype = from_.dtype - from_iter = from_.create_iter() - to_iter = to.create_iter() - while not from_iter.done(): + from_iter, from_state = from_.create_iter() + to_iter, to_state = to.create_iter() + while not from_iter.done(from_state): byteswap_driver.jit_merge_point(dtype=dtype) - to_iter.setitem(dtype.itemtype.byteswap(from_iter.getitem())) - to_iter.next() - from_iter.next() + val = dtype.itemtype.byteswap(from_iter.getitem(from_state)) + to_iter.setitem(to_state, val) + to_state = to_iter.next(to_state) + from_state = from_iter.next(from_state) choose_driver = jit.JitDriver(name='numpy_choose_driver', greens = ['shapelen', 'mode', 'dtype'], @@ -561,13 +573,15 @@ def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) - iterators = [a.create_iter(shape) for a in choices] - arr_iter = arr.create_iter(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + pairs = [a.create_iter(shape) for a in choices] + iterators = [i[0] for i in pairs] + states = [i[1] for i in pairs] + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): choose_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, mode=mode) - index = support.index_w(space, arr_iter.getitem()) + index = support.index_w(space, arr_iter.getitem(arr_state)) if index < 0 or index >= len(iterators): if mode == NPY.RAISE: raise OperationError(space.w_ValueError, space.wrap( @@ -580,72 +594,73 @@ index = 0 else: index = len(iterators) - 1 - out_iter.setitem(iterators[index].getitem().convert_to(space, dtype)) - for iter in iterators: - iter.next() - out_iter.next() - arr_iter.next() + val = 
iterators[index].getitem(states[index]).convert_to(space, dtype) + out_iter.setitem(out_state, val) + for i in range(len(iterators)): + states[i] = iterators[i].next(states[i]) + out_state = out_iter.next(out_state) + arr_state = arr_iter.next(arr_state) clip_driver = jit.JitDriver(name='numpy_clip_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def clip(space, arr, shape, min, max, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) dtype = out.get_dtype() shapelen = len(shape) - min_iter = min.create_iter(shape) - max_iter = max.create_iter(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + min_iter, min_state = min.create_iter(shape) + max_iter, max_state = max.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(space, dtype) - w_min = min_iter.getitem().convert_to(space, dtype) - w_max = max_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) + w_min = min_iter.getitem(min_state).convert_to(space, dtype) + w_max = max_iter.getitem(max_state).convert_to(space, dtype) if dtype.itemtype.lt(w_v, w_min): w_v = w_min elif dtype.itemtype.gt(w_v, w_max): w_v = w_max - out_iter.setitem(w_v) - arr_iter.next() - max_iter.next() - out_iter.next() - min_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + min_state = min_iter.next(min_state) + max_state = max_iter.next(max_state) + out_state = out_iter.next(out_state) round_driver = jit.JitDriver(name='numpy_round_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def round(space, arr, dtype, shape, decimals, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - out_iter = out.create_iter(shape) - while not 
arr_iter.done(): + while not arr_iter.done(arr_state): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) w_v = dtype.itemtype.round(w_v, decimals) - out_iter.setitem(w_v) - arr_iter.next() - out_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + out_state = out_iter.next(out_state) diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): - out_iter = out.create_iter() + out_iter, out_state = out.create_iter() i = 0 index = [0] * 2 while i < size: diagonal_simple_driver.jit_merge_point(axis1=axis1, axis2=axis2) index[axis1] = i index[axis2] = i + offset - out_iter.setitem(arr.getitem_index(space, index)) + out_iter.setitem(out_state, arr.getitem_index(space, index)) i += 1 - out_iter.next() + out_state = out_iter.next(out_state) def diagonal_array(space, arr, out, offset, axis1, axis2, shape): - out_iter = out.create_iter() + out_iter, out_state = out.create_iter() iter = PureShapeIter(shape, []) shapelen_minus_1 = len(shape) - 1 assert shapelen_minus_1 >= 0 @@ -667,6 +682,6 @@ indexes = (iter.indexes[:a] + [last_index + offset] + iter.indexes[a:b] + [last_index] + iter.indexes[b:shapelen_minus_1]) - out_iter.setitem(arr.getitem_index(space, indexes)) + out_iter.setitem(out_state, arr.getitem_index(space, indexes)) iter.next() - out_iter.next() + out_state = out_iter.next(out_state) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -18,7 +18,7 @@ multi_axis_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.flatiter import W_FlatIterator -from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ +from 
pypy.module.micronumpy.strides import get_shape_from_iterable, \ shape_agreement, shape_agreement_multiple @@ -260,24 +260,24 @@ return space.call_function(cache.w_array_str, self) def dump_data(self, prefix='array(', separator=',', suffix=')'): - i = self.create_iter() + i, state = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() s.append(prefix) if not self.is_scalar(): s.append('[') - while not i.done(): + while not i.done(state): if first: first = False else: s.append(separator) s.append(' ') if self.is_scalar() and dtype.is_str(): - s.append(dtype.itemtype.to_str(i.getitem())) + s.append(dtype.itemtype.to_str(i.getitem(state))) else: - s.append(dtype.itemtype.str_format(i.getitem())) - i.next() + s.append(dtype.itemtype.str_format(i.getitem(state))) + state = i.next(state) if not self.is_scalar(): s.append(']') s.append(suffix) @@ -469,29 +469,33 @@ def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) - def to_coords(self, space, w_index): - coords, _, _ = to_coords(space, self.get_shape(), - self.get_size(), self.get_order(), - w_index) - return coords - - def descr_item(self, space, w_arg=None): - if space.is_none(w_arg): + def descr_item(self, space, __args__): + args_w, kw_w = __args__.unpack() + if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): + args_w = space.fixedview(args_w[0]) + shape = self.get_shape() + coords = [0] * len(shape) + if len(args_w) == 0: if self.get_size() == 1: w_obj = self.get_scalar_value() assert isinstance(w_obj, boxes.W_GenericBox) return w_obj.item(space) raise oefmt(space.w_ValueError, "can only convert an array of size 1 to a Python scalar") - if space.isinstance_w(w_arg, space.w_int): - if self.is_scalar(): - raise oefmt(space.w_IndexError, "index out of bounds") - i = self.to_coords(space, w_arg) - item = self.getitem(space, i) - assert isinstance(item, boxes.W_GenericBox) - return item.item(space) - raise OperationError(space.w_NotImplementedError, 
space.wrap( - "non-int arg not supported")) + elif len(args_w) == 1 and len(shape) != 1: + value = support.index_w(space, args_w[0]) + value = support.check_and_adjust_index(space, value, self.get_size(), -1) + for idim in range(len(shape) - 1, -1, -1): + coords[idim] = value % shape[idim] + value //= shape[idim] + elif len(args_w) == len(shape): + for idim in range(len(shape)): + coords[idim] = support.index_w(space, args_w[idim]) + else: + raise oefmt(space.w_ValueError, "incorrect number of indices for array") + item = self.getitem(space, coords) + assert isinstance(item, boxes.W_GenericBox) + return item.item(space) def descr_itemset(self, space, args_w): if len(args_w) == 0: @@ -841,8 +845,8 @@ if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - iter = self.create_iter() - return space.wrap(space.is_true(iter.getitem())) + iter, state = self.create_iter() + return space.wrap(space.is_true(iter.getitem(state))) def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): @@ -1019,7 +1023,8 @@ descr_cumsum = _reduce_ufunc_impl('add', cumulative=True) descr_cumprod = _reduce_ufunc_impl('multiply', cumulative=True) - def _reduce_argmax_argmin_impl(op_name): + def _reduce_argmax_argmin_impl(raw_name): + op_name = "arg%s" % raw_name def impl(self, space, w_axis=None, w_out=None): if not space.is_none(w_axis): raise oefmt(space.w_NotImplementedError, @@ -1030,18 +1035,17 @@ if self.get_size() == 0: raise oefmt(space.w_ValueError, "Can't call %s on zero-size arrays", op_name) - op = getattr(loop, op_name) try: - res = op(self) + getattr(self.get_dtype().itemtype, raw_name) except AttributeError: raise oefmt(space.w_NotImplementedError, '%s not implemented for %s', op_name, self.get_dtype().get_name()) - return space.wrap(res) - return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) + return space.wrap(getattr(loop, 
op_name)(self)) + return func_with_new_name(impl, "reduce_%s_impl" % op_name) - descr_argmax = _reduce_argmax_argmin_impl("argmax") - descr_argmin = _reduce_argmax_argmin_impl("argmin") + descr_argmax = _reduce_argmax_argmin_impl("max") + descr_argmin = _reduce_argmax_argmin_impl("min") def descr_int(self, space): if self.get_size() != 1: @@ -1118,11 +1122,11 @@ builder = StringBuilder() if isinstance(self.implementation, SliceArray): - iter = self.implementation.create_iter() - while not iter.done(): - box = iter.getitem() + iter, state = self.implementation.create_iter() + while not iter.done(state): + box = iter.getitem(state) builder.append(box.raw_str()) - iter.next() + state = iter.next(state) else: builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -1,99 +1,50 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt +from pypy.module.micronumpy import ufuncs, support, concrete from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.descriptor import decode_w_dtype +from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (calculate_broadcast_strides, - shape_agreement, shape_agreement_multiple) -from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator -from pypy.module.micronumpy.concrete import SliceArray -from pypy.module.micronumpy.descriptor import decode_w_dtype -from pypy.module.micronumpy import ufuncs, support + shape_agreement, shape_agreement_multiple) -class AbstractIterator(object): - def done(self): - 
raise NotImplementedError("Abstract Class") - - def next(self): - raise NotImplementedError("Abstract Class") - - def getitem(self, space, array): - raise NotImplementedError("Abstract Class") - -class IteratorMixin(object): - _mixin_ = True - def __init__(self, it, op_flags): - self.it = it - self.op_flags = op_flags - - def done(self): - return self.it.done() - - def next(self): - self.it.next() - - def getitem(self, space, array): - return self.op_flags.get_it_item[self.index](space, array, self.it) - - def setitem(self, space, array, val): - xxx - -class BoxIterator(IteratorMixin, AbstractIterator): - index = 0 - -class ExternalLoopIterator(IteratorMixin, AbstractIterator): - index = 1 - def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): + if space.is_w(w_op_flags, space.w_None): + w_op_flags = space.newtuple([space.wrap('readonly')]) + if not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise oefmt(space.w_ValueError, + '%s must be a tuple or array of per-op flag-tuples', + name) ret = [] - if space.is_w(w_op_flags, space.w_None): + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise oefmt(space.w_ValueError, + '%s must be a tuple or array of per-op flag-tuples', + name) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) for i in range(n): - ret.append(OpFlag()) - elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ - space.isinstance_w(w_op_flags, space.w_list): - raise OperationError(space.w_ValueError, space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) - else: - w_lst = space.listview(w_op_flags) - if space.isinstance_w(w_lst[0], space.w_tuple) or \ - space.isinstance_w(w_lst[0], space.w_list): - if len(w_lst) != n: - raise OperationError(space.w_ValueError, 
space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) - for item in w_lst: - ret.append(parse_one_arg(space, space.listview(item))) - else: - op_flag = parse_one_arg(space, w_lst) - for i in range(n): - ret.append(op_flag) + ret.append(op_flag) return ret + class OpFlag(object): def __init__(self): - self.rw = 'r' + self.rw = '' self.broadcast = True self.force_contig = False self.force_align = False self.native_byte_order = False self.tmp_copy = '' self.allocate = False - self.get_it_item = (get_readonly_item, get_readonly_slice) -def get_readonly_item(space, array, it): - return space.wrap(it.getitem()) - -def get_readwrite_item(space, array, it): - #create a single-value view (since scalars are not views) - res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) - #it.dtype.setitem(res, 0, it.getitem()) - return W_NDimArray(res) - -def get_readonly_slice(space, array, it): - return W_NDimArray(it.getslice().readonly()) - -def get_readwrite_slice(space, array, it): - return W_NDimArray(it.getslice()) def parse_op_flag(space, lst): op_flag = OpFlag() @@ -121,39 +72,38 @@ op_flag.allocate = True elif item == 'no_subtype': raise OperationError(space.w_NotImplementedError, space.wrap( - '"no_subtype" op_flag not implemented yet')) + '"no_subtype" op_flag not implemented yet')) elif item == 'arraymask': raise OperationError(space.w_NotImplementedError, space.wrap( - '"arraymask" op_flag not implemented yet')) + '"arraymask" op_flag not implemented yet')) elif item == 'writemask': raise OperationError(space.w_NotImplementedError, space.wrap( - '"writemask" op_flag not implemented yet')) + '"writemask" op_flag not implemented yet')) else: raise OperationError(space.w_ValueError, space.wrap( - 'op_flags must be a tuple or array of per-op flag-tuples')) - if op_flag.rw == 'r': - op_flag.get_it_item = (get_readonly_item, get_readonly_slice) - elif op_flag.rw == 'rw': - op_flag.get_it_item = (get_readwrite_item, 
get_readwrite_slice) - elif op_flag.rw == 'w': - # XXX Extra logic needed to make sure writeonly - op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == '': + raise oefmt(space.w_ValueError, + "None of the iterator flags READWRITE, READONLY, or " + "WRITEONLY were specified for an operand") return op_flag + def parse_func_flags(space, nditer, w_flags): if space.is_w(w_flags, space.w_None): return elif not space.isinstance_w(w_flags, space.w_tuple) and not \ - space.isinstance_w(w_flags, space.w_list): + space.isinstance_w(w_flags, space.w_list): raise OperationError(space.w_ValueError, space.wrap( - 'Iter global flags must be a list or tuple of strings')) + 'Iter global flags must be a list or tuple of strings')) lst = space.listview(w_flags) for w_item in lst: if not space.isinstance_w(w_item, space.w_str) and not \ - space.isinstance_w(w_item, space.w_unicode): + space.isinstance_w(w_item, space.w_unicode): typename = space.type(w_item).getname(space) - raise OperationError(space.w_TypeError, space.wrap( - 'expected string or Unicode object, %s found' % typename)) + raise oefmt(space.w_TypeError, + 'expected string or Unicode object, %s found', + typename) item = space.str_w(w_item) if item == 'external_loop': raise OperationError(space.w_NotImplementedError, space.wrap( @@ -187,21 +137,24 @@ elif item == 'zerosize_ok': nditer.zerosize_ok = True else: - raise OperationError(space.w_ValueError, space.wrap( - 'Unexpected iterator global flag "%s"' % item)) + raise oefmt(space.w_ValueError, + 'Unexpected iterator global flag "%s"', + item) if nditer.tracked_index and nditer.external_loop: - raise OperationError(space.w_ValueError, space.wrap( - 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' - 'multi-index is being tracked')) + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is 
being tracked')) + def is_backward(imp, order): if order == 'K' or (order == 'C' and imp.order == 'C'): return False - elif order =='F' and imp.order == 'C': + elif order == 'F' and imp.order == 'C': return True else: raise NotImplementedError('not implemented yet') + def get_iter(space, order, arr, shape, dtype): imp = arr.implementation backward = is_backward(imp, order) @@ -223,19 +176,6 @@ shape, backward) return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) -def get_external_loop_iter(space, order, arr, shape): - imp = arr.implementation - backward = is_backward(imp, order) - return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) - -def convert_to_array_or_none(space, w_elem): - ''' - None will be passed through, all others will be converted - ''' - if space.is_none(w_elem): - return None - return convert_to_array(space, w_elem) - class IndexIterator(object): def __init__(self, shape, backward=False): @@ -263,10 +203,10 @@ ret += self.index[i] * self.shape[i - 1] return ret + class W_NDIter(W_Root): - def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, - w_op_axes, w_itershape, w_buffersize, order): + w_op_axes, w_itershape, w_buffersize, order): self.order = order self.external_loop = False self.buffered = False @@ -286,9 +226,11 @@ if space.isinstance_w(w_seq, space.w_tuple) or \ space.isinstance_w(w_seq, space.w_list): w_seq_as_list = space.listview(w_seq) - self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + self.seq = [convert_to_array(space, w_elem) + if not space.is_none(w_elem) else None + for w_elem in w_seq_as_list] else: - self.seq =[convert_to_array(space, w_seq)] + self.seq = [convert_to_array(space, w_seq)] parse_func_flags(space, self, w_flags) self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, @@ -308,9 +250,9 @@ self.dtypes = [] # handle None or writable operands, calculate my shape - self.iters=[] - outargs = [i for i in 
range(len(self.seq)) \ - if self.seq[i] is None or self.op_flags[i].rw == 'w'] + self.iters = [] + outargs = [i for i in range(len(self.seq)) + if self.seq[i] is None or self.op_flags[i].rw == 'w'] if len(outargs) > 0: out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) else: @@ -325,14 +267,12 @@ out_dtype = None for i in range(len(self.seq)): if self.seq[i] is None: - self.op_flags[i].get_it_item = (get_readwrite_item, - get_readwrite_slice) self.op_flags[i].allocate = True continue if self.op_flags[i].rw == 'w': continue - out_dtype = ufuncs.find_binop_result_dtype(space, - self.seq[i].get_dtype(), out_dtype) + out_dtype = ufuncs.find_binop_result_dtype( + space, self.seq[i].get_dtype(), out_dtype) for i in outargs: if self.seq[i] is None: # XXX can we postpone allocation to later? @@ -360,8 +300,9 @@ self.dtypes[i] = seq_d elif selfd != seq_d: if not 'r' in self.op_flags[i].tmp_copy: - raise OperationError(space.w_TypeError, space.wrap( - "Iterator operand required copying or buffering for operand %d" % i)) + raise oefmt(space.w_TypeError, + "Iterator operand required copying or " + "buffering for operand %d", i) impl = self.seq[i].implementation new_impl = impl.astype(space, selfd) self.seq[i] = W_NDimArray(new_impl) @@ -370,18 +311,14 @@ self.dtypes = [s.get_dtype() for s in self.seq] # create an iterator for each operand - if self.external_loop: - for i in range(len(self.seq)): - self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, - self.seq[i], iter_shape), self.op_flags[i])) - else: - for i in range(len(self.seq)): - self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i], iter_shape, self.dtypes[i]), - self.op_flags[i])) + for i in range(len(self.seq)): + it = get_iter(space, self.order, self.seq[i], iter_shape, self.dtypes[i]) + self.iters.append((it, it.reset())) + def set_op_axes(self, space, w_op_axes): if space.len_w(w_op_axes) != len(self.seq): - raise 
OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + raise oefmt(space.w_ValueError, + "op_axes must be a tuple/list matching the number of ops") op_axes = space.listview(w_op_axes) l = -1 for w_axis in op_axes: @@ -390,10 +327,14 @@ if l == -1: l = axis_len elif axis_len != l: - raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) - self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + raise oefmt(space.w_ValueError, + "Each entry of op_axes must have the same size") + self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 + for x in space.listview(w_axis)]) if l == -1: - raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise oefmt(space.w_ValueError, + "If op_axes is provided, at least one list of axes " + "must be contained within it") raise Exception('xxx TODO') # Check that values make sense: # - in bounds for each operand @@ -404,24 +345,34 @@ def descr_iter(self, space): return space.wrap(self) + def getitem(self, it, st, op_flags): + if op_flags.rw == 'r': + impl = concrete.ConcreteNonWritableArrayWithBase + else: + impl = concrete.ConcreteArrayWithBase + res = impl([], it.array.dtype, it.array.order, [], [], + it.array.storage, self) + res.start = st.offset + return W_NDimArray(res) + def descr_getitem(self, space, w_idx): idx = space.int_w(w_idx) try: - ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + it, st = self.iters[idx] except IndexError: - raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) - return ret + raise oefmt(space.w_IndexError, + "Iterator operand index %d is out of bounds", idx) + return self.getitem(it, st, self.op_flags[idx]) def descr_setitem(self, space, w_idx, w_value): - raise 
OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_len(self, space): space.wrap(len(self.iters)) def descr_next(self, space): - for it in self.iters: - if not it.done(): + for it, st in self.iters: + if not it.done(st): break else: self.done = True @@ -432,20 +383,20 @@ self.index_iter.next() else: self.first_next = False - for i in range(len(self.iters)): - res.append(self.iters[i].getitem(space, self.seq[i])) - self.iters[i].next() - if len(res) <2: + for i, (it, st) in enumerate(self.iters): + res.append(self.getitem(it, st, self.op_flags[i])) + self.iters[i] = (it, it.next(st)) + if len(res) < 2: return res[0] return space.newtuple(res) def iternext(self): if self.index_iter: self.index_iter.next() - for i in range(len(self.iters)): - self.iters[i].next() - for it in self.iters: - if not it.done(): + for i, (it, st) in enumerate(self.iters): + self.iters[i] = (it, it.next(st)) + for it, st in self.iters: + if not it.done(st): break else: self.done = True @@ -456,29 +407,23 @@ return space.wrap(self.iternext()) def descr_copy(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_debug_print(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_enable_external_loop(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") @unwrap_spec(axis=int) def descr_remove_axis(self, space, axis): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_remove_multi_index(self, space, w_multi_index): - 
raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_reset(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_operands(self, space): l_w = [] @@ -496,17 +441,16 @@ return space.wrap(self.done) def descr_get_has_delayed_bufalloc(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_has_index(self, space): return space.wrap(self.tracked_index in ["C", "F"]) def descr_get_index(self, space): if not self.tracked_index in ["C", "F"]: - raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) + raise oefmt(space.w_ValueError, "Iterator does not have an index") if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + raise oefmt(space.w_ValueError, "Iterator is past the end") return space.wrap(self.index_iter.getvalue()) def descr_get_has_multi_index(self, space): @@ -514,51 +458,44 @@ def descr_get_multi_index(self, space): if not self.tracked_index == "multi": - raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a multi-index")) + raise oefmt(space.w_ValueError, "Iterator is not tracking a multi-index") if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + raise oefmt(space.w_ValueError, "Iterator is past the end") return space.newtuple([space.wrap(x) for x in self.index_iter.index]) def descr_get_iterationneedsapi(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_iterindex(self, space): - raise 
OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_itersize(self, space): return space.wrap(support.product(self.shape)) def descr_get_itviews(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_ndim(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_nop(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_shape(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_value(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") - at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), - w_op_dtypes = WrappedDefault(None), order=str, + at unwrap_spec(w_flags=WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes=WrappedDefault(None), order=str, w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order='K'): + w_itershape, w_buffersize, order='K'): return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order) + w_itershape, w_buffersize, order) W_NDIter.typedef = TypeDef( 'nditer', diff --git a/pypy/module/micronumpy/sort.py 
b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -148,20 +148,22 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() index_impl = index_arr.implementation index_iter = AllButAxisIter(index_impl, axis) + index_state = index_iter.reset() stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): + while not arr_iter.done(arr_state): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + - index_iter.offset, i) + index_state.offset, i) r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_iter.offset, arr_iter.offset) + arr.get_storage(), storage, index_state.offset, arr_state.offset) ArgSort(r).sort() - arr_iter.next() - index_iter.next() + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) return index_arr return argsort @@ -292,12 +294,13 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() stride_size = arr.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): - r = Repr(stride_size, axis_size, arr.get_storage(), arr_iter.offset) + while not arr_iter.done(arr_state): + r = Repr(stride_size, axis_size, arr.get_storage(), arr_state.offset) ArgSort(r).sort() - arr_iter.next() + arr_state = arr_iter.next(arr_state) return sort diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -233,30 +233,6 @@ return dtype -def to_coords(space, shape, size, order, w_item_or_slice): - '''Returns a start coord, step, and length. 
- ''' - start = lngth = step = 0 - if not (space.isinstance_w(w_item_or_slice, space.w_int) or - space.isinstance_w(w_item_or_slice, space.w_slice)): - raise OperationError(space.w_IndexError, - space.wrap('unsupported iterator index')) - - start, stop, step, lngth = space.decode_index4(w_item_or_slice, size) - - coords = [0] * len(shape) - i = start - if order == 'C': - for s in range(len(shape) -1, -1, -1): - coords[s] = i % shape[s] - i //= shape[s] - else: - for s in range(len(shape)): - coords[s] = i % shape[s] - i //= shape[s] - return coords, step, lngth - - @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -25,3 +25,18 @@ for x in s: i *= x return i + + +def check_and_adjust_index(space, index, size, axis): + if index < -size or index >= size: + if axis >= 0: + raise oefmt(space.w_IndexError, + "index %d is out of bounds for axis %d with size %d", + index, axis, size) + else: + raise oefmt(space.w_IndexError, + "index %d is out of bounds for size %d", + index, size) + if index < 0: + index += size + return index diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -16,17 +16,18 @@ assert backstrides == [10, 4] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() - i.next() - assert i.offset == 3 - assert not i.done() - assert i.indices == [0,3] + s = i.reset() + s = i.next(s) + s = i.next(s) + s = i.next(s) + assert s.offset == 3 + assert not i.done(s) + assert s.indices == [0,3] #cause a dimension overflow - i.next() - i.next() - assert i.offset == 5 - assert i.indices == [1,0] + s = i.next(s) + s = i.next(s) + assert s.offset == 5 + 
assert s.indices == [1,0] #Now what happens if the array is transposed? strides[-1] != 1 # therefore layout is non-contiguous @@ -35,17 +36,18 @@ assert backstrides == [2, 12] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() - i.next() - assert i.offset == 9 - assert not i.done() - assert i.indices == [0,3] + s = i.reset() + s = i.next(s) + s = i.next(s) + s = i.next(s) + assert s.offset == 9 + assert not i.done(s) + assert s.indices == [0,3] #cause a dimension overflow - i.next() - i.next() - assert i.offset == 1 - assert i.indices == [1,0] + s = i.next(s) + s = i.next(s) + assert s.offset == 1 + assert s.indices == [1,0] def test_iterator_step(self): #iteration in C order with #contiguous layout => strides[-1] is 1 @@ -56,22 +58,23 @@ assert backstrides == [10, 4] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 6 - assert not i.done() - assert i.indices == [1,1] + s = i.reset() + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + assert s.offset == 6 + assert not i.done(s) + assert s.indices == [1,1] #And for some big skips - i.next_skip_x(5) - assert i.offset == 11 - assert i.indices == [2,1] - i.next_skip_x(5) + s = i.next_skip_x(s, 5) + assert s.offset == 11 + assert s.indices == [2,1] + s = i.next_skip_x(s, 5) # Note: the offset does not overflow but recycles, # this is good for broadcast - assert i.offset == 1 - assert i.indices == [0,1] - assert i.done() + assert s.offset == 1 + assert s.indices == [0,1] + assert i.done(s) #Now what happens if the array is transposed? 
strides[-1] != 1 # therefore layout is non-contiguous @@ -80,17 +83,18 @@ assert backstrides == [2, 12] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 4 - assert i.indices == [1,1] - assert not i.done() - i.next_skip_x(5) - assert i.offset == 5 - assert i.indices == [2,1] - assert not i.done() - i.next_skip_x(5) - assert i.indices == [0,1] - assert i.offset == 3 - assert i.done() + s = i.reset() + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + s = i.next_skip_x(s, 2) + assert s.offset == 4 + assert s.indices == [1,1] + assert not i.done(s) + s = i.next_skip_x(s, 5) + assert s.offset == 5 + assert s.indices == [2,1] + assert not i.done(s) + s = i.next_skip_x(s, 5) + assert s.indices == [0,1] + assert s.offset == 3 + assert i.done(s) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -164,24 +164,6 @@ assert calc_new_strides([1, 1, 105, 1, 1], [7, 15], [1, 7],'F') == \ [1, 1, 1, 105, 105] - def test_to_coords(self): - from pypy.module.micronumpy.strides import to_coords - From noreply at buildbot.pypy.org Sat Apr 19 23:53:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 19 Apr 2014 23:53:34 +0200 (CEST) Subject: [pypy-commit] pypy numpy-searchsorted: pass tests Message-ID: <20140419215334.880CF1C02D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-searchsorted Changeset: r70789:dfbf07a1af36 Date: 2014-04-20 00:38 +0300 http://bitbucket.org/pypy/pypy/changeset/dfbf07a1af36/ Log: pass tests diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -712,8 +712,6 @@ else: raise oefmt(space.w_ValueError, "'%s' is an invalid value for keyword 'side'", side) - ret = 
W_NDimArray.from_shape(space, self.get_shape(), - descriptor.get_dtype_cache(space).w_longdtype) if len(self.get_shape()) > 1: raise OperationError(space.w_ValueError, space.wrap( "a must be a 1-d array")) @@ -721,6 +719,8 @@ if len(v.get_shape()) >1: raise OperationError(space.w_ValueError, space.wrap( "v must be a 1-d array-like")) + ret = W_NDimArray.from_shape(space, v.get_shape(), + descriptor.get_dtype_cache(space).w_longdtype) app_searchsort(space, self, v, space.wrap(side), ret) return ret @@ -1282,7 +1282,7 @@ imax = a.size while imin < imax: imid = imin + ((imax - imin) >> 1) - if a[imid] <= val: + if a[imid] < val: imin = imid +1 else: imax = imid @@ -1292,7 +1292,7 @@ imax = a.size while imin < imax: imid = imin + ((imax - imin) >> 1) - if a[imid] < val: + if a[imid] <= val: imin = imid +1 else: imax = imid @@ -1301,8 +1301,11 @@ func = left_find_index else: func = right_find_index - for i in range(v.size): - result[i] = func(arr, v[i]) + if v.size < 2: + result[...] = func(arr, v) + else: + for i in range(v.size): + result[i] = func(arr, v[i]) return result """, filename=__file__).interphook('searchsort') diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -361,5 +361,5 @@ ret = a.searchsorted([-10, 10, 2, 3]) assert (ret == [0, 5, 1, 2]).all() if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, "a.searchsorted(3, sorter=range(6)") + raises(NotImplementedError, "a.searchsorted(3, sorter=range(6))") From noreply at buildbot.pypy.org Sat Apr 19 23:53:35 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 19 Apr 2014 23:53:35 +0200 (CEST) Subject: [pypy-commit] pypy numpy-speed: close merged branch Message-ID: <20140419215335.979EC1C02D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-speed Changeset: r70790:5930d01b5418 Date: 2014-04-20 00:46 +0300 
http://bitbucket.org/pypy/pypy/changeset/5930d01b5418/ Log: close merged branch From noreply at buildbot.pypy.org Sat Apr 19 23:53:36 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 19 Apr 2014 23:53:36 +0200 (CEST) Subject: [pypy-commit] pypy numpy-searchsorted: document branch Message-ID: <20140419215336.AA27A1C02D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-searchsorted Changeset: r70791:7d8437da31e2 Date: 2014-04-20 00:49 +0300 http://bitbucket.org/pypy/pypy/changeset/7d8437da31e2/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -143,3 +143,6 @@ .. branch: numpy-speed Separate iterator from its state so jit can optimize better + +.. branch: numpypy-searchsorted +Implement searchsorted without sorter kwarg From noreply at buildbot.pypy.org Sat Apr 19 23:53:37 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 19 Apr 2014 23:53:37 +0200 (CEST) Subject: [pypy-commit] pypy numpy-searchsorted: close branch to be merged Message-ID: <20140419215337.B70A81C02D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-searchsorted Changeset: r70792:fc7c95864c77 Date: 2014-04-20 00:50 +0300 http://bitbucket.org/pypy/pypy/changeset/fc7c95864c77/ Log: close branch to be merged From noreply at buildbot.pypy.org Sat Apr 19 23:53:38 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 19 Apr 2014 23:53:38 +0200 (CEST) Subject: [pypy-commit] pypy default: merge numpy-searchsorted which tests and implements searchsorted for ndarrays Message-ID: <20140419215338.E86201C02D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70793:e86508502063 Date: 2014-04-20 00:52 +0300 http://bitbucket.org/pypy/pypy/changeset/e86508502063/ Log: merge numpy-searchsorted which tests and implements searchsorted for ndarrays diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- 
a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -143,3 +143,6 @@ .. branch: numpy-speed Separate iterator from its state so jit can optimize better + +.. branch: numpy-searchsorted +Implement searchsorted without sorter kwarg diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -22,3 +22,4 @@ arr[j] = i i += step return arr + diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -697,9 +697,32 @@ loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out - def descr_searchsorted(self, space, w_v, w_side='left'): - raise OperationError(space.w_NotImplementedError, space.wrap( - "searchsorted not implemented yet")) + @unwrap_spec(side=str, w_sorter=WrappedDefault(None)) + def descr_searchsorted(self, space, w_v, side='left', w_sorter=None): + if not space.is_none(w_sorter): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'sorter not supported in searchsort')) + if not side or len(side) < 1: + raise OperationError(space.w_ValueError, space.wrap( + "expected nonempty string for keyword 'side'")) + elif side[0] == 'l' or side[0] == 'L': + side = 'l' + elif side[0] == 'r' or side[0] == 'R': + side = 'r' + else: + raise oefmt(space.w_ValueError, + "'%s' is an invalid value for keyword 'side'", side) + if len(self.get_shape()) > 1: + raise OperationError(space.w_ValueError, space.wrap( + "a must be a 1-d array")) + v = convert_to_array(space, w_v) + if len(v.get_shape()) >1: + raise OperationError(space.w_ValueError, space.wrap( + "v must be a 1-d array-like")) + ret = W_NDimArray.from_shape(space, v.get_shape(), + descriptor.get_dtype_cache(space).w_longdtype) + app_searchsort(space, self, v, space.wrap(side), ret) + return ret def descr_setasflat(self, space, w_v): raise 
OperationError(space.w_NotImplementedError, space.wrap( @@ -1252,6 +1275,40 @@ return res """, filename=__file__).interphook('ptp') +app_searchsort = applevel(r""" + def searchsort(arr, v, side, result): + def left_find_index(a, val): + imin = 0 + imax = a.size + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if a[imid] < val: + imin = imid +1 + else: + imax = imid + return imin + def right_find_index(a, val): + imin = 0 + imax = a.size + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if a[imid] <= val: + imin = imid +1 + else: + imax = imid + return imin + if side == 'l': + func = left_find_index + else: + func = right_find_index + if v.size < 2: + result[...] = func(arr, v) + else: + for i in range(v.size): + result[i] = func(arr, v[i]) + return result +""", filename=__file__).interphook('searchsort') + W_NDimArray.typedef = TypeDef("ndarray", __module__ = "numpy", __new__ = interp2app(descr_new_array), @@ -1355,6 +1412,7 @@ dot = interp2app(W_NDimArray.descr_dot), var = interp2app(W_NDimArray.descr_var), std = interp2app(W_NDimArray.descr_std), + searchsorted = interp2app(W_NDimArray.descr_searchsorted), cumsum = interp2app(W_NDimArray.descr_cumsum), cumprod = interp2app(W_NDimArray.descr_cumprod), diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -341,7 +341,7 @@ assert (x == y).all() def test_string_mergesort(self): - import numpypy as np + import numpy as np import sys x = np.array(['a'] * 32) if '__pypy__' in sys.builtin_module_names: @@ -349,3 +349,17 @@ assert 'non-numeric types' in exc.value.message else: assert (x.argsort(kind='m') == np.arange(32)).all() + + def test_searchsort(self): + from numpy import arange + import sys + a = arange(1, 6) + ret = a.searchsorted(3) + assert ret == 2 + ret = a.searchsorted(3, side='right') + assert ret == 3 + ret = a.searchsorted([-10, 10, 2, 
3]) + assert (ret == [0, 5, 1, 2]).all() + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, "a.searchsorted(3, sorter=range(6))") + From noreply at buildbot.pypy.org Sun Apr 20 07:55:53 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 20 Apr 2014 07:55:53 +0200 (CEST) Subject: [pypy-commit] pypy default: catch errors in thread Message-ID: <20140420055553.1029F1C0144@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70794:b40fc1b7ce82 Date: 2014-04-20 08:55 +0300 http://bitbucket.org/pypy/pypy/changeset/b40fc1b7ce82/ Log: catch errors in thread diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -164,8 +164,12 @@ s2 = RSocket(AF_INET, SOCK_STREAM) s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test def connecting(): - s2.connect(addr) - lock.release() + try: + s2.connect(addr) + except: + s2.close() + finally: + lock.release() lock = thread.allocate_lock() lock.acquire() thread.start_new_thread(connecting, ()) From noreply at buildbot.pypy.org Sun Apr 20 08:30:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 20 Apr 2014 08:30:33 +0200 (CEST) Subject: [pypy-commit] pypy default: using timeout untranslated sets errno, failing test Message-ID: <20140420063033.805E21C0543@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70795:008af392cc13 Date: 2014-04-20 09:30 +0300 http://bitbucket.org/pypy/pypy/changeset/008af392cc13/ Log: using timeout untranslated sets errno, failing test diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -162,7 +162,10 @@ assert addr.eq(sock.getsockname()) sock.listen(1) s2 = RSocket(AF_INET, SOCK_STREAM) - s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test + 
if sys.platform != 'win32': + # test one side with timeouts so select is used + # XXX fix on win32 + s2.settimeout(10.0) def connecting(): try: s2.connect(addr) From noreply at buildbot.pypy.org Sun Apr 20 08:44:06 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 20 Apr 2014 08:44:06 +0200 (CEST) Subject: [pypy-commit] pypy default: skip crashing test on windows Message-ID: <20140420064406.F3AF81D2411@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70796:ba4e5b432b24 Date: 2014-04-20 09:43 +0300 http://bitbucket.org/pypy/pypy/changeset/ba4e5b432b24/ Log: skip crashing test on windows diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py @@ -745,6 +745,7 @@ def test_get_errno(self): eci = ExternalCompilationInfo(includes=['string.h']) if sys.platform.startswith('win'): + py.test.skip('writing to invalid fd on windows crashes the process') # Note that cpython before 2.7 installs an _invalid_parameter_handler, # which is why the test passes there, but this is no longer # accepted practice. 
From noreply at buildbot.pypy.org Sun Apr 20 10:11:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 10:11:36 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Make the stack markers odd (seems more useful for pypy) Message-ID: <20140420081136.331EE1C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1171:caccd9ab4c7c Date: 2014-04-20 10:05 +0200 http://bitbucket.org/pypy/stmgc/changeset/caccd9ab4c7c/ Log: Make the stack markers odd (seems more useful for pypy) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -271,8 +271,8 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) -#define STM_STACK_MARKER_NEW 2 -#define STM_STACK_MARKER_OLD 6 +#define STM_STACK_MARKER_NEW (-41) +#define STM_STACK_MARKER_OLD (-43) /* Every thread needs to have a corresponding stm_thread_local_t From noreply at buildbot.pypy.org Sun Apr 20 10:11:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 10:11:37 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Look for markers until one expand_marker() calls return a non-empty string Message-ID: <20140420081137.334E11C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1172:a07a3f22d422 Date: 2014-04-20 10:09 +0200 http://bitbucket.org/pypy/stmgc/changeset/a07a3f22d422/ Log: Look for markers until one expand_marker() calls return a non-empty string diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -27,11 +27,8 @@ stmcb_expand_marker(pseg->pub.segment_base, x, current[1].ss, pseg->marker_self, _STM_MARKER_LEN); - if (pseg->marker_self[0] == 0) { - pseg->marker_self[0] = '?'; - pseg->marker_self[1] = 0; - } - break; + if (pseg->marker_self[0] != 0) + break; } } } diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- 
a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -153,3 +153,25 @@ self.abort_transaction() # assert seen == [("run aborted other", "time_ok", "<<<29>>>")] + + def test_multiple_markers(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + seen.append(number) + if ptr == ffi.NULL: + return + s = '%d %r\x00' % (number, ptr) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + seen = [] + lib.stmcb_expand_marker = expand_marker + # + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 27)) + self.push_root(p) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + raw = lib._stm_expand_marker() + assert ffi.string(raw) == '27 %r' % (p,) + assert seen == [29, 27] From noreply at buildbot.pypy.org Sun Apr 20 10:11:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 10:11:38 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Fix Message-ID: <20140420081138.2D15D1C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1173:a4c30401045f Date: 2014-04-20 10:09 +0200 http://bitbucket.org/pypy/stmgc/changeset/a4c30401045f/ Log: Fix diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -44,7 +44,7 @@ visit((object_t **)&n->next); } -static void expand_marker(uintptr_t odd_number, +static void expand_marker(char *base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize) { From noreply at buildbot.pypy.org Sun Apr 20 10:54:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 10:54:12 +0200 (CEST) Subject: [pypy-commit] stmgc marker: bug fix Message-ID: <20140420085412.524C91C0543@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1174:889897f2f5ef Date: 2014-04-20 10:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/889897f2f5ef/ Log: bug 
fix diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -20,7 +20,9 @@ stm_thread_local_t *tl = pseg->pub.running_thread; struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; - while (--current >= base) { + /* stop walking just before shadowstack_base, which contains + STM_STACK_MARKER_OLD which shouldn't be expanded */ + while (--current > base) { uintptr_t x = (uintptr_t)current->ss; if (x & 1) { /* the stack entry is an odd number */ From noreply at buildbot.pypy.org Sun Apr 20 13:47:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 13:47:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/a4c30401045f (branch "marker") Message-ID: <20140420114759.69FD51C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70798:53dcf7d51a17 Date: 2014-04-20 10:12 +0200 http://bitbucket.org/pypy/pypy/changeset/53dcf7d51a17/ Log: import stmgc/a4c30401045f (branch "marker") diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -7660960de054 +a4c30401045f diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -28,11 +28,8 @@ stmcb_expand_marker(pseg->pub.segment_base, x, current[1].ss, pseg->marker_self, _STM_MARKER_LEN); - if (pseg->marker_self[0] == 0) { - pseg->marker_self[0] = '?'; - pseg->marker_self[1] = 0; - } - break; + if (pseg->marker_self[0] != 0) + break; } } } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -272,8 +272,8 @@ #define STM_PUSH_ROOT(tl, p) 
((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) -#define STM_STACK_MARKER_NEW 2 -#define STM_STACK_MARKER_OLD 6 +#define STM_STACK_MARKER_NEW (-41) +#define STM_STACK_MARKER_OLD (-43) /* Every thread needs to have a corresponding stm_thread_local_t From noreply at buildbot.pypy.org Sun Apr 20 13:48:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 13:48:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Revert these changes, probably not necessary Message-ID: <20140420114800.A5F161C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70799:2dee4269a986 Date: 2014-04-20 11:09 +0200 http://bitbucket.org/pypy/pypy/changeset/2dee4269a986/ Log: Revert these changes, probably not necessary diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -46,15 +46,9 @@ # ____________________________________________________________ -class PyPyJitDriver(jit.JitDriver): - reds = ['frame', 'ec'] - greens = ['next_instr', 'is_being_profiled', 'pycode'] - virtualizables = ['frame'] - stm_do_transaction_breaks = True - is_main_for_pypy = True # XXX temporary: turning 'greens' into a string - # is hard-coded in C code. Don't change 'greens' - -stmonly_jitdriver = PyPyJitDriver() +stmonly_jitdriver = jit.JitDriver(greens=[], reds=['next_instr', 'ec', + 'self', 'co_code'], + stm_do_transaction_breaks=True) # ____________________________________________________________ @@ -77,9 +71,8 @@ # only used for no-jit. 
The jit-jitdriver is # in interp_jit.py stmonly_jitdriver.jit_merge_point( - frame=self, pycode=co_code, - next_instr=next_instr, ec=ec, - is_being_profiled=self.is_being_profiled) + self=self, co_code=co_code, + next_instr=next_instr, ec=ec) next_instr = self.handle_bytecode(co_code, next_instr, ec) rstm.update_marker_num(intmask(next_instr) * 2 + 1) except ExitFrame: diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,7 +12,6 @@ from pypy.interpreter.pycode import CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield -from pypy.interpreter.pyopcode import PyPyJitDriver from opcode import opmap @@ -37,10 +36,16 @@ def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 +class PyPyJitDriver(JitDriver): + reds = ['frame', 'ec'] + greens = ['next_instr', 'is_being_profiled', 'pycode'] + virtualizables = ['frame'] + pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, should_unroll_one_iteration = should_unroll_one_iteration, - name='pypyjit') + name='pypyjit', + stm_do_transaction_breaks=True) class __extend__(PyFrame): From noreply at buildbot.pypy.org Sun Apr 20 13:48:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 13:48:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: update to stmgc/889897f2f5ef (branch "marker") Message-ID: <20140420114801.CC3D81C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70800:7f2b59d09a77 Date: 2014-04-20 13:39 +0200 http://bitbucket.org/pypy/pypy/changeset/7f2b59d09a77/ Log: update to stmgc/889897f2f5ef (branch "marker") diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ 
-1,1 +1,1 @@ -a4c30401045f +889897f2f5ef diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -21,7 +21,9 @@ stm_thread_local_t *tl = pseg->pub.running_thread; struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; - while (--current >= base) { + /* stop walking just before shadowstack_base, which contains + STM_STACK_MARKER_OLD which shouldn't be expanded */ + while (--current > base) { uintptr_t x = (uintptr_t)current->ss; if (x & 1) { /* the stack entry is an odd number */ From noreply at buildbot.pypy.org Sun Apr 20 13:48:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 13:48:03 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: In stm mode, stick the 'next_instr' and the 'pycode' arguments (an int Message-ID: <20140420114803.0360B1C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70801:8fb2e2faf420 Date: 2014-04-20 13:43 +0200 http://bitbucket.org/pypy/pypy/changeset/8fb2e2faf420/ Log: In stm mode, stick the 'next_instr' and the 'pycode' arguments (an int and a ref from the greenkey) on the guard's descrs. 
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1039,6 +1039,9 @@ def execute_stm_transaction_break(self, _, really_wanted): pass + def execute_stm_set_location(self, _, int, ref): + pass + def execute_keepalive(self, descr, x): pass diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -515,10 +515,12 @@ TY_REF = 0x04 TY_FLOAT = 0x06 - def store_final_boxes(self, guard_op, boxes, metainterp_sd): + def store_final_boxes(self, guard_op, boxes, metainterp_sd, stm_location): guard_op.setfailargs(boxes) self.rd_count = len(boxes) self.guard_opnum = guard_op.getopnum() + if stm_location is not None: # constant-folded + self.stm_location_int, self.stm_location_ref = stm_location # if metainterp_sd.warmrunnerdesc is not None: # for tests jitcounter = metainterp_sd.warmrunnerdesc.jitcounter diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -346,6 +346,7 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.LABEL, rop.STM_READ, + rop.STM_SET_LOCATION, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/rpython/jit/metainterp/jitdriver.py b/rpython/jit/metainterp/jitdriver.py --- a/rpython/jit/metainterp/jitdriver.py +++ b/rpython/jit/metainterp/jitdriver.py @@ -18,6 +18,7 @@ # self.warmstate ... rpython.jit.metainterp.warmspot # self.handle_jitexc_from_bh rpython.jit.metainterp.warmspot # self.no_loop_header ... rpython.jit.metainterp.warmspot + # self.stm_report_location.. rpython.jit.metainterp.warmspot # self.portal_finishtoken... rpython.jit.metainterp.pyjitpl # self.propagate_exc_descr.. rpython.jit.metainterp.pyjitpl # self.index ... 
rpython.jit.codewriter.call diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -360,6 +360,12 @@ self.call_pure_results = loop.call_pure_results self.stm_info = loop.stm_info + if metainterp_sd.config.translation.stm: + from rpython.rtyper.lltypesystem import lltype, llmemory + self.stm_location = (0, lltype.nullptr(llmemory.GCREF.TO)) + else: + self.stm_location = None + self.set_optimizations(optimizations) self.setup() @@ -571,7 +577,8 @@ raise resume.TagOverflow except resume.TagOverflow: raise compile.giveup() - descr.store_final_boxes(op, newboxes, self.metainterp_sd) + descr.store_final_boxes(op, newboxes, self.metainterp_sd, + self.stm_location) # if op.getopnum() == rop.GUARD_VALUE: if self.getvalue(op.getarg(0)) in self.bool_boxes: diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -83,13 +83,18 @@ self._set_break_wanted(True) self.keep_but_ignore_gnf = False self.emit_operation(op) - - + + def optimize_DEBUG_MERGE_POINT(self, op): + jdindex = op.getarg(0).getint() + jd = self.optimizer.metainterp_sd.warmrunnerdesc.jitdrivers_sd[jdindex] + report_location = jd.stm_report_location + if report_location is not None: + idx_num, idx_ref = report_location + num = op.getarg(3 + idx_num).getint() + ref = op.getarg(3 + idx_ref).getref_base() + self.optimizer.stm_location = (num, ref) + self.emit_operation(op) + dispatch_opt = make_dispatcher_method(OptSTM, 'optimize_', default=OptSTM.default_emit) - - - - - diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2077,6 +2077,14 @@ deadframe) if self.resumekey_original_loop_token is None: # 
very rare case raise SwitchToBlackhole(Counters.ABORT_BRIDGE) + # + if (self.staticdata.config.translation.stm and + isinstance(key, compile.ResumeGuardDescr)): + self.history.record(rop.STM_SET_LOCATION, + [ConstInt(key.stm_location_int), + ConstPtr(key.stm_location_ref)], + None) + # self.interpret() except SwitchToBlackhole, stb: self.run_blackhole_interp_to_cancel_tracing(stb) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -504,6 +504,7 @@ 'COND_CALL_GC_WB_ARRAY/2d', # [objptr, arrayindex] (write barr. for array) 'DEBUG_MERGE_POINT/*', # debugging only 'JIT_DEBUG/*', # debugging only + 'STM_SET_LOCATION/2', 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -65,7 +65,62 @@ 'guard_no_exception':1, 'call_may_force':1}) + def test_debug_merge_points(self): + myjitdriver = JitDriver(greens = ['a'], reds = ['x', 'res']) + def g(a, x): + res = 0 + while x > 0: + myjitdriver.jit_merge_point(a=a, x=x, res=res) + res += x + x -= 1 + a = -a + return res + res = self.meta_interp(g, [42, 10], translationoptions={"stm":True}) + assert res == 55 + self.check_resops(debug_merge_point=6) + # + from rpython.jit.metainterp.warmspot import get_stats + loops = get_stats().get_all_loops() + assert len(loops) == 1 + got = [] + for op in loops[0]._all_operations(): + if op.getopname() == "debug_merge_point": + got.append(op.getarglist()[-1].value) + assert got == [42, -42, 42, 42, -42, 42] + def test_stm_report_location(self): + myjitdriver = JitDriver(greens = ['a', 'r'], reds = ['x', 'res'], + stm_report_location = [0, 1]) + class Code(object): + pass + def 
g(a, r, x): + res = 0 + while x > 0: + myjitdriver.jit_merge_point(a=a, r=r, x=x, res=res) + res += x + x -= 1 + a = -a + return res + def main(a, x): + r = Code() + res = -1 + n = 7 + while n > 0: + res = g(a, r, x) + n -= 1 + return res + res = self.meta_interp(main, [42, 10], translationoptions={"stm":True}) + assert res == 55 + self.check_resops(debug_merge_point=6) + # + from rpython.jit.metainterp.warmspot import get_stats + seen = [] + for loop in get_stats().get_all_loops(): + for op in loop._all_operations(): + if op.getopname() == "stm_set_location": + seen.append(op) + [op] = seen + assert op.getarg(0).getint() == -42 class TestLLtype(STMTests, LLJitMixin): diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -389,6 +389,7 @@ graph.func._dont_inline_ = True graph.func._jit_unroll_safe_ = True jd.jitdriver = block.operations[pos].args[1].value + jd.stm_report_location = jd.jitdriver.stm_report_location jd.portal_runner_ptr = "" jd.result_type = history.getkind(jd.portal_graph.getreturnvar() .concretetype)[0] diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -484,13 +484,15 @@ inline_jit_merge_point = False _store_last_enter_jit = None stm_do_transaction_breaks = False + stm_report_location = None def __init__(self, greens=None, reds=None, virtualizables=None, get_jitcell_at=None, set_jitcell_at=None, get_printable_location=None, confirm_enter_jit=None, can_never_inline=None, should_unroll_one_iteration=None, name='jitdriver', check_untranslated=True, - stm_do_transaction_breaks=None): + stm_do_transaction_breaks=None, + stm_report_location=None): if greens is not None: self.greens = greens self.name = name @@ -528,6 +530,8 @@ self.check_untranslated = check_untranslated if stm_do_transaction_breaks is not None: self.stm_do_transaction_breaks = stm_do_transaction_breaks + if 
stm_report_location is not None: + self.stm_report_location = stm_report_location def _freeze_(self): return True From noreply at buildbot.pypy.org Sun Apr 20 13:48:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 13:48:04 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: The point of the 'stm_set_location' operation. Message-ID: <20140420114804.1C4B41C0144@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70802:25d8b67514fd Date: 2014-04-20 13:47 +0200 http://bitbucket.org/pypy/pypy/changeset/25d8b67514fd/ Log: The point of the 'stm_set_location' operation. diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -95,6 +95,12 @@ self.optimizer.stm_location = (num, ref) self.emit_operation(op) + def optimize_STM_SET_LOCATION(self, op): + num = op.getarg(0).getint() + ref = op.getarg(1).getref_base() + self.optimizer.stm_location = (num, ref) + self.emit_operation(op) + dispatch_opt = make_dispatcher_method(OptSTM, 'optimize_', default=OptSTM.default_emit) diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -122,6 +122,43 @@ [op] = seen assert op.getarg(0).getint() == -42 + def test_stm_report_location_2(self): + myjitdriver = JitDriver(greens = ['a', 'r'], reds = ['x', 'res', 'n'], + stm_report_location = [0, 1]) + class Code(object): + pass + def g(a, r, x, n): + res = 0 + while x > 0: + myjitdriver.jit_merge_point(a=a, r=r, x=x, res=res, n=n) + res += x + x -= 1 + a = -a + if n & 1: + pass # sub-bridge of this bridge + return res + def main(a, x): + r = Code() + res = -1 + n = 7 + while n > 0: + res = g(a, r, x, n) + n -= 1 + return res + res = self.meta_interp(main, [42, 10], translationoptions={"stm":True}) + assert res == 55 + 
self.check_resops(debug_merge_point=6) + # + from rpython.jit.metainterp.warmspot import get_stats + seen = [] + for loop in get_stats().get_all_loops(): + for op in loop._all_operations(): + if op.getopname() == "stm_set_location": + seen.append(op) + [op1, op2] = seen + assert op1.getarg(0).getint() == -42 + assert op2.getarg(0).getint() == -42 + class TestLLtype(STMTests, LLJitMixin): pass From noreply at buildbot.pypy.org Sun Apr 20 15:31:43 2014 From: noreply at buildbot.pypy.org (vext01) Date: Sun, 20 Apr 2014 15:31:43 +0200 (CEST) Subject: [pypy-commit] pypy openbsd-lib-prefix: Like Linux, the BSDs use a 'lib' prefix for shared objects. Message-ID: <20140420133143.90CDB1C0543@cobra.cs.uni-duesseldorf.de> Author: Edd Barrett Branch: openbsd-lib-prefix Changeset: r70803:5b1c49e8419c Date: 2014-04-20 13:08 +0100 http://bitbucket.org/pypy/pypy/changeset/5b1c49e8419c/ Log: Like Linux, the BSDs use a 'lib' prefix for shared objects. Fixes problems with HippyVM tests where we see things like: NotImplementedError: cannot find any of the libraries ['timelib', 'timelib1'] diff --git a/rpython/translator/platform/bsd.py b/rpython/translator/platform/bsd.py --- a/rpython/translator/platform/bsd.py +++ b/rpython/translator/platform/bsd.py @@ -6,6 +6,7 @@ DEFAULT_CC = 'clang' so_ext = 'so' + so_prefixes = ('lib', '') make_cmd = 'gmake' standalone_only = [] From noreply at buildbot.pypy.org Sun Apr 20 15:31:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 20 Apr 2014 15:31:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in vext01/pypy/openbsd-lib-prefix (pull request #231) Message-ID: <20140420133144.B358F1C0543@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70804:c65c5258c686 Date: 2014-04-20 15:31 +0200 http://bitbucket.org/pypy/pypy/changeset/c65c5258c686/ Log: Merged in vext01/pypy/openbsd-lib-prefix (pull request #231) Like Linux, the BSDs use a 'lib' prefix for shared objects. 
diff --git a/rpython/translator/platform/bsd.py b/rpython/translator/platform/bsd.py --- a/rpython/translator/platform/bsd.py +++ b/rpython/translator/platform/bsd.py @@ -6,6 +6,7 @@ DEFAULT_CC = 'clang' so_ext = 'so' + so_prefixes = ('lib', '') make_cmd = 'gmake' standalone_only = [] From noreply at buildbot.pypy.org Mon Apr 21 07:29:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 21 Apr 2014 07:29:21 +0200 (CEST) Subject: [pypy-commit] pypy default: reduce code dup in searchsort Message-ID: <20140421052921.D1CB11C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70805:c39475b4b4f7 Date: 2014-04-21 01:28 -0400 http://bitbucket.org/pypy/pypy/changeset/c39475b4b4f7/ Log: reduce code dup in searchsort diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -22,4 +22,3 @@ arr[j] = i i += step return arr - diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -677,23 +677,23 @@ def descr_round(self, space, decimals=0, w_out=None): if space.is_none(w_out): if self.get_dtype().is_bool(): - #numpy promotes bool.round() to float16. Go figure. + # numpy promotes bool.round() to float16. Go figure. 
w_out = W_NDimArray.from_shape(space, self.get_shape(), - descriptor.get_dtype_cache(space).w_float16dtype) + descriptor.get_dtype_cache(space).w_float16dtype) else: w_out = None elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) out = descriptor.dtype_agreement(space, [self], self.get_shape(), - w_out) + w_out) if out.get_dtype().is_bool() and self.get_dtype().is_bool(): calc_dtype = descriptor.get_dtype_cache(space).w_longdtype else: calc_dtype = out.get_dtype() if decimals == 0: - out = out.descr_view(space,space.type(self)) + out = out.descr_view(space, space.type(self)) loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out @@ -711,16 +711,16 @@ side = 'r' else: raise oefmt(space.w_ValueError, - "'%s' is an invalid value for keyword 'side'", side) + "'%s' is an invalid value for keyword 'side'", side) if len(self.get_shape()) > 1: raise OperationError(space.w_ValueError, space.wrap( - "a must be a 1-d array")) + "a must be a 1-d array") v = convert_to_array(space, w_v) - if len(v.get_shape()) >1: + if len(v.get_shape()) > 1: raise OperationError(space.w_ValueError, space.wrap( - "v must be a 1-d array-like")) - ret = W_NDimArray.from_shape(space, v.get_shape(), - descriptor.get_dtype_cache(space).w_longdtype) + "v must be a 1-d array-like") + ret = W_NDimArray.from_shape( + space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) app_searchsort(space, self, v, space.wrap(side), ret) return ret @@ -1277,35 +1277,26 @@ app_searchsort = applevel(r""" def searchsort(arr, v, side, result): - def left_find_index(a, val): + import operator + def func(a, op, val): imin = 0 imax = a.size while imin < imax: imid = imin + ((imax - imin) >> 1) - if a[imid] < val: - imin = imid +1 - else: - imax = imid - return imin - def right_find_index(a, val): - imin = 0 - imax = a.size - while imin < imax: - imid = imin + ((imax - imin) >> 1) - if a[imid] <= val: + if 
op(a[imid], val): imin = imid +1 else: imax = imid return imin if side == 'l': - func = left_find_index + op = operator.lt else: - func = right_find_index + op = operator.le if v.size < 2: - result[...] = func(arr, v) + result[...] = func(arr, op, v) else: for i in range(v.size): - result[i] = func(arr, v[i]) + result[i] = func(arr, op, v[i]) return result """, filename=__file__).interphook('searchsort') diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -362,4 +362,3 @@ assert (ret == [0, 5, 1, 2]).all() if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, "a.searchsorted(3, sorter=range(6))") - From noreply at buildbot.pypy.org Mon Apr 21 15:13:34 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 21 Apr 2014 15:13:34 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Guess what, there were two different versions of the YieldFrom implementation. Message-ID: <20140421131334.296121C0185@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70806:ab33ccfe90fb Date: 2014-04-14 22:28 +0200 http://bitbucket.org/pypy/pypy/changeset/ab33ccfe90fb/ Log: Guess what, there were two different versions of the YieldFrom implementation. 
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1468,11 +1468,10 @@ class Yield(expr): - def __init__(self, is_from, value, lineno, col_offset): - self.is_from = is_from + def __init__(self, value, lineno, col_offset): self.value = value expr.__init__(self, lineno, col_offset) - self.initialization_state = 15 + self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Yield(self) @@ -1483,15 +1482,37 @@ return visitor.visit_Yield(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~8) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'is_from', None], 'Yield') + if (self.initialization_state & ~4) ^ 3: + self.missing_field(space, ['lineno', 'col_offset', None], 'Yield') else: - if not self.initialization_state & 8: + if not self.initialization_state & 4: self.value = None if self.value: self.value.sync_app_attrs(space) +class YieldFrom(expr): + + def __init__(self, value, lineno, col_offset): + self.value = value + expr.__init__(self, lineno, col_offset) + self.initialization_state = 7 + + def walkabout(self, visitor): + visitor.visit_YieldFrom(self) + + def mutate_over(self, visitor): + self.value = self.value.mutate_over(visitor) + return visitor.visit_YieldFrom(self) + + def sync_app_attrs(self, space): + if (self.initialization_state & ~0) ^ 7: + self.missing_field(space, ['lineno', 'col_offset', 'value'], 'YieldFrom') + else: + pass + self.value.sync_app_attrs(space) + + class Compare(expr): def __init__(self, left, ops, comparators, lineno, col_offset): @@ -2582,6 +2603,8 @@ return self.default_visitor(node) def visit_Yield(self, node): return self.default_visitor(node) + def visit_YieldFrom(self, node): + return self.default_visitor(node) def visit_Compare(self, node): return self.default_visitor(node) def visit_Call(self, node): @@ -2786,6 +2809,9 @@ if node.value: 
node.value.walkabout(self) + def visit_YieldFrom(self, node): + node.value.walkabout(self) + def visit_Compare(self, node): node.left.walkabout(self) self.visit_sequence(node.comparators) @@ -5919,40 +5945,12 @@ __init__=interp2app(GeneratorExp_init), ) -def Yield_get_is_from(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'is_from') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 4: - raise_attriberr(space, w_self, 'is_from') - return space.wrap(w_self.is_from) - -def Yield_set_is_from(space, w_self, w_new_value): - try: - w_self.is_from = space.int_w(w_new_value) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'is_from', w_new_value) - w_self.initialization_state &= ~4 - return - # need to save the original object too - w_self.setdictvalue(space, 'is_from', w_new_value) - w_self.initialization_state |= 4 - -def Yield_del_is_from(space, w_self): - # Check if the element exists, raise appropriate exceptions - Yield_get_is_from(space, w_self) - w_self.deldictvalue(space, 'is_from') - w_self.initialization_state &= ~4 - def Yield_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') if w_obj is not None: return w_obj - if not w_self.initialization_state & 8: + if not w_self.initialization_state & 4: raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) @@ -5965,24 +5963,24 @@ if not e.match(space, space.w_TypeError): raise w_self.setdictvalue(space, 'value', w_new_value) - w_self.initialization_state &= ~8 + w_self.initialization_state &= ~4 return w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 8 + w_self.initialization_state |= 4 def Yield_del_value(space, w_self): # Check if the element exists, raise appropriate exceptions Yield_get_value(space, w_self) w_self.deldictvalue(space, 'value') - w_self.initialization_state &= ~8 - -_Yield_field_unroller = 
unrolling_iterable(['is_from', 'value']) + w_self.initialization_state &= ~4 + +_Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Yield, w_self) args_w, kwargs_w = __args__.unpack() if args_w: - if len(args_w) != 2: - w_err = space.wrap("Yield constructor takes either 0 or 2 positional arguments") + if len(args_w) != 1: + w_err = space.wrap("Yield constructor takes either 0 or 1 positional argument") raise OperationError(space.w_TypeError, w_err) i = 0 for field in _Yield_field_unroller: @@ -5994,13 +5992,65 @@ Yield.typedef = typedef.TypeDef("Yield", expr.typedef, __module__='_ast', - _fields=_FieldsWrapper(['is_from', 'value']), - is_from=typedef.GetSetProperty(Yield_get_is_from, Yield_set_is_from, Yield_del_is_from, cls=Yield), + _fields=_FieldsWrapper(['value']), value=typedef.GetSetProperty(Yield_get_value, Yield_set_value, Yield_del_value, cls=Yield), __new__=interp2app(get_AST_new(Yield)), __init__=interp2app(Yield_init), ) +def YieldFrom_get_value(space, w_self): + if w_self.w_dict is not None: + w_obj = w_self.getdictvalue(space, 'value') + if w_obj is not None: + return w_obj + if not w_self.initialization_state & 4: + raise_attriberr(space, w_self, 'value') + return space.wrap(w_self.value) + +def YieldFrom_set_value(space, w_self, w_new_value): + try: + w_self.value = space.interp_w(expr, w_new_value, False) + if type(w_self.value) is expr: + raise OperationError(space.w_TypeError, space.w_None) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + w_self.setdictvalue(space, 'value', w_new_value) + w_self.initialization_state &= ~4 + return + w_self.deldictvalue(space, 'value') + w_self.initialization_state |= 4 + +def YieldFrom_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + YieldFrom_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~4 + 
+_YieldFrom_field_unroller = unrolling_iterable(['value']) +def YieldFrom_init(space, w_self, __args__): + w_self = space.descr_self_interp_w(YieldFrom, w_self) + args_w, kwargs_w = __args__.unpack() + if args_w: + if len(args_w) != 1: + w_err = space.wrap("YieldFrom constructor takes either 0 or 1 positional argument") + raise OperationError(space.w_TypeError, w_err) + i = 0 + for field in _YieldFrom_field_unroller: + space.setattr(w_self, space.wrap(field), args_w[i]) + i += 1 + for field, w_value in kwargs_w.iteritems(): + space.setattr(w_self, space.wrap(field), w_value) + +YieldFrom.typedef = typedef.TypeDef("YieldFrom", + expr.typedef, + __module__='_ast', + _fields=_FieldsWrapper(['value']), + value=typedef.GetSetProperty(YieldFrom_get_value, YieldFrom_set_value, YieldFrom_del_value, cls=YieldFrom), + __new__=interp2app(get_AST_new(YieldFrom)), + __init__=interp2app(YieldFrom_init), +) + def Compare_get_left(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'left') diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -821,7 +821,9 @@ expr = self.handle_testlist(arg_node.children[0]) else: expr = None - return ast.Yield(is_from, expr, expr_node.lineno, expr_node.column) + if is_from: + return ast.YieldFrom(expr, expr_node.lineno, expr_node.column) + return ast.Yield(expr, expr_node.lineno, expr_node.column) elif expr_node_type == syms.factor: if len(expr_node.children) == 1: expr_node = expr_node.children[0] diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl --- a/pypy/interpreter/astcompiler/tools/Python.asdl +++ b/pypy/interpreter/astcompiler/tools/Python.asdl @@ -60,7 +60,8 @@ | DictComp(expr key, expr value, comprehension* generators) | GeneratorExp(expr elt, comprehension* generators) -- the grammar constrains where 
yield expressions can occur - | Yield(int is_from, expr? value) + | Yield(expr? value) + | YieldFrom(expr value) -- need sequences for compare to distinguish between -- x < 4 < 3 and (x < 4) < 3 | Compare(expr left, cmpop* ops, expr* comparators) From noreply at buildbot.pypy.org Mon Apr 21 15:13:35 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 21 Apr 2014 15:13:35 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add an AST validator, will prevent crashes when bad ast object are built and compiled. Message-ID: <20140421131335.6EC011C0185@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70807:fe0435cfe837 Date: 2014-04-21 15:10 +0200 http://bitbucket.org/pypy/pypy/changeset/fe0435cfe837/ Log: Add an AST validator, will prevent crashes when bad ast object are built and compiled. diff --git a/pypy/interpreter/astcompiler/test/test_validate.py b/pypy/interpreter/astcompiler/test/test_validate.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/astcompiler/test/test_validate.py @@ -0,0 +1,425 @@ +import os +from pypy.interpreter.error import OperationError +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.astcompiler import ast +from pypy.interpreter.astcompiler import validate + +class TestASTValidator: + def mod(self, mod, msg=None, mode="exec", exc=validate.ValidationError): + space = self.space + if isinstance(exc, W_Root): + w_exc = exc + exc = OperationError + else: + w_exc = None + with raises(exc) as cm: + validate.validate_ast(space, mod) + if w_exc is not None: + w_value = cm.value.get_w_value(space) + assert cm.value.match(space, w_exc) + exc_msg = str(cm.value) + else: + exc_msg = str(cm.value) + if msg is not None: + assert msg in exc_msg + + def expr(self, node, msg=None, exc=validate.ValidationError): + mod = ast.Module([ast.Expr(node, 0, 0)]) + self.mod(mod, msg, exc=exc) + + def stmt(self, stmt, msg=None): + mod = ast.Module([stmt]) + self.mod(mod, msg) + + def 
test_module(self): + m = ast.Interactive([ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)]) + self.mod(m, "must have Load context", "single") + m = ast.Expression(ast.Name("x", ast.Store, 0, 0)) + self.mod(m, "must have Load context", "eval") + + def _check_arguments(self, fac, check): + def arguments(args=None, vararg=None, varargannotation=None, + kwonlyargs=None, kwarg=None, kwargannotation=None, + defaults=None, kw_defaults=None): + if args is None: + args = [] + if kwonlyargs is None: + kwonlyargs = [] + if defaults is None: + defaults = [] + if kw_defaults is None: + kw_defaults = [] + args = ast.arguments(args, vararg, varargannotation, kwonlyargs, + kwarg, kwargannotation, defaults, kw_defaults) + return fac(args) + args = [ast.arg("x", ast.Name("x", ast.Store, 0, 0))] + check(arguments(args=args), "must have Load context") + check(arguments(varargannotation=ast.Num(self.space.wrap(3), 0, 0)), + "varargannotation but no vararg") + check(arguments(varargannotation=ast.Name("x", ast.Store, 0, 0), vararg="x"), + "must have Load context") + check(arguments(kwonlyargs=args), "must have Load context") + check(arguments(kwargannotation=ast.Num(self.space.wrap(42), 0, 0)), + "kwargannotation but no kwarg") + check(arguments(kwargannotation=ast.Name("x", ast.Store, 0, 0), + kwarg="x"), "must have Load context") + check(arguments(defaults=[ast.Num(self.space.wrap(3), 0, 0)]), + "more positional defaults than args") + check(arguments(kw_defaults=[ast.Num(self.space.wrap(4), 0, 0)]), + "length of kwonlyargs is not the same as kw_defaults") + args = [ast.arg("x", ast.Name("x", ast.Load, 0, 0))] + check(arguments(args=args, defaults=[ast.Name("x", ast.Store, 0, 0)]), + "must have Load context") + args = [ast.arg("a", ast.Name("x", ast.Load, 0, 0)), + ast.arg("b", ast.Name("y", ast.Load, 0, 0))] + check(arguments(kwonlyargs=args, + kw_defaults=[None, ast.Name("x", ast.Store, 0, 0)]), + "must have Load context") + + def test_funcdef(self): + a = ast.arguments([], None, 
None, [], None, None, [], []) + f = ast.FunctionDef("x", a, [], [], None, 0, 0) + self.stmt(f, "empty body on FunctionDef") + f = ast.FunctionDef("x", a, [ast.Pass(0, 0)], [ast.Name("x", ast.Store, 0, 0)], + None, 0, 0) + self.stmt(f, "must have Load context") + f = ast.FunctionDef("x", a, [ast.Pass(0, 0)], [], + ast.Name("x", ast.Store, 0, 0), 0, 0) + self.stmt(f, "must have Load context") + def fac(args): + return ast.FunctionDef("x", args, [ast.Pass(0, 0)], [], None, 0, 0) + self._check_arguments(fac, self.stmt) + + def test_classdef(self): + def cls(bases=None, keywords=None, starargs=None, kwargs=None, + body=None, decorator_list=None): + if bases is None: + bases = [] + if keywords is None: + keywords = [] + if body is None: + body = [ast.Pass(0, 0)] + if decorator_list is None: + decorator_list = [] + return ast.ClassDef("myclass", bases, keywords, starargs, + kwargs, body, decorator_list, 0, 0) + self.stmt(cls(bases=[ast.Name("x", ast.Store, 0, 0)]), + "must have Load context") + self.stmt(cls(keywords=[ast.keyword("x", ast.Name("x", ast.Store, 0, 0))]), + "must have Load context") + self.stmt(cls(starargs=ast.Name("x", ast.Store, 0, 0)), + "must have Load context") + self.stmt(cls(kwargs=ast.Name("x", ast.Store, 0, 0)), + "must have Load context") + self.stmt(cls(body=[]), "empty body on ClassDef") + self.stmt(cls(body=[None]), "None disallowed") + self.stmt(cls(decorator_list=[ast.Name("x", ast.Store, 0, 0)]), + "must have Load context") + + def test_delete(self): + self.stmt(ast.Delete([], 0, 0), "empty targets on Delete") + self.stmt(ast.Delete([None], 0, 0), "None disallowed") + self.stmt(ast.Delete([ast.Name("x", ast.Load, 0, 0)], 0, 0), + "must have Del context") + + def test_assign(self): + self.stmt(ast.Assign([], ast.Num(self.space.wrap(3), 0, 0), 0, 0), "empty targets on Assign") + self.stmt(ast.Assign([None], ast.Num(self.space.wrap(3), 0, 0), 0, 0), "None disallowed") + self.stmt(ast.Assign([ast.Name("x", ast.Load, 0, 0)], 
ast.Num(self.space.wrap(3), 0, 0), 0, 0), + "must have Store context") + self.stmt(ast.Assign([ast.Name("x", ast.Store, 0, 0)], + ast.Name("y", ast.Store, 0, 0), 0, 0), + "must have Load context") + + def test_augassign(self): + aug = ast.AugAssign(ast.Name("x", ast.Load, 0, 0), ast.Add, + ast.Name("y", ast.Load, 0, 0), 0, 0) + self.stmt(aug, "must have Store context") + aug = ast.AugAssign(ast.Name("x", ast.Store, 0, 0), ast.Add, + ast.Name("y", ast.Store, 0, 0), 0, 0) + self.stmt(aug, "must have Load context") + + def test_for(self): + x = ast.Name("x", ast.Store, 0, 0) + y = ast.Name("y", ast.Load, 0, 0) + p = ast.Pass(0, 0) + self.stmt(ast.For(x, y, [], [], 0, 0), "empty body on For") + self.stmt(ast.For(ast.Name("x", ast.Load, 0, 0), y, [p], [], 0, 0), + "must have Store context") + self.stmt(ast.For(x, ast.Name("y", ast.Store, 0, 0), [p], [], 0, 0), + "must have Load context") + e = ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0) + self.stmt(ast.For(x, y, [e], [], 0, 0), "must have Load context") + self.stmt(ast.For(x, y, [p], [e], 0, 0), "must have Load context") + + def test_while(self): + self.stmt(ast.While(ast.Num(self.space.wrap(3), 0, 0), [], [], 0, 0), "empty body on While") + self.stmt(ast.While(ast.Name("x", ast.Store, 0, 0), [ast.Pass(0, 0)], [], 0, 0), + "must have Load context") + self.stmt(ast.While(ast.Num(self.space.wrap(3), 0, 0), [ast.Pass(0, 0)], + [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], 0, 0), + "must have Load context") + + def test_if(self): + self.stmt(ast.If(ast.Num(self.space.wrap(3), 0, 0), [], [], 0, 0), "empty body on If") + i = ast.If(ast.Name("x", ast.Store, 0, 0), [ast.Pass(0, 0)], [], 0, 0) + self.stmt(i, "must have Load context") + i = ast.If(ast.Num(self.space.wrap(3), 0, 0), [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], [], 0, 0) + self.stmt(i, "must have Load context") + i = ast.If(ast.Num(self.space.wrap(3), 0, 0), [ast.Pass(0, 0)], + [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], 0, 0) + self.stmt(i, "must 
have Load context") + + @skip("enable when parser uses the new With construct") + def test_with(self): + p = ast.Pass(0, 0) + self.stmt(ast.With([], [p]), "empty items on With") + i = ast.withitem(ast.Num(self.space.wrap(3), 0, 0), None) + self.stmt(ast.With([i], []), "empty body on With") + i = ast.withitem(ast.Name("x", ast.Store, 0, 0), None) + self.stmt(ast.With([i], [p]), "must have Load context") + i = ast.withitem(ast.Num(self.space.wrap(3), 0, 0), ast.Name("x", ast.Load, 0, 0)) + self.stmt(ast.With([i], [p]), "must have Store context") + + def test_raise(self): + r = ast.Raise(None, ast.Num(self.space.wrap(3), 0, 0), 0, 0) + self.stmt(r, "Raise with cause but no exception") + r = ast.Raise(ast.Name("x", ast.Store, 0, 0), None, 0, 0) + self.stmt(r, "must have Load context") + r = ast.Raise(ast.Num(self.space.wrap(4), 0, 0), ast.Name("x", ast.Store, 0, 0), 0, 0) + self.stmt(r, "must have Load context") + + @skip("enable when parser uses the new Try construct") + def test_try(self): + p = ast.Pass(0, 0) + t = ast.Try([], [], [], [p]) + self.stmt(t, "empty body on Try") + t = ast.Try([ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], [], [], [p]) + self.stmt(t, "must have Load context") + t = ast.Try([p], [], [], []) + self.stmt(t, "Try has neither except handlers nor finalbody") + t = ast.Try([p], [], [p], [p]) + self.stmt(t, "Try has orelse but no except handlers") + t = ast.Try([p], [ast.ExceptHandler(None, "x", [])], [], []) + self.stmt(t, "empty body on ExceptHandler") + e = [ast.ExceptHandler(ast.Name("x", ast.Store, 0, 0), "y", [p])] + self.stmt(ast.Try([p], e, [], []), "must have Load context") + e = [ast.ExceptHandler(None, "x", [p])] + t = ast.Try([p], e, [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], [p]) + self.stmt(t, "must have Load context") + t = ast.Try([p], e, [p], [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)]) + self.stmt(t, "must have Load context") + + def test_assert(self): + self.stmt(ast.Assert(ast.Name("x", ast.Store, 0, 0), None, 
0, 0), + "must have Load context") + assrt = ast.Assert(ast.Name("x", ast.Load, 0, 0), + ast.Name("y", ast.Store, 0, 0), 0, 0) + self.stmt(assrt, "must have Load context") + + def test_import(self): + self.stmt(ast.Import([], 0, 0), "empty names on Import") + + def test_importfrom(self): + imp = ast.ImportFrom(None, [ast.alias("x", None)], -42, 0, 0) + self.stmt(imp, "level less than -1") + self.stmt(ast.ImportFrom(None, [], 0, 0, 0), "empty names on ImportFrom") + + def test_global(self): + self.stmt(ast.Global([], 0, 0), "empty names on Global") + + def test_nonlocal(self): + self.stmt(ast.Nonlocal([], 0, 0), "empty names on Nonlocal") + + def test_expr(self): + e = ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0) + self.stmt(e, "must have Load context") + + def test_boolop(self): + b = ast.BoolOp(ast.And, [], 0, 0) + self.expr(b, "less than 2 values") + b = ast.BoolOp(ast.And, [ast.Num(self.space.wrap(3), 0, 0)], 0, 0) + self.expr(b, "less than 2 values") + b = ast.BoolOp(ast.And, [ast.Num(self.space.wrap(4), 0, 0), None], 0, 0) + self.expr(b, "None disallowed") + b = ast.BoolOp(ast.And, [ast.Num(self.space.wrap(4), 0, 0), ast.Name("x", ast.Store, 0, 0)], 0, 0) + self.expr(b, "must have Load context") + + def test_unaryop(self): + u = ast.UnaryOp(ast.Not, ast.Name("x", ast.Store, 0, 0), 0, 0) + self.expr(u, "must have Load context") + + def test_lambda(self): + a = ast.arguments([], None, None, [], None, None, [], []) + self.expr(ast.Lambda(a, ast.Name("x", ast.Store, 0, 0), 0, 0), + "must have Load context") + def fac(args): + return ast.Lambda(args, ast.Name("x", ast.Load, 0, 0), 0, 0) + self._check_arguments(fac, self.expr) + + def test_ifexp(self): + l = ast.Name("x", ast.Load, 0, 0) + s = ast.Name("y", ast.Store, 0, 0) + for args in (s, l, l), (l, s, l), (l, l, s): + self.expr(ast.IfExp(*(args + (0, 0))), "must have Load context") + + def test_dict(self): + d = ast.Dict([], [ast.Name("x", ast.Load, 0, 0)], 0, 0) + self.expr(d, "same number of keys as values") 
+ d = ast.Dict([None], [ast.Name("x", ast.Load, 0, 0)], 0, 0) + self.expr(d, "None disallowed") + d = ast.Dict([ast.Name("x", ast.Load, 0, 0)], [None], 0, 0) + self.expr(d, "None disallowed") + + def test_set(self): + self.expr(ast.Set([None], 0, 0), "None disallowed") + s = ast.Set([ast.Name("x", ast.Store, 0, 0)], 0, 0) + self.expr(s, "must have Load context") + + def _check_comprehension(self, fac): + self.expr(fac([]), "comprehension with no generators") + g = ast.comprehension(ast.Name("x", ast.Load, 0, 0), + ast.Name("x", ast.Load, 0, 0), []) + self.expr(fac([g]), "must have Store context") + g = ast.comprehension(ast.Name("x", ast.Store, 0, 0), + ast.Name("x", ast.Store, 0, 0), []) + self.expr(fac([g]), "must have Load context") + x = ast.Name("x", ast.Store, 0, 0) + y = ast.Name("y", ast.Load, 0, 0) + g = ast.comprehension(x, y, [None]) + self.expr(fac([g]), "None disallowed") + g = ast.comprehension(x, y, [ast.Name("x", ast.Store, 0, 0)]) + self.expr(fac([g]), "must have Load context") + + def _simple_comp(self, fac): + g = ast.comprehension(ast.Name("x", ast.Store, 0, 0), + ast.Name("x", ast.Load, 0, 0), []) + self.expr(fac(ast.Name("x", ast.Store, 0, 0), [g], 0, 0), + "must have Load context") + def wrap(gens): + return fac(ast.Name("x", ast.Store, 0, 0), gens, 0, 0) + self._check_comprehension(wrap) + + def test_listcomp(self): + self._simple_comp(ast.ListComp) + + def test_setcomp(self): + self._simple_comp(ast.SetComp) + + def test_generatorexp(self): + self._simple_comp(ast.GeneratorExp) + + def test_dictcomp(self): + g = ast.comprehension(ast.Name("y", ast.Store, 0, 0), + ast.Name("p", ast.Load, 0, 0), []) + c = ast.DictComp(ast.Name("x", ast.Store, 0, 0), + ast.Name("y", ast.Load, 0, 0), [g], 0, 0) + self.expr(c, "must have Load context") + c = ast.DictComp(ast.Name("x", ast.Load, 0, 0), + ast.Name("y", ast.Store, 0, 0), [g], 0, 0) + self.expr(c, "must have Load context") + def factory(comps): + k = ast.Name("x", ast.Load, 0, 0) + v = ast.Name("y", 
ast.Load, 0, 0) + return ast.DictComp(k, v, comps, 0, 0) + self._check_comprehension(factory) + + def test_yield(self): + self.expr(ast.Yield(ast.Name("x", ast.Store, 0, 0), 0, 0), "must have Load") + self.expr(ast.YieldFrom(ast.Name("x", ast.Store, 0, 0), 0, 0), "must have Load") + + def test_compare(self): + left = ast.Name("x", ast.Load, 0, 0) + comp = ast.Compare(left, [ast.In], [], 0, 0) + self.expr(comp, "no comparators") + comp = ast.Compare(left, [ast.In], [ast.Num(self.space.wrap(4), 0, 0), ast.Num(self.space.wrap(5), 0, 0)], 0, 0) + self.expr(comp, "different number of comparators and operands") + comp = ast.Compare(ast.Num(self.space.wrap("blah"), 0, 0), [ast.In], [left], 0, 0) + self.expr(comp, "non-numeric", exc=self.space.w_TypeError) + comp = ast.Compare(left, [ast.In], [ast.Num(self.space.wrap("blah"), 0, 0)], 0, 0) + self.expr(comp, "non-numeric", exc=self.space.w_TypeError) + + def test_call(self): + func = ast.Name("x", ast.Load, 0, 0) + args = [ast.Name("y", ast.Load, 0, 0)] + keywords = [ast.keyword("w", ast.Name("z", ast.Load, 0, 0))] + stararg = ast.Name("p", ast.Load, 0, 0) + kwarg = ast.Name("q", ast.Load, 0, 0) + call = ast.Call(ast.Name("x", ast.Store, 0, 0), args, keywords, stararg, + kwarg, 0, 0) + self.expr(call, "must have Load context") + call = ast.Call(func, [None], keywords, stararg, kwarg, 0, 0) + self.expr(call, "None disallowed") + bad_keywords = [ast.keyword("w", ast.Name("z", ast.Store, 0, 0))] + call = ast.Call(func, args, bad_keywords, stararg, kwarg, 0, 0) + self.expr(call, "must have Load context") + call = ast.Call(func, args, keywords, ast.Name("z", ast.Store, 0, 0), kwarg, 0, 0) + self.expr(call, "must have Load context") + call = ast.Call(func, args, keywords, stararg, + ast.Name("w", ast.Store, 0, 0), 0, 0) + self.expr(call, "must have Load context") + + def test_num(self): + space = self.space + w_objs = space.appexec([], """(): + class subint(int): + pass + class subfloat(float): + pass + class subcomplex(complex): 
+ pass + return ("0", "hello", subint(), subfloat(), subcomplex()) + """) + for w_obj in space.unpackiterable(w_objs): + self.expr(ast.Num(w_obj, 0, 0), "non-numeric", exc=self.space.w_TypeError) + + def test_attribute(self): + attr = ast.Attribute(ast.Name("x", ast.Store, 0, 0), "y", ast.Load, 0, 0) + self.expr(attr, "must have Load context") + + def test_subscript(self): + sub = ast.Subscript(ast.Name("x", ast.Store, 0, 0), ast.Index(ast.Num(self.space.wrap(3), 0, 0)), + ast.Load, 0, 0) + self.expr(sub, "must have Load context") + x = ast.Name("x", ast.Load, 0, 0) + sub = ast.Subscript(x, ast.Index(ast.Name("y", ast.Store, 0, 0)), + ast.Load, 0, 0) + self.expr(sub, "must have Load context") + s = ast.Name("x", ast.Store, 0, 0) + for args in (s, None, None), (None, s, None), (None, None, s): + sl = ast.Slice(*args) + self.expr(ast.Subscript(x, sl, ast.Load, 0, 0), + "must have Load context") + sl = ast.ExtSlice([]) + self.expr(ast.Subscript(x, sl, ast.Load, 0, 0), "empty dims on ExtSlice") + sl = ast.ExtSlice([ast.Index(s)]) + self.expr(ast.Subscript(x, sl, ast.Load, 0, 0), "must have Load context") + + def test_starred(self): + left = ast.List([ast.Starred(ast.Name("x", ast.Load, 0, 0), ast.Store, 0, 0)], + ast.Store, 0, 0) + assign = ast.Assign([left], ast.Num(self.space.wrap(4), 0, 0), 0, 0) + self.stmt(assign, "must have Store context") + + def _sequence(self, fac): + self.expr(fac([None], ast.Load, 0, 0), "None disallowed") + self.expr(fac([ast.Name("x", ast.Store, 0, 0)], ast.Load, 0, 0), + "must have Load context") + + def test_list(self): + self._sequence(ast.List) + + def test_tuple(self): + self._sequence(ast.Tuple) + + def test_stdlib_validates(self): + stdlib = os.path.join(os.path.dirname(ast.__file__), '../../../lib-python/3') + tests = ["os.py", "test/test_grammar.py", "test/test_unpack_ex.py"] + for module in tests: + fn = os.path.join(stdlib, module) + print 'compiling', fn + with open(fn, "r") as fp: + source = fp.read() + ec = 
self.space.getexecutioncontext() + ast_node = ec.compiler.compile_to_ast(source, fn, "exec", 0) + ec.compiler.validate_ast(ast_node) diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/astcompiler/validate.py @@ -0,0 +1,410 @@ +"""A visitor to validate an AST object.""" + +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.astcompiler import ast +from rpython.tool.pairtype import pair, pairtype +from pypy.interpreter.baseobjspace import W_Root + + +def validate_ast(space, node): + node.walkabout(AstValidator(space)) + + +class ValidationError(Exception): + pass + + +def expr_context_name(ctx): + if not 1 <= ctx <= len(ast.expr_context_to_class): + return '??' + return ast.expr_context_to_class[ctx - 1].typedef.name + +def _check_context(expected_ctx, actual_ctx): + if expected_ctx != actual_ctx: + raise ValidationError( + "expression must have %s context but has %s instead" % + (expr_context_name(expected_ctx), expr_context_name(actual_ctx))) + + +class __extend__(ast.AST): + + def check_context(self, visitor, ctx): + raise AssertionError("should only be on expressions") + + def walkabout_with_ctx(self, visitor, ctx): + self.walkabout(visitor) # With "load" context. 
+ + +class __extend__(ast.expr): + + def check_context(self, visitor, ctx): + if ctx != ast.Load: + raise ValidationError( + "expression which can't be assigned to in %s context" % + expr_context_name(ctx)) + + +class __extend__(ast.Name): + + def check_context(self, visitor, ctx): + _check_context(ctx, self.ctx) + + +class __extend__(ast.List): + + def check_context(self, visitor, ctx): + _check_context(ctx, self.ctx) + + def walkabout_with_ctx(self, visitor, ctx): + visitor._validate_exprs(self.elts, ctx) + + +class __extend__(ast.Tuple): + + def check_context(self, visitor, ctx): + _check_context(ctx, self.ctx) + + def walkabout_with_ctx(self, visitor, ctx): + visitor._validate_exprs(self.elts, ctx) + + +class __extend__(ast.Starred): + + def check_context(self, visitor, ctx): + _check_context(ctx, self.ctx) + + def walkabout_with_ctx(self, visitor, ctx): + visitor._validate_expr(self.value, ctx) + + +class __extend__(ast.Subscript): + + def check_context(self, visitor, ctx): + _check_context(ctx, self.ctx) + + +class __extend__(ast.Attribute): + + def check_context(self, visitor, ctx): + _check_context(ctx, self.ctx) + + +class AstValidator(ast.ASTVisitor): + def __init__(self, space): + self.space = space + + def _validate_stmts(self, stmts): + if not stmts: + return + for stmt in stmts: + if not stmt: + raise ValidationError("None disallowed in statement list") + stmt.walkabout(self) + + def _len(self, node): + if node is None: + return 0 + return len(node) + + def _validate_expr(self, expr, ctx=ast.Load): + expr.check_context(self, ctx) + expr.walkabout_with_ctx(self, ctx) + + def _validate_exprs(self, exprs, ctx=ast.Load, null_ok=False): + if not exprs: + return + for expr in exprs: + if expr: + self._validate_expr(expr, ctx) + elif not null_ok: + raise ValidationError("None disallowed in expression list") + + def _validate_body(self, body, owner): + self._validate_nonempty_seq(body, "body", owner) + self._validate_stmts(body) + + def 
_validate_nonempty_seq(self, seq, what, owner): + if not seq: + raise ValidationError("empty %s on %s" % (what, owner)) + + def _validate_nonempty_seq_s(self, seq, what, owner): + if not seq: + raise ValidationError("empty %s on %s" % (what, owner)) + + def visit_Interactive(self, node): + self._validate_stmts(node.body) + + def visit_Module(self, node): + self._validate_stmts(node.body) + + def visit_Expression(self, node): + self._validate_expr(node.body) + + # Statements + + def visit_arg(self, node): + if node.annotation: + self._validate_expr(node.annotation) + + def visit_arguments(self, node): + self.visit_sequence(node.args) + if node.varargannotation: + if not node.vararg: + raise ValidationError("varargannotation but no vararg on arguments") + self._validate_expr(node.varargannotation) + self.visit_sequence(node.kwonlyargs) + if node.kwargannotation: + if not node.kwarg: + raise ValidationError("kwargannotation but no kwarg on arguments") + self._validate_expr(node.kwargannotation) + if self._len(node.defaults) > self._len(node.args): + raise ValidationError("more positional defaults than args on arguments") + if self._len(node.kw_defaults) != self._len(node.kwonlyargs): + raise ValidationError("length of kwonlyargs is not the same as " + "kw_defaults on arguments") + self._validate_exprs(node.defaults) + self._validate_exprs(node.kw_defaults, null_ok=True) + + def visit_FunctionDef(self, node): + self._validate_body(node.body, "FunctionDef") + node.args.walkabout(self) + self._validate_exprs(node.decorator_list) + if node.returns: + self._validate_expr(node.returns) + + def visit_keyword(self, node): + self._validate_expr(node.value) + + def visit_ClassDef(self, node): + self._validate_body(node.body, "ClassDef") + self._validate_exprs(node.bases) + self.visit_sequence(node.keywords) + self._validate_exprs(node.decorator_list) + if node.starargs: + self._validate_expr(node.starargs) + if node.kwargs: + self._validate_expr(node.kwargs) + + def 
visit_Return(self, node): + if node.value: + self._validate_expr(node.value) + + def visit_Delete(self, node): + self._validate_nonempty_seq(node.targets, "targets", "Delete") + self._validate_exprs(node.targets, ast.Del) + + def visit_Assign(self, node): + self._validate_nonempty_seq(node.targets, "targets", "Assign") + self._validate_exprs(node.targets, ast.Store) + self._validate_expr(node.value) + + def visit_AugAssign(self, node): + self._validate_expr(node.target, ast.Store) + self._validate_expr(node.value) + + def visit_For(self, node): + self._validate_expr(node.target, ast.Store) + self._validate_expr(node.iter) + self._validate_body(node.body, "For") + self._validate_stmts(node.orelse) + + def visit_While(self, node): + self._validate_expr(node.test) + self._validate_body(node.body, "While") + self._validate_stmts(node.orelse) + + def visit_If(self, node): + self._validate_expr(node.test) + self._validate_body(node.body, "If") + self._validate_stmts(node.orelse) + + def visit_With(self, node): + self._validate_expr(node.context_expr) + if node.optional_vars: + self._validate_expr(node.optional_vars, ast.Store) + self._validate_body(node.body, "With") + + def visit_Raise(self, node): + if node.exc: + self._validate_expr(node.exc) + if node.cause: + self._validate_expr(node.cause) + elif node.cause: + raise ValidationError("Raise with cause but no exception") + + def visit_TryExcept(self, node): + self._validate_body(node.body, "TryExcept") + for handler in node.handlers: + handler.walkabout(self) + self._validate_stmts(node.orelse) + + def visit_TryFinally(self, node): + self._validate_body(node.body, "TryFinally") + self._validate_body(node.finalbody, "TryFinally") + + def visit_ExceptHandler(self, node): + if node.type: + self._validate_expr(node.type) + self._validate_body(node.body, "ExceptHandler") + + def visit_Assert(self, node): + self._validate_expr(node.test) + if node.msg: + self._validate_expr(node.msg) + + def visit_Import(self, node): + 
self._validate_nonempty_seq(node.names, "names", "Import") + + def visit_ImportFrom(self, node): + if node.level < -1: + raise ValidationError("ImportFrom level less than -1") + self._validate_nonempty_seq(node.names, "names", "ImportFrom") + + def visit_Global(self, node): + self._validate_nonempty_seq_s(node.names, "names", "Global") + + def visit_Nonlocal(self, node): + self._validate_nonempty_seq_s(node.names, "names", "Nonlocal") + + def visit_Expr(self, node): + self._validate_expr(node.value) + + def visit_Pass(self, node): + pass + + def visit_Break(self, node): + pass + + def visit_Continue(self, node): + pass + + # Expressions + + def visit_Name(self, node): + pass + + def visit_Ellipsis(self, node): + pass + + def visit_BoolOp(self, node): + if len(node.values) < 2: + raise ValidationError("BoolOp with less than 2 values") + self._validate_exprs(node.values) + + def visit_UnaryOp(self, node): + self._validate_expr(node.operand) + + def visit_BinOp(self, node): + self._validate_expr(node.left) + self._validate_expr(node.right) + + def visit_Lambda(self, node): + node.args.walkabout(self) + self._validate_expr(node.body) + + def visit_IfExp(self, node): + self._validate_expr(node.test) + self._validate_expr(node.body) + self._validate_expr(node.orelse) + + def visit_Dict(self, node): + if self._len(node.keys) != self._len(node.values): + raise ValidationError( + "Dict doesn't have the same number of keys as values") + self._validate_exprs(node.keys) + self._validate_exprs(node.values) + + def visit_Set(self, node): + self._validate_exprs(node.elts) + + def _validate_comprehension(self, generators): + if not generators: + raise ValidationError("comprehension with no generators") + for comp in generators: + self._validate_expr(comp.target, ast.Store) + self._validate_expr(comp.iter) + self._validate_exprs(comp.ifs) + + def visit_ListComp(self, node): + self._validate_comprehension(node.generators) + self._validate_expr(node.elt) + + def visit_SetComp(self, 
node): + self._validate_comprehension(node.generators) + self._validate_expr(node.elt) + + def visit_GeneratorExp(self, node): + self._validate_comprehension(node.generators) + self._validate_expr(node.elt) + + def visit_DictComp(self, node): + self._validate_comprehension(node.generators) + self._validate_expr(node.key) + self._validate_expr(node.value) + + def visit_Yield(self, node): + if node.value: + self._validate_expr(node.value) + + def visit_YieldFrom(self, node): + self._validate_expr(node.value) + + def visit_Compare(self, node): + if not node.comparators: + raise ValidationError("Compare with no comparators") + if len(node.comparators) != len(node.ops): + raise ValidationError("Compare has a different number " + "of comparators and operands") + self._validate_exprs(node.comparators) + self._validate_expr(node.left) + + def visit_Call(self, node): + self._validate_expr(node.func) + self._validate_exprs(node.args) + self.visit_sequence(node.keywords) + if node.starargs: + self._validate_expr(node.starargs) + if node.kwargs: + self._validate_expr(node.kwargs) + + def visit_Num(self, node): + space = self.space + w_type = space.type(node.n) + if w_type not in [space.w_int, space.w_float, space.w_complex]: + raise oefmt(space.w_TypeError, "non-numeric type in Num") + + def visit_Str(self, node): + space = self.space + w_type = space.type(node.s) + if w_type != space.w_unicode: + raise oefmt(space.w_TypeError, "non-string type in Str") + + def visit_Bytes(self, node): + space = self.space + w_type = space.type(node.s) + if w_type != space.w_bytes: + raise oefmt(space.w_TypeError, "non-bytes type in Bytes") + + def visit_Attribute(self, node): + self._validate_expr(node.value) + + def visit_Subscript(self, node): + node.slice.walkabout(self) + self._validate_expr(node.value) + + # Subscripts + def visit_Slice(self, node): + if node.lower: + self._validate_expr(node.lower) + if node.upper: + self._validate_expr(node.upper) + if node.step: + 
self._validate_expr(node.step) + + def visit_ExtSlice(self, node): + self._validate_nonempty_seq(node.dims, "dims", "ExtSlice") + for dim in node.dims: + dim.walkabout(self) + + def visit_Index(self, node): + self._validate_expr(node.value) diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -6,7 +6,7 @@ from pypy.interpreter import pycode from pypy.interpreter.pyparser import future, pyparse, error as parseerror from pypy.interpreter.astcompiler import (astbuilder, codegen, consts, misc, - optimize, ast) + optimize, ast, validate) from pypy.interpreter.error import OperationError @@ -136,6 +136,9 @@ e.wrap_info(space)) return code + def validate_ast(self, node): + validate.validate_ast(self.space, node) + def compile_to_ast(self, source, filename, mode, flags): info = pyparse.CompileInfo(filename, mode, flags) return self._compile_to_ast(source, info) diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -56,6 +56,7 @@ # XXX: optimize flag is not used if ast_node is not None: + ec.compiler.validate_ast(ast_node) code = ec.compiler.compile_ast(ast_node, filename, mode, flags) elif flags & consts.PyCF_ONLY_AST: ast_node = ec.compiler.compile_to_ast(source, filename, mode, flags) From noreply at buildbot.pypy.org Mon Apr 21 15:17:39 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 21 Apr 2014 15:17:39 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: a test showing the remaining problems of dicts and bridges Message-ID: <20140421131739.9F5931C06C3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70808:e06ce12276e8 Date: 2014-04-21 15:16 +0200 http://bitbucket.org/pypy/pypy/changeset/e06ce12276e8/ Log: a test showing the remaining problems of dicts and bridges diff 
--git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -342,6 +342,21 @@ self.meta_interp(f, [10]) self.check_simple_loop(call_may_force=0, call=3) + def test_dict_virtual(self): + myjitdriver = JitDriver(greens = [], reds = 'auto') + def f(n): + d = {} + while n > 0: + myjitdriver.jit_merge_point() + if n % 10 == 0: + n -= len(d) + d = {} + d["a"] = n + n -= 1 + return len(d) + self.meta_interp(f, [100]) + self.check_simple_loop(call_may_force=0, call=3) + class TestLLtype(DictTests, LLJitMixin): pass From noreply at buildbot.pypy.org Mon Apr 21 15:35:41 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 21 Apr 2014 15:35:41 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: gah, double not :-( Message-ID: <20140421133541.B8E281C0543@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70809:3398c41c70bb Date: 2014-04-21 15:35 +0200 http://bitbucket.org/pypy/pypy/changeset/3398c41c70bb/ Log: gah, double not :-( just shows that unit tests for VArrayStructStateInfo.generate_guards would really be cool diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -226,7 +226,7 @@ # XXX this needs a test in test_virtualstate!!! 
if not isinstance(other, VArrayStructStateInfo): raise VirtualStatesCantMatch("other is not an VArrayStructStateInfo") - if not self.arraydescr is not other.arraydescr: + if self.arraydescr is not other.arraydescr: raise VirtualStatesCantMatch("other is a different kind of array") if len(self.fielddescrs) != len(other.fielddescrs): diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -355,7 +355,7 @@ n -= 1 return len(d) self.meta_interp(f, [100]) - self.check_simple_loop(call_may_force=0, call=3) + self.check_simple_loop(call_may_force=0, call=0, new=0) class TestLLtype(DictTests, LLJitMixin): From noreply at buildbot.pypy.org Mon Apr 21 18:07:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 21 Apr 2014 18:07:41 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Emit stm_set_location just after the relevant debug_merge_points Message-ID: <20140421160741.9460C1C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70810:906accbe3114 Date: 2014-04-20 13:54 +0200 http://bitbucket.org/pypy/pypy/changeset/906accbe3114/ Log: Emit stm_set_location just after the relevant debug_merge_points diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -1,7 +1,7 @@ from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, ) from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, ResOperation class OptSTM(Optimization): """ @@ -85,15 +85,17 @@ self.emit_operation(op) def optimize_DEBUG_MERGE_POINT(self, op): + self.emit_operation(op) jdindex = op.getarg(0).getint() jd 
= self.optimizer.metainterp_sd.warmrunnerdesc.jitdrivers_sd[jdindex] report_location = jd.stm_report_location if report_location is not None: idx_num, idx_ref = report_location - num = op.getarg(3 + idx_num).getint() - ref = op.getarg(3 + idx_ref).getref_base() - self.optimizer.stm_location = (num, ref) - self.emit_operation(op) + num_box = op.getarg(3 + idx_num) + ref_box = op.getarg(3 + idx_ref) + loc_op = ResOperation(rop.STM_SET_LOCATION, [num_box, ref_box], + None) + self.optimize_STM_SET_LOCATION(loc_op) def optimize_STM_SET_LOCATION(self, op): num = op.getarg(0).getint() diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -112,6 +112,7 @@ res = self.meta_interp(main, [42, 10], translationoptions={"stm":True}) assert res == 55 self.check_resops(debug_merge_point=6) + self.check_resops(stm_set_location=6) # on the main loop # from rpython.jit.metainterp.warmspot import get_stats seen = [] @@ -119,7 +120,8 @@ for op in loop._all_operations(): if op.getopname() == "stm_set_location": seen.append(op) - [op] = seen + assert len(seen) == 6 + 1 + op = seen[-1] assert op.getarg(0).getint() == -42 def test_stm_report_location_2(self): @@ -155,7 +157,8 @@ for op in loop._all_operations(): if op.getopname() == "stm_set_location": seen.append(op) - [op1, op2] = seen + assert len(seen) == 6 + 2 + [op1, op2] = seen[-2:] assert op1.getarg(0).getint() == -42 assert op2.getarg(0).getint() == -42 From noreply at buildbot.pypy.org Mon Apr 21 18:07:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 21 Apr 2014 18:07:42 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Kill stm_set_location again, and instead add a reference to a StmLocation Message-ID: <20140421160742.C095A1C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70811:b8dc286ffdb4 Date: 2014-04-21 17:24 +0200 
http://bitbucket.org/pypy/pypy/changeset/b8dc286ffdb4/ Log: Kill stm_set_location again, and instead add a reference to a StmLocation object to every ResOp. Should allow better location information in case operations are reordered (like setfields). diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1039,9 +1039,6 @@ def execute_stm_transaction_break(self, _, really_wanted): pass - def execute_stm_set_location(self, _, int, ref): - pass - def execute_keepalive(self, descr, x): pass diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -515,12 +515,13 @@ TY_REF = 0x04 TY_FLOAT = 0x06 - def store_final_boxes(self, guard_op, boxes, metainterp_sd, stm_location): + def store_final_boxes(self, guard_op, boxes, metainterp_sd): guard_op.setfailargs(boxes) self.rd_count = len(boxes) self.guard_opnum = guard_op.getopnum() - if stm_location is not None: # constant-folded - self.stm_location_int, self.stm_location_ref = stm_location + if guard_op.stm_location is not None: # constant-folded if not stm + self.stm_location_int = guard_op.stm_location.num + self.stm_location_ref = guard_op.stm_location.ref # if metainterp_sd.warmrunnerdesc is not None: # for tests jitcounter = metainterp_sd.warmrunnerdesc.jitcounter diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -346,7 +346,6 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.LABEL, rop.STM_READ, - rop.STM_SET_LOCATION, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ 
b/rpython/jit/metainterp/history.py @@ -761,20 +761,27 @@ # ____________________________________________________________ +class StmLocation(object): + def __init__(self, num, ref): + self.num = num + self.ref = ref + + class History(object): - def __init__(self): + def __init__(self, metainterp_sd): self.inputargs = None self.operations = [] + self.config = metainterp_sd.config + self.stm_location = None def record(self, opnum, argboxes, resbox, descr=None): op = ResOperation(opnum, argboxes, resbox, descr) - self.operations.append(op) + self.record_op(op) return op - def substitute_operation(self, position, opnum, argboxes, descr=None): - resbox = self.operations[position].result - op = ResOperation(opnum, argboxes, resbox, descr) - self.operations[position] = op + def record_op(self, op): + op.stm_location = self.stm_location + self.operations.append(op) # ____________________________________________________________ diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -360,12 +360,6 @@ self.call_pure_results = loop.call_pure_results self.stm_info = loop.stm_info - if metainterp_sd.config.translation.stm: - from rpython.rtyper.lltypesystem import lltype, llmemory - self.stm_location = (0, lltype.nullptr(llmemory.GCREF.TO)) - else: - self.stm_location = None - self.set_optimizations(optimizations) self.setup() @@ -577,8 +571,7 @@ raise resume.TagOverflow except resume.TagOverflow: raise compile.giveup() - descr.store_final_boxes(op, newboxes, self.metainterp_sd, - self.stm_location) + descr.store_final_boxes(op, newboxes, self.metainterp_sd) # if op.getopnum() == rop.GUARD_VALUE: if self.getvalue(op.getarg(0)) in self.bool_boxes: diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ 
b/rpython/jit/metainterp/optimizeopt/stm.py @@ -84,25 +84,6 @@ self.keep_but_ignore_gnf = False self.emit_operation(op) - def optimize_DEBUG_MERGE_POINT(self, op): - self.emit_operation(op) - jdindex = op.getarg(0).getint() - jd = self.optimizer.metainterp_sd.warmrunnerdesc.jitdrivers_sd[jdindex] - report_location = jd.stm_report_location - if report_location is not None: - idx_num, idx_ref = report_location - num_box = op.getarg(3 + idx_num) - ref_box = op.getarg(3 + idx_ref) - loc_op = ResOperation(rop.STM_SET_LOCATION, [num_box, ref_box], - None) - self.optimize_STM_SET_LOCATION(loc_op) - - def optimize_STM_SET_LOCATION(self, op): - num = op.getarg(0).getint() - ref = op.getarg(1).getref_base() - self.optimizer.stm_location = (num, ref) - self.emit_operation(op) - dispatch_opt = make_dispatcher_method(OptSTM, 'optimize_', default=OptSTM.default_emit) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1123,6 +1123,15 @@ debug_print(loc) args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None) + # + if self.metainterp.staticdata.config.translation.stm: + report_location = jitdriver_sd.stm_report_location + if report_location is not None: + idx_num, idx_ref = report_location + num = greenkey[idx_num].getint() + ref = greenkey[idx_ref].getref_base() + location = history.StmLocation(num, ref) + self.metainterp.history.stm_location = location @arguments("box", "label") def opimpl_goto_if_exception_mismatch(self, vtablebox, next_exc_target): @@ -1857,7 +1866,7 @@ self.framestack[-1].pc = saved_pc def create_empty_history(self): - self.history = history.History() + self.history = history.History(self.staticdata) self.staticdata.stats.set_history(self.history) def _all_constants(self, *boxes): @@ -2080,10 +2089,9 @@ # if 
(self.staticdata.config.translation.stm and isinstance(key, compile.ResumeGuardDescr)): - self.history.record(rop.STM_SET_LOCATION, - [ConstInt(key.stm_location_int), - ConstPtr(key.stm_location_ref)], - None) + location = history.StmLocation(key.stm_location_int, + key.stm_location_ref) + self.history.stm_location = location # self.interpret() except SwitchToBlackhole, stb: @@ -2424,7 +2432,7 @@ rstack._stack_criticalcode_start() try: self.portal_call_depth = -1 # always one portal around - self.history = history.History() + self.history = history.History(self.staticdata) inputargs_and_holes = self.rebuild_state_after_failure(resumedescr, deadframe) self.history.inputargs = [box for box in inputargs_and_holes if box] @@ -2694,23 +2702,22 @@ def record_result_of_call_pure(self, resbox): """ Patch a CALL into a CALL_PURE. """ - op = self.history.operations[-1] + op = self.history.operations.pop() assert op.getopnum() == rop.CALL resbox_as_const = resbox.constbox() for i in range(op.numargs()): if not isinstance(op.getarg(i), Const): break else: - # all-constants: remove the CALL operation now and propagate a + # all-constants: keep the CALL operation removed, and propagate a # constant result - self.history.operations.pop() return resbox_as_const # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. 
arg_consts = [a.constbox() for a in op.getarglist()] self.call_pure_results[arg_consts] = resbox_as_const newop = op.copy_and_change(rop.CALL_PURE, args=op.getarglist()) - self.history.operations[-1] = newop + self.history.record_op(newop) return resbox def direct_assembler_call(self, targetjitdriver_sd): @@ -2727,7 +2734,7 @@ warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs) op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token) - self.history.operations.append(op) + self.history.record_op(op) # # To fix an obscure issue, make sure the vable stays alive # longer than the CALL_ASSEMBLER operation. We do it by diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -22,6 +22,7 @@ name = "" pc = 0 opnum = 0 + stm_location = None _cls_has_bool_result = False _attrs_ = ('result',) @@ -92,6 +93,7 @@ if descr is not None: descr = descr.clone_if_mutable() op = ResOperation(self.getopnum(), args[:], self.result, descr) + op.stm_location = self.stm_location if not we_are_translated(): op.name = self.name op.pc = self.pc @@ -504,7 +506,6 @@ 'COND_CALL_GC_WB_ARRAY/2d', # [objptr, arrayindex] (write barr. 
for array) 'DEBUG_MERGE_POINT/*', # debugging only 'JIT_DEBUG/*', # debugging only - 'STM_SET_LOCATION/2', 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -88,6 +88,29 @@ got.append(op.getarglist()[-1].value) assert got == [42, -42, 42, 42, -42, 42] + def check_stm_locations(self, operations=None, cur_location="???"): + if operations is None: + from rpython.jit.metainterp.warmspot import get_stats + loop = get_stats().get_all_loops()[0] + operations = loop.operations + # + for op in operations: + if op.getopname() == "debug_merge_point": + num_box, ref_box = op.getarglist()[-2:] + num = num_box.getint() + ref = ref_box.getref_base() + cur_location = (num, ref) + elif op.getopname() in ("label", "finish", "jump"): + pass + else: + stmloc = op.stm_location + assert stmloc is not None, op + assert cur_location == (stmloc.num, stmloc.ref) + if (op.is_guard() and + hasattr(op.getdescr(), '_debug_suboperations')): + subops = op.getdescr()._debug_suboperations + self.check_stm_locations(subops, cur_location) + def test_stm_report_location(self): myjitdriver = JitDriver(greens = ['a', 'r'], reds = ['x', 'res'], stm_report_location = [0, 1]) @@ -112,17 +135,7 @@ res = self.meta_interp(main, [42, 10], translationoptions={"stm":True}) assert res == 55 self.check_resops(debug_merge_point=6) - self.check_resops(stm_set_location=6) # on the main loop - # - from rpython.jit.metainterp.warmspot import get_stats - seen = [] - for loop in get_stats().get_all_loops(): - for op in loop._all_operations(): - if op.getopname() == "stm_set_location": - seen.append(op) - assert len(seen) == 6 + 1 - op = seen[-1] - assert op.getarg(0).getint() == -42 + self.check_stm_locations() def 
test_stm_report_location_2(self): myjitdriver = JitDriver(greens = ['a', 'r'], reds = ['x', 'res', 'n'], @@ -150,17 +163,7 @@ res = self.meta_interp(main, [42, 10], translationoptions={"stm":True}) assert res == 55 self.check_resops(debug_merge_point=6) - # - from rpython.jit.metainterp.warmspot import get_stats - seen = [] - for loop in get_stats().get_all_loops(): - for op in loop._all_operations(): - if op.getopname() == "stm_set_location": - seen.append(op) - assert len(seen) == 6 + 2 - [op1, op2] = seen[-2:] - assert op1.getarg(0).getint() == -42 - assert op2.getarg(0).getint() == -42 + self.check_stm_locations() class TestLLtype(STMTests, LLJitMixin): From noreply at buildbot.pypy.org Mon Apr 21 18:07:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 21 Apr 2014 18:07:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Test that setfield_gc maintain their original stm_location when Message-ID: <20140421160743.EEC731C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70812:f76f5363798a Date: 2014-04-21 18:06 +0200 http://bitbucket.org/pypy/pypy/changeset/f76f5363798a/ Log: Test that setfield_gc maintain their original stm_location when delayed by optimization diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -760,13 +760,6 @@ # ____________________________________________________________ - -class StmLocation(object): - def __init__(self, num, ref): - self.num = num - self.ref = ref - - class History(object): def __init__(self, metainterp_sd): self.inputargs = None diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -270,8 +270,17 @@ """ self.optimize_loop(ops, expected, expected_preamble=preamble) - - - - - + def 
test_stm_location_1(self): + ops = """ + [i1, p1] + setfield_gc(p1, i1, descr=adescr) {81} + call(i1, descr=nonwritedescr) {90} + jump(i1, p1) + """ + expected = """ + [i1, p1] + call(i1, descr=nonwritedescr) {90} + setfield_gc(p1, i1, descr=adescr) {81} + jump(i1, p1) + """ + self.optimize_loop(ops, expected) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -377,7 +377,8 @@ assert box1.__class__ == box2.__class__ remap[box2] = box1 assert equaloplists(optimized.operations, - expected.operations, False, remap, text_right) + expected.operations, False, remap, text_right, + expect_stm_locations_from_right=True) def _do_optimize_loop(self, loop, call_pure_results, stm_info): diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -125,7 +125,7 @@ # ____________________________________________________________ def equaloplists(oplist1, oplist2, strict_fail_args=True, remap={}, - text_right=None): + text_right=None, expect_stm_locations_from_right=False): # try to use the full width of the terminal to display the list # unfortunately, does not work with the default capture method of py.test # (which is fd), you you need to use either -s or --capture=sys, else you @@ -184,5 +184,8 @@ break else: assert False + if expect_stm_locations_from_right and op2.stm_location is not None: + assert op1.stm_location is not None + assert op1.stm_location.num == op2.stm_location.num assert len(oplist1) == len(oplist2) return True diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -12,7 +12,7 @@ from rpython.jit.metainterp.jitprof import 
EmptyProfiler from rpython.jit.metainterp.logger import Logger from rpython.jit.metainterp.optimizeopt.util import args_dict -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, StmLocation from rpython.rlib import nonconst, rstack from rpython.rlib.debug import debug_start, debug_stop, debug_print, make_sure_not_resized from rpython.rlib.jit import Counters @@ -1130,7 +1130,7 @@ idx_num, idx_ref = report_location num = greenkey[idx_num].getint() ref = greenkey[idx_ref].getref_base() - location = history.StmLocation(num, ref) + location = StmLocation(num, ref) self.metainterp.history.stm_location = location @arguments("box", "label") @@ -2089,8 +2089,8 @@ # if (self.staticdata.config.translation.stm and isinstance(key, compile.ResumeGuardDescr)): - location = history.StmLocation(key.stm_location_int, - key.stm_location_ref) + location = StmLocation(key.stm_location_int, + key.stm_location_ref) self.history.stm_location = location # self.interpret() diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -371,6 +371,12 @@ self._args[i] = box +class StmLocation(object): + def __init__(self, num, ref): + self.num = num + self.ref = ref + + # ____________________________________________________________ _oplist = [ diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -7,7 +7,7 @@ from rpython.jit.tool.oparser_model import get_model from rpython.jit.metainterp.resoperation import rop, ResOperation, \ ResOpWithDescr, N_aryOp, \ - UnaryOp, PlainResOp + UnaryOp, PlainResOp, StmLocation r_skip_thread = re.compile(r'^(\d+#)?') @@ -223,6 +223,8 @@ if rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST: i = line.find('[', endnum) + 1 j = line.find(']', i) + if j >= 0: + endnum = j + 1 if (i <= 0 or j <= 0) and not 
self.nonstrict: raise ParseError("missing fail_args for guard operation") fail_args = [] @@ -251,7 +253,16 @@ if descr is None and self.invent_fail_descr: descr = self.original_jitcell_token - return opnum, args, descr, fail_args + if line.find('{', endnum) >= 0: + i = line.find('{', endnum) + 1 + j = line.find('}', i) + if j < 0: + raise ParseError("missing '}' after '{'") + stm_location = int(line[i:j].strip()) + else: + stm_location = None + + return opnum, args, descr, fail_args, stm_location def create_op(self, opnum, args, result, descr): if opnum == ESCAPE_OP.OPNUM: @@ -271,7 +282,7 @@ res, op = line.split("=", 1) res = res.strip() op = op.strip() - opnum, args, descr, fail_args = self.parse_op(op) + opnum, args, descr, fail_args, stm_location = self.parse_op(op) if res in self.vars: raise ParseError("Double assign to var %s in line: %s" % (res, line)) rvar = self.box_for_var(res) @@ -279,13 +290,17 @@ res = self.create_op(opnum, args, rvar, descr) if fail_args is not None: res.setfailargs(fail_args) + if stm_location is not None: + res.stm_location = StmLocation(stm_location, '?') return res def parse_op_no_result(self, line): - opnum, args, descr, fail_args = self.parse_op(line) + opnum, args, descr, fail_args, stm_location = self.parse_op(line) res = self.create_op(opnum, args, None, descr) if fail_args is not None: res.setfailargs(fail_args) + if stm_location is not None: + res.stm_location = StmLocation(stm_location, '?') return res def parse_next_op(self, line): diff --git a/rpython/jit/tool/test/test_oparser.py b/rpython/jit/tool/test/test_oparser.py --- a/rpython/jit/tool/test/test_oparser.py +++ b/rpython/jit/tool/test/test_oparser.py @@ -184,7 +184,7 @@ def test_attach_comment_to_loop(self): loop = self.parse(self.example_loop_log, no_namespace=True) - assert loop.comment == ' # bridge out of Guard12, 6 ops' + assert loop.comment.lstrip() == '# bridge out of Guard12, 6 ops' def test_parse_new_with_comma(self): # this is generated by PYPYJITLOG, 
check that we can handle it @@ -233,6 +233,17 @@ assert len(loop.operations) == 2 assert loop.last_offset == 30 + def test_stm_location(self): + x = """ + [i0] + p1 = escape(i0) {42} + p2 = int_add(i0, i0) {81} + """ + loop = self.parse(x) + [op0, op1] = loop.operations + assert op0.stm_location.num == 42 + assert op1.stm_location.num == 81 + class TestOpParser(BaseTestOparser): From noreply at buildbot.pypy.org Mon Apr 21 18:10:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 21 Apr 2014 18:10:29 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Add a comment Message-ID: <20140421161029.EF6201C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70813:099f24beeb6d Date: 2014-04-21 18:09 +0200 http://bitbucket.org/pypy/pypy/changeset/099f24beeb6d/ Log: Add a comment diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -271,6 +271,8 @@ self.optimize_loop(ops, expected, expected_preamble=preamble) def test_stm_location_1(self): + # This tests setfield_gc on a non-virtual. 
On a virtual, it doesn't + # really matter, because STM conflicts are impossible anyway ops = """ [i1, p1] setfield_gc(p1, i1, descr=adescr) {81} From noreply at buildbot.pypy.org Mon Apr 21 18:23:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 21 Apr 2014 18:23:06 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Next tests to pass Message-ID: <20140421162306.9F0A11C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70814:ea3d7a528f1a Date: 2014-04-21 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/ea3d7a528f1a/ Log: Next tests to pass diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -110,7 +110,8 @@ operations = self.gc_ll_descr.rewrite_assembler(self.cpu, ops.operations, []) - equaloplists(operations, expected.operations) + equaloplists(operations, expected.operations, + expect_stm_locations_from_right=True) lltype.free(frame_info, flavor='raw') class FakeTracker(object): diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1187,3 +1187,45 @@ %(comment)s stm_read(p4) jump(p2) """ % d, uxdescr=uxdescr, vdescr=vdescr) + + def test_stm_location_1(self): + self.check_rewrite(""" + [p1, p2] + setfield_gc(p1, p2, descr=tzdescr) {50} + jump() + """, """ + [p1, p2] + cond_call_gc_wb(p1, descr=wbdescr) {50} + setfield_gc(p1, p2, descr=tzdescr) {50} + jump() + """) + + def test_stm_location_2(self): + self.check_rewrite(""" + [i1] + i3 = getfield_raw(i1, descr=tydescr) {52} + jump(i3) + """, """ + [i1] + $INEV {52} + i3 = getfield_raw(i1, descr=tydescr) {52} + jump(i3) + """) + + def test_stm_location_3(self): + self.check_rewrite(""" + [i0, f0] + i2 
= call_assembler(i0, f0, descr=casmdescr) {54} + guard_not_forced() [] {55} + """, """ + [i0, f0] + i1 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_depth) + p1 = call_malloc_nursery_varsize_frame(i1) + setfield_gc(p1, 0, descr=tiddescr) + setfield_gc(p1, i1, descr=framelendescr) + setfield_gc(p1, ConstClass(frame_info), descr=jf_frame_info) + setarrayitem_gc(p1, 0, i0, descr=signedframedescr) + setarrayitem_gc(p1, 1, f0, descr=floatframedescr) + i3 = call_assembler(p1, descr=casmdescr) {54} + guard_not_forced() [] {55} + """) From noreply at buildbot.pypy.org Mon Apr 21 18:23:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 21 Apr 2014 18:23:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Pass the tests Message-ID: <20140421162307.C20B21C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70815:cd1d172bfa05 Date: 2014-04-21 18:22 +0200 http://bitbucket.org/pypy/pypy/changeset/cd1d172bfa05/ Log: Pass the tests diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -229,8 +229,10 @@ args = [frame, arglist[jd.index_of_virtualizable]] else: args = [frame] - self.newops.append(ResOperation(rop.CALL_ASSEMBLER, args, - op.result, op.getdescr())) + op1 = ResOperation(rop.CALL_ASSEMBLER, args, + op.result, op.getdescr()) + op1.stm_location = op.stm_location + self.newops.append(op1) # ---------- @@ -406,29 +408,31 @@ def handle_write_barrier_setfield(self, op): val = op.getarg(0) if self.must_apply_write_barrier(val, op.getarg(1)): - self.gen_write_barrier(val) + self.gen_write_barrier(val, op.stm_location) self.newops.append(op) def handle_write_barrier_setinteriorfield(self, op): val = op.getarg(0) if self.must_apply_write_barrier(val, op.getarg(2)): - self.gen_write_barrier(val) + self.gen_write_barrier(val, op.stm_location) self.newops.append(op) def 
handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) if self.must_apply_write_barrier(val, op.getarg(2)): - self.gen_write_barrier_array(val, op.getarg(1)) + self.gen_write_barrier_array(val, op.getarg(1), op.stm_location) self.newops.append(op) - def gen_write_barrier(self, v_base): + def gen_write_barrier(self, v_base, stm_location): write_barrier_descr = self.gc_ll_descr.write_barrier_descr args = [v_base] - self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, - descr=write_barrier_descr)) + op = ResOperation(rop.COND_CALL_GC_WB, args, None, + descr=write_barrier_descr) + op.stm_location = stm_location + self.newops.append(op) self.write_barrier_applied[v_base] = None - def gen_write_barrier_array(self, v_base, v_index): + def gen_write_barrier_array(self, v_base, v_index, stm_location): write_barrier_descr = self.gc_ll_descr.write_barrier_descr if write_barrier_descr.has_write_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too @@ -439,14 +443,15 @@ if length >= LARGE: # unknown or too big: produce a write_barrier_from_array args = [v_base, v_index] - self.newops.append( - ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, - descr=write_barrier_descr)) + op = ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None, + descr=write_barrier_descr) + op.stm_location = stm_location + self.newops.append(op) # a WB_ARRAY is not enough to prevent any future write # barriers, so don't add to 'write_barrier_applied'! 
return # fall-back case: produce a write_barrier - self.gen_write_barrier(v_base) + self.gen_write_barrier(v_base, stm_location) def round_up_for_allocation(self, size): if not self.gc_ll_descr.round_up: diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -126,17 +126,19 @@ @specialize.arg(1) - def _do_stm_call(self, funcname, args, result): + def _do_stm_call(self, funcname, args, result, stm_location): addr = self.gc_ll_descr.get_malloc_fn_addr(funcname) descr = getattr(self.gc_ll_descr, funcname + '_descr') op1 = ResOperation(rop.CALL, [ConstInt(addr)] + args, result, descr=descr) + op1.stm_location = stm_location self.newops.append(op1) def fallback_inevitable(self, op): if not self.always_inevitable: self.emitting_an_operation_that_can_collect() - self._do_stm_call('stm_try_inevitable', [], None) + self._do_stm_call('stm_try_inevitable', [], None, + op.stm_location) self.always_inevitable = True self.newops.append(op) debug_print("fallback for", op.repr()) @@ -153,5 +155,5 @@ def handle_setters_for_pure_fields(self, op, targetindex): val = op.getarg(targetindex) if self.must_apply_write_barrier(val): - self.gen_write_barrier(val) + self.gen_write_barrier(val, op.stm_location) self.newops.append(op) From noreply at buildbot.pypy.org Mon Apr 21 18:26:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 21 Apr 2014 18:26:42 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Preserve the stm_location when introducing CALLs Message-ID: <20140421162642.A01AD1C0721@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70816:fa2cc2d46b29 Date: 2014-04-21 18:26 +0200 http://bitbucket.org/pypy/pypy/changeset/fa2cc2d46b29/ Log: Preserve the stm_location when introducing CALLs diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- 
a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -72,8 +72,9 @@ # replace CALL_PURE with just CALL args = op.getarglist() - self.emit_operation(ResOperation(rop.CALL, args, op.result, - op.getdescr())) + op1 = ResOperation(rop.CALL, args, op.result, op.getdescr()) + op1.stm_location = op.stm_location + self.emit_operation(op1) def optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -16,8 +16,9 @@ def optimize_CALL_PURE(self, op): args = op.getarglist() - self.emit_operation(ResOperation(rop.CALL, args, op.result, - op.getdescr())) + op1 = ResOperation(rop.CALL, args, op.result, op.getdescr()) + op1.stm_location = op.stm_location + self.emit_operation(op1) def optimize_CALL_LOOPINVARIANT(self, op): op = op.copy_and_change(rop.CALL) From noreply at buildbot.pypy.org Mon Apr 21 18:36:31 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 21 Apr 2014 18:36:31 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Untabify Python.asdl. Only whitespace/comment changes. Message-ID: <20140421163631.8C8741C0721@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70817:2e7421f70317 Date: 2014-04-21 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/2e7421f70317/ Log: Untabify Python.asdl. Only whitespace/comment changes. 
diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl --- a/pypy/interpreter/astcompiler/tools/Python.asdl +++ b/pypy/interpreter/astcompiler/tools/Python.asdl @@ -1,123 +1,122 @@ --- ASDL's four builtin types are identifier, int, string, object +-- ASDL's five builtin types are identifier, int, string, bytes, object -module Python version "$Revision$" +module Python { - mod = Module(stmt* body) - | Interactive(stmt* body) - | Expression(expr body) + mod = Module(stmt* body) + | Interactive(stmt* body) + | Expression(expr body) - -- not really an actual node but useful in Jython's typesystem. - | Suite(stmt* body) + -- not really an actual node but useful in Jython's typesystem. + | Suite(stmt* body) - stmt = FunctionDef(identifier name, arguments args, + stmt = FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list, expr? returns) - | ClassDef(identifier name, - expr* bases, - keyword* keywords, - expr? starargs, - expr? kwargs, - stmt* body, - expr* decorator_list) - | Return(expr? value) + | ClassDef(identifier name, + expr* bases, + keyword* keywords, + expr? starargs, + expr? kwargs, + stmt* body, + expr* decorator_list) + | Return(expr? value) - | Delete(expr* targets) - | Assign(expr* targets, expr value) - | AugAssign(expr target, operator op, expr value) + | Delete(expr* targets) + | Assign(expr* targets, expr value) + | AugAssign(expr target, operator op, expr value) - -- use 'orelse' because else is a keyword in target languages - | For(expr target, expr iter, stmt* body, stmt* orelse) - | While(expr test, stmt* body, stmt* orelse) - | If(expr test, stmt* body, stmt* orelse) - | With(expr context_expr, expr? optional_vars, stmt* body) + -- use 'orelse' because else is a keyword in target languages + | For(expr target, expr iter, stmt* body, stmt* orelse) + | While(expr test, stmt* body, stmt* orelse) + | If(expr test, stmt* body, stmt* orelse) + | With(expr context_expr, expr? 
optional_vars, stmt* body) - | Raise(expr? exc, expr? cause) - | TryExcept(stmt* body, excepthandler* handlers, stmt* orelse) - | TryFinally(stmt* body, stmt* finalbody) - | Assert(expr test, expr? msg) + | Raise(expr? exc, expr? cause) + | TryExcept(stmt* body, excepthandler* handlers, stmt* orelse) + | TryFinally(stmt* body, stmt* finalbody) + | Assert(expr test, expr? msg) - | Import(alias* names) - | ImportFrom(identifier? module, alias* names, int? level) + | Import(alias* names) + | ImportFrom(identifier? module, alias* names, int? level) - | Global(identifier* names) - | Nonlocal(identifier* names) - | Expr(expr value) - | Pass | Break | Continue + | Global(identifier* names) + | Nonlocal(identifier* names) + | Expr(expr value) + | Pass | Break | Continue - -- XXX Jython will be different - -- col_offset is the byte offset in the utf8 string the parser uses - attributes (int lineno, int col_offset) + -- XXX Jython will be different + -- col_offset is the byte offset in the utf8 string the parser uses + attributes (int lineno, int col_offset) - -- BoolOp() can use left & right? - expr = BoolOp(boolop op, expr* values) - | BinOp(expr left, operator op, expr right) - | UnaryOp(unaryop op, expr operand) - | Lambda(arguments args, expr body) - | IfExp(expr test, expr body, expr orelse) - | Dict(expr* keys, expr* values) - | Set(expr* elts) - | ListComp(expr elt, comprehension* generators) - | SetComp(expr elt, comprehension* generators) - | DictComp(expr key, expr value, comprehension* generators) - | GeneratorExp(expr elt, comprehension* generators) - -- the grammar constrains where yield expressions can occur - | Yield(expr? value) - | YieldFrom(expr value) - -- need sequences for compare to distinguish between - -- x < 4 < 3 and (x < 4) < 3 - | Compare(expr left, cmpop* ops, expr* comparators) - | Call(expr func, expr* args, keyword* keywords, - expr? starargs, expr? kwargs) - | Num(object n) -- a number as a PyObject. 
- | Str(string s) -- need to specify raw, unicode, etc? - | Bytes(string s) - | Ellipsis - -- other literals? bools? + -- BoolOp() can use left & right? + expr = BoolOp(boolop op, expr* values) + | BinOp(expr left, operator op, expr right) + | UnaryOp(unaryop op, expr operand) + | Lambda(arguments args, expr body) + | IfExp(expr test, expr body, expr orelse) + | Dict(expr* keys, expr* values) + | Set(expr* elts) + | ListComp(expr elt, comprehension* generators) + | SetComp(expr elt, comprehension* generators) + | DictComp(expr key, expr value, comprehension* generators) + | GeneratorExp(expr elt, comprehension* generators) + -- the grammar constrains where yield expressions can occur + | Yield(expr? value) + | YieldFrom(expr value) + -- need sequences for compare to distinguish between + -- x < 4 < 3 and (x < 4) < 3 + | Compare(expr left, cmpop* ops, expr* comparators) + | Call(expr func, expr* args, keyword* keywords, + expr? starargs, expr? kwargs) + | Num(object n) -- a number as a PyObject. + | Str(string s) -- need to specify raw, unicode, etc? + | Bytes(string s) + | Ellipsis + -- other literals? bools? 
- -- the following expression can appear in assignment context - | Attribute(expr value, identifier attr, expr_context ctx) - | Subscript(expr value, slice slice, expr_context ctx) - | Starred(expr value, expr_context ctx) - | Name(identifier id, expr_context ctx) - | List(expr* elts, expr_context ctx) - | Tuple(expr* elts, expr_context ctx) + -- the following expression can appear in assignment context + | Attribute(expr value, identifier attr, expr_context ctx) + | Subscript(expr value, slice slice, expr_context ctx) + | Starred(expr value, expr_context ctx) + | Name(identifier id, expr_context ctx) + | List(expr* elts, expr_context ctx) + | Tuple(expr* elts, expr_context ctx) - -- PyPy modification - | Const(object value) + -- PyPy modification + | Const(object value) - -- col_offset is the byte offset in the utf8 string the parser uses - attributes (int lineno, int col_offset) + -- col_offset is the byte offset in the utf8 string the parser uses + attributes (int lineno, int col_offset) - expr_context = Load | Store | Del | AugLoad | AugStore | Param + expr_context = Load | Store | Del | AugLoad | AugStore | Param - slice = Slice(expr? lower, expr? upper, expr? step) - | ExtSlice(slice* dims) - | Index(expr value) + slice = Slice(expr? lower, expr? upper, expr? step) + | ExtSlice(slice* dims) + | Index(expr value) - boolop = And | Or + boolop = And | Or - operator = Add | Sub | Mult | Div | Mod | Pow | LShift + operator = Add | Sub | Mult | Div | Mod | Pow | LShift | RShift | BitOr | BitXor | BitAnd | FloorDiv - unaryop = Invert | Not | UAdd | USub + unaryop = Invert | Not | UAdd | USub - cmpop = Eq | NotEq | Lt | LtE | Gt | GtE | Is | IsNot | In | NotIn + cmpop = Eq | NotEq | Lt | LtE | Gt | GtE | Is | IsNot | In | NotIn - comprehension = (expr target, expr iter, expr* ifs) + comprehension = (expr target, expr iter, expr* ifs) - -- not sure what to call the first argument for raise and except - excepthandler = ExceptHandler(expr? type, identifier? 
name, stmt* body) - attributes (int lineno, int col_offset) + excepthandler = ExceptHandler(expr? type, identifier? name, stmt* body) + attributes (int lineno, int col_offset) - arguments = (arg* args, identifier? vararg, expr? varargannotation, + arguments = (arg* args, identifier? vararg, expr? varargannotation, arg* kwonlyargs, identifier? kwarg, expr? kwargannotation, expr* defaults, expr* kw_defaults) - arg = (identifier arg, expr? annotation) + arg = (identifier arg, expr? annotation) - -- keyword arguments supplied to call - keyword = (identifier arg, expr value) + -- keyword arguments supplied to call + keyword = (identifier arg, expr value) - -- import name with optional 'as' alias. - alias = (identifier name, identifier? asname) + -- import name with optional 'as' alias. + alias = (identifier name, identifier? asname) } From noreply at buildbot.pypy.org Mon Apr 21 18:36:32 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 21 Apr 2014 18:36:32 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Don't parse version from Python.asdl anymore. Message-ID: <20140421163632.B23621C0721@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70818:19c656d8c9ca Date: 2014-04-21 16:08 +0200 http://bitbucket.org/pypy/pypy/changeset/19c656d8c9ca/ Log: Don't parse version from Python.asdl anymore. 
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py --- a/pypy/interpreter/astcompiler/tools/asdl.py +++ b/pypy/interpreter/astcompiler/tools/asdl.py @@ -108,26 +108,19 @@ def error(self, tok): raise ASDLSyntaxError(tok.lineno, tok) - def p_module_0(self, (module, name, version, _0, _1)): - " module ::= Id Id version { } " + def p_module_0(self, (module, name, _0, _1)): + " module ::= Id Id { } " if module.value != "module": raise ASDLSyntaxError(module.lineno, msg="expected 'module', found %s" % module) - return Module(name, None, version) + return Module(name, None) - def p_module(self, (module, name, version, _0, definitions, _1)): - " module ::= Id Id version { definitions } " + def p_module(self, (module, name, _0, definitions, _1)): + " module ::= Id Id { definitions } " if module.value != "module": raise ASDLSyntaxError(module.lineno, msg="expected 'module', found %s" % module) - return Module(name, definitions, version) - - def p_version(self, (version, V)): - "version ::= Id String" - if version.value != "version": - raise ASDLSyntaxError(version.lineno, - msg="expected 'version', found %" % version) - return V + return Module(name, definitions) def p_definition_0(self, (definition,)): " definitions ::= definition " @@ -228,10 +221,9 @@ pass # a marker class class Module(AST): - def __init__(self, name, dfns, version): + def __init__(self, name, dfns): self.name = name self.dfns = dfns - self.version = version self.types = {} # maps type name to value (from dfns) for type in dfns: self.types[type.name.value] = type.value From noreply at buildbot.pypy.org Mon Apr 21 18:36:34 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 21 Apr 2014 18:36:34 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Reflect "with" statements with multiple items in the AST. 
Message-ID: <20140421163634.0A7561C0721@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70819:0461ffa6c86b Date: 2014-04-21 17:58 +0200 http://bitbucket.org/pypy/pypy/changeset/0461ffa6c86b/ Log: Reflect "with" statements with multiple items in the AST. (CPython issue #12106) diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -683,34 +683,39 @@ class With(stmt): - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): - self.context_expr = context_expr - self.optional_vars = optional_vars + def __init__(self, items, body, lineno, col_offset): + self.items = items + self.w_items = None self.body = body self.w_body = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 + self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_With(self) def mutate_over(self, visitor): - self.context_expr = self.context_expr.mutate_over(visitor) - if self.optional_vars: - self.optional_vars = self.optional_vars.mutate_over(visitor) + if self.items: + visitor._mutate_sequence(self.items) if self.body: visitor._mutate_sequence(self.body) return visitor.visit_With(self) def sync_app_attrs(self, space): - if (self.initialization_state & ~8) ^ 23: - self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') + if (self.initialization_state & ~0) ^ 15: + self.missing_field(space, ['lineno', 'col_offset', 'items', 'body'], 'With') else: - if not self.initialization_state & 8: - self.optional_vars = None - self.context_expr.sync_app_attrs(space) - if self.optional_vars: - self.optional_vars.sync_app_attrs(space) + pass + w_list = self.w_items + if w_list is not None: + list_w = space.listview(w_list) + if list_w: + self.items = [space.interp_w(withitem, w_obj) for w_obj in list_w] + else: + self.items = None + if self.items is not None: + for 
node in self.items: + node.sync_app_attrs(space) w_list = self.w_body if w_list is not None: list_w = space.listview(w_list) @@ -2506,6 +2511,32 @@ if not self.initialization_state & 2: self.asname = None +class withitem(AST): + + def __init__(self, context_expr, optional_vars): + self.context_expr = context_expr + self.optional_vars = optional_vars + self.initialization_state = 3 + + def mutate_over(self, visitor): + self.context_expr = self.context_expr.mutate_over(visitor) + if self.optional_vars: + self.optional_vars = self.optional_vars.mutate_over(visitor) + return visitor.visit_withitem(self) + + def walkabout(self, visitor): + visitor.visit_withitem(self) + + def sync_app_attrs(self, space): + if (self.initialization_state & ~2) ^ 1: + self.missing_field(space, ['context_expr', None], 'withitem') + else: + if not self.initialization_state & 2: + self.optional_vars = None + self.context_expr.sync_app_attrs(space) + if self.optional_vars: + self.optional_vars.sync_app_attrs(space) + class ASTVisitor(object): def visit_sequence(self, seq): @@ -2649,6 +2680,8 @@ return self.default_visitor(node) def visit_alias(self, node): return self.default_visitor(node) + def visit_withitem(self, node): + return self.default_visitor(node) class GenericASTVisitor(ASTVisitor): @@ -2713,9 +2746,7 @@ self.visit_sequence(node.orelse) def visit_With(self, node): - node.context_expr.walkabout(self) - if node.optional_vars: - node.optional_vars.walkabout(self) + self.visit_sequence(node.items) self.visit_sequence(node.body) def visit_Raise(self, node): @@ -2903,6 +2934,11 @@ def visit_alias(self, node): pass + def visit_withitem(self, node): + node.context_expr.walkabout(self) + if node.optional_vars: + node.optional_vars.walkabout(self) + mod.typedef = typedef.TypeDef("mod", AST.typedef, @@ -4163,66 +4199,30 @@ __init__=interp2app(If_init), ) -def With_get_context_expr(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'context_expr') - if w_obj 
is not None: - return w_obj +def With_get_items(space, w_self): if not w_self.initialization_state & 4: - raise_attriberr(space, w_self, 'context_expr') - return space.wrap(w_self.context_expr) - -def With_set_context_expr(space, w_self, w_new_value): - try: - w_self.context_expr = space.interp_w(expr, w_new_value, False) - if type(w_self.context_expr) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'context_expr', w_new_value) - w_self.initialization_state &= ~4 - return - w_self.deldictvalue(space, 'context_expr') + raise_attriberr(space, w_self, 'items') + if w_self.w_items is None: + if w_self.items is None: + list_w = [] + else: + list_w = [space.wrap(node) for node in w_self.items] + w_list = space.newlist(list_w) + w_self.w_items = w_list + return w_self.w_items + +def With_set_items(space, w_self, w_new_value): + w_self.w_items = w_new_value w_self.initialization_state |= 4 -def With_del_context_expr(space, w_self): - # Check if the element exists, raise appropriate exceptions - With_get_context_expr(space, w_self) - w_self.deldictvalue(space, 'context_expr') +def With_del_items(space, w_self): + # Check if the element exists, raise appropriate exceptions + With_get_items(space, w_self) + w_self.deldictvalue(space, 'items') w_self.initialization_state &= ~4 -def With_get_optional_vars(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'optional_vars') - if w_obj is not None: - return w_obj +def With_get_body(space, w_self): if not w_self.initialization_state & 8: - raise_attriberr(space, w_self, 'optional_vars') - return space.wrap(w_self.optional_vars) - -def With_set_optional_vars(space, w_self, w_new_value): - try: - w_self.optional_vars = space.interp_w(expr, w_new_value, True) - if type(w_self.optional_vars) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except 
OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'optional_vars', w_new_value) - w_self.initialization_state &= ~8 - return - w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state |= 8 - -def With_del_optional_vars(space, w_self): - # Check if the element exists, raise appropriate exceptions - With_get_optional_vars(space, w_self) - w_self.deldictvalue(space, 'optional_vars') - w_self.initialization_state &= ~8 - -def With_get_body(space, w_self): - if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: @@ -4235,22 +4235,23 @@ def With_set_body(space, w_self, w_new_value): w_self.w_body = w_new_value - w_self.initialization_state |= 16 + w_self.initialization_state |= 8 def With_del_body(space, w_self): # Check if the element exists, raise appropriate exceptions With_get_body(space, w_self) w_self.deldictvalue(space, 'body') - w_self.initialization_state &= ~16 - -_With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) + w_self.initialization_state &= ~8 + +_With_field_unroller = unrolling_iterable(['items', 'body']) def With_init(space, w_self, __args__): w_self = space.descr_self_interp_w(With, w_self) + w_self.w_items = None w_self.w_body = None args_w, kwargs_w = __args__.unpack() if args_w: - if len(args_w) != 3: - w_err = space.wrap("With constructor takes either 0 or 3 positional arguments") + if len(args_w) != 2: + w_err = space.wrap("With constructor takes either 0 or 2 positional arguments") raise OperationError(space.w_TypeError, w_err) i = 0 for field in _With_field_unroller: @@ -4262,9 +4263,8 @@ With.typedef = typedef.TypeDef("With", stmt.typedef, __module__='_ast', - _fields=_FieldsWrapper(['context_expr', 'optional_vars', 'body']), - context_expr=typedef.GetSetProperty(With_get_context_expr, With_set_context_expr, With_del_context_expr, cls=With), - 
optional_vars=typedef.GetSetProperty(With_get_optional_vars, With_set_optional_vars, With_del_optional_vars, cls=With), + _fields=_FieldsWrapper(['items', 'body']), + items=typedef.GetSetProperty(With_get_items, With_set_items, With_del_items, cls=With), body=typedef.GetSetProperty(With_get_body, With_set_body, With_del_body, cls=With), __new__=interp2app(get_AST_new(With)), __init__=interp2app(With_init), @@ -8365,3 +8365,86 @@ __init__=interp2app(alias_init), ) +def withitem_get_context_expr(space, w_self): + if w_self.w_dict is not None: + w_obj = w_self.getdictvalue(space, 'context_expr') + if w_obj is not None: + return w_obj + if not w_self.initialization_state & 1: + raise_attriberr(space, w_self, 'context_expr') + return space.wrap(w_self.context_expr) + +def withitem_set_context_expr(space, w_self, w_new_value): + try: + w_self.context_expr = space.interp_w(expr, w_new_value, False) + if type(w_self.context_expr) is expr: + raise OperationError(space.w_TypeError, space.w_None) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + w_self.setdictvalue(space, 'context_expr', w_new_value) + w_self.initialization_state &= ~1 + return + w_self.deldictvalue(space, 'context_expr') + w_self.initialization_state |= 1 + +def withitem_del_context_expr(space, w_self): + # Check if the element exists, raise appropriate exceptions + withitem_get_context_expr(space, w_self) + w_self.deldictvalue(space, 'context_expr') + w_self.initialization_state &= ~1 + +def withitem_get_optional_vars(space, w_self): + if w_self.w_dict is not None: + w_obj = w_self.getdictvalue(space, 'optional_vars') + if w_obj is not None: + return w_obj + if not w_self.initialization_state & 2: + raise_attriberr(space, w_self, 'optional_vars') + return space.wrap(w_self.optional_vars) + +def withitem_set_optional_vars(space, w_self, w_new_value): + try: + w_self.optional_vars = space.interp_w(expr, w_new_value, True) + if type(w_self.optional_vars) is expr: + raise 
OperationError(space.w_TypeError, space.w_None) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + w_self.setdictvalue(space, 'optional_vars', w_new_value) + w_self.initialization_state &= ~2 + return + w_self.deldictvalue(space, 'optional_vars') + w_self.initialization_state |= 2 + +def withitem_del_optional_vars(space, w_self): + # Check if the element exists, raise appropriate exceptions + withitem_get_optional_vars(space, w_self) + w_self.deldictvalue(space, 'optional_vars') + w_self.initialization_state &= ~2 + +_withitem_field_unroller = unrolling_iterable(['context_expr', 'optional_vars']) +def withitem_init(space, w_self, __args__): + w_self = space.descr_self_interp_w(withitem, w_self) + args_w, kwargs_w = __args__.unpack() + if args_w: + if len(args_w) != 2: + w_err = space.wrap("withitem constructor takes either 0 or 2 positional arguments") + raise OperationError(space.w_TypeError, w_err) + i = 0 + for field in _withitem_field_unroller: + space.setattr(w_self, space.wrap(field), args_w[i]) + i += 1 + for field, w_value in kwargs_w.iteritems(): + space.setattr(w_self, space.wrap(field), w_value) + +withitem.typedef = typedef.TypeDef("withitem", + AST.typedef, + __module__='_ast', + _fields=_FieldsWrapper(['context_expr', 'optional_vars']), + context_expr=typedef.GetSetProperty(withitem_get_context_expr, withitem_set_context_expr, withitem_del_context_expr, cls=withitem), + optional_vars=typedef.GetSetProperty(withitem_get_optional_vars, withitem_set_optional_vars, withitem_del_optional_vars, cls=withitem), + __new__=interp2app(get_AST_new(withitem)), + __init__=interp2app(withitem_init), +) + diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -433,6 +433,21 @@ body = [wi] return wi + def handle_with_item(self, item_node): + test = self.handle_expr(item_node.children[0]) + if 
len(item_node.children) == 3: + target = self.handle_expr(item_node.children[2]) + self.set_context(target, ast.Store) + else: + target = None + return ast.withitem(test, target) + + def handle_with_stmt(self, with_node): + body = self.handle_suite(with_node.children[-1]) + items = [self.handle_with_item(with_node.children[i]) + for i in range(1, len(with_node.children)-2, 2)] + return ast.With(items, body, with_node.lineno, with_node.column) + def handle_classdef(self, classdef_node, decorators=None): name_node = classdef_node.children[1] name = self.new_identifier(name_node.value) diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -803,17 +803,24 @@ def visit_With(self, wih): self.update_position(wih.lineno, True) + self.handle_withitem(wih, 0) + + def handle_withitem(self, wih, pos): body_block = self.new_block() cleanup = self.new_block() - wih.context_expr.walkabout(self) + witem = wih.items[pos] + witem.context_expr.walkabout(self) self.emit_jump(ops.SETUP_WITH, cleanup) self.use_next_block(body_block) self.push_frame_block(F_BLOCK_FINALLY, body_block) - if wih.optional_vars: - wih.optional_vars.walkabout(self) + if witem.optional_vars: + witem.optional_vars.walkabout(self) else: self.emit_op(ops.POP_TOP) - self.visit_sequence(wih.body) + if pos == len(wih.items) - 1: + self.visit_sequence(wih.body) + else: + self.handle_withitem(wih, pos + 1) self.emit_op(ops.POP_BLOCK) self.pop_frame_block(F_BLOCK_FINALLY, body_block) self.load_const(self.space.w_None) diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -499,15 +499,16 @@ def visit_With(self, wih): self.scope.new_temporary_name() - if wih.optional_vars: - self.scope.new_temporary_name() - wih.context_expr.walkabout(self) - if 
wih.optional_vars: - wih.optional_vars.walkabout(self) + self.visit_sequence(wih.items) self.scope.note_try_start(wih) self.visit_sequence(wih.body) self.scope.note_try_end(wih) + def visit_withitem(self, witem): + witem.context_expr.walkabout(self) + if witem.optional_vars: + witem.optional_vars.walkabout(self) + def visit_arguments(self, arguments): scope = self.scope assert isinstance(scope, FunctionScope) # Annotator hint. diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -405,33 +405,33 @@ def test_with(self): wi = self.get_first_stmt("with x: pass") assert isinstance(wi, ast.With) - assert isinstance(wi.context_expr, ast.Name) + assert len(wi.items) == 1 + assert isinstance(wi.items[0], ast.withitem) + assert isinstance(wi.items[0].context_expr, ast.Name) + assert wi.items[0].optional_vars is None assert len(wi.body) == 1 - assert wi.optional_vars is None wi = self.get_first_stmt("with x as y: pass") - assert isinstance(wi.context_expr, ast.Name) + assert isinstance(wi.items[0].context_expr, ast.Name) assert len(wi.body) == 1 - assert isinstance(wi.optional_vars, ast.Name) - assert wi.optional_vars.ctx == ast.Store + assert isinstance(wi.items[0].optional_vars, ast.Name) + assert wi.items[0].optional_vars.ctx == ast.Store wi = self.get_first_stmt("with x as (y,): pass") - assert isinstance(wi.optional_vars, ast.Tuple) - assert len(wi.optional_vars.elts) == 1 - assert wi.optional_vars.ctx == ast.Store - assert wi.optional_vars.elts[0].ctx == ast.Store + assert isinstance(wi.items[0].optional_vars, ast.Tuple) + assert len(wi.items[0].optional_vars.elts) == 1 + assert wi.items[0].optional_vars.ctx == ast.Store + assert wi.items[0].optional_vars.elts[0].ctx == ast.Store input = "with x hi y: pass" exc = py.test.raises(SyntaxError, self.get_ast, input).value wi = 
self.get_first_stmt("with x as y, b: pass") assert isinstance(wi, ast.With) - assert isinstance(wi.context_expr, ast.Name) - assert wi.context_expr.id == "x" - assert isinstance(wi.optional_vars, ast.Name) - assert wi.optional_vars.id == "y" - assert len(wi.body) == 1 - wi = wi.body[0] - assert isinstance(wi, ast.With) - assert isinstance(wi.context_expr, ast.Name) - assert wi.context_expr.id == "b" - assert wi.optional_vars is None + assert len(wi.items) == 2 + assert isinstance(wi.items[0].context_expr, ast.Name) + assert wi.items[0].context_expr.id == "x" + assert isinstance(wi.items[0].optional_vars, ast.Name) + assert wi.items[0].optional_vars.id == "y" + assert isinstance(wi.items[1].context_expr, ast.Name) + assert wi.items[1].context_expr.id == "b" + assert wi.items[1].optional_vars is None assert len(wi.body) == 1 assert isinstance(wi.body[0], ast.Pass) diff --git a/pypy/interpreter/astcompiler/test/test_validate.py b/pypy/interpreter/astcompiler/test/test_validate.py --- a/pypy/interpreter/astcompiler/test/test_validate.py +++ b/pypy/interpreter/astcompiler/test/test_validate.py @@ -170,16 +170,15 @@ [ast.Expr(ast.Name("x", ast.Store, 0, 0), 0, 0)], 0, 0) self.stmt(i, "must have Load context") - @skip("enable when parser uses the new With construct") def test_with(self): p = ast.Pass(0, 0) - self.stmt(ast.With([], [p]), "empty items on With") + self.stmt(ast.With([], [p], 0, 0), "empty items on With") i = ast.withitem(ast.Num(self.space.wrap(3), 0, 0), None) - self.stmt(ast.With([i], []), "empty body on With") + self.stmt(ast.With([i], [], 0, 0), "empty body on With") i = ast.withitem(ast.Name("x", ast.Store, 0, 0), None) - self.stmt(ast.With([i], [p]), "must have Load context") + self.stmt(ast.With([i], [p], 0, 0), "must have Load context") i = ast.withitem(ast.Num(self.space.wrap(3), 0, 0), ast.Name("x", ast.Load, 0, 0)) - self.stmt(ast.With([i], [p]), "must have Store context") + self.stmt(ast.With([i], [p], 0, 0), "must have Store context") def 
test_raise(self): r = ast.Raise(None, ast.Num(self.space.wrap(3), 0, 0), 0, 0) @@ -189,8 +188,8 @@ r = ast.Raise(ast.Num(self.space.wrap(4), 0, 0), ast.Name("x", ast.Store, 0, 0), 0, 0) self.stmt(r, "must have Load context") - @skip("enable when parser uses the new Try construct") def test_try(self): + skip("enable when parser uses the new Try construct") p = ast.Pass(0, 0) t = ast.Try([], [], [], [p]) self.stmt(t, "empty body on Try") diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl --- a/pypy/interpreter/astcompiler/tools/Python.asdl +++ b/pypy/interpreter/astcompiler/tools/Python.asdl @@ -28,7 +28,7 @@ | For(expr target, expr iter, stmt* body, stmt* orelse) | While(expr test, stmt* body, stmt* orelse) | If(expr test, stmt* body, stmt* orelse) - | With(expr context_expr, expr? optional_vars, stmt* body) + | With(withitem* items, stmt* body) | Raise(expr? exc, expr? cause) | TryExcept(stmt* body, excepthandler* handlers, stmt* orelse) @@ -118,5 +118,7 @@ -- import name with optional 'as' alias. alias = (identifier name, identifier? asname) + + withitem = (expr context_expr, expr? 
optional_vars) } diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -218,10 +218,14 @@ self._validate_body(node.body, "If") self._validate_stmts(node.orelse) - def visit_With(self, node): + def visit_withitem(self, node): self._validate_expr(node.context_expr) if node.optional_vars: self._validate_expr(node.optional_vars, ast.Store) + + def visit_With(self, node): + self._validate_nonempty_seq(node.items, "items", "With") + self.visit_sequence(node.items) self._validate_body(node.body, "With") def visit_Raise(self, node): From noreply at buildbot.pypy.org Mon Apr 21 18:36:35 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 21 Apr 2014 18:36:35 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fix a crash when "yield from" is present in source code. Message-ID: <20140421163635.341781C0721@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70820:5f601f8ea119 Date: 2014-04-21 18:20 +0200 http://bitbucket.org/pypy/pypy/changeset/5f601f8ea119/ Log: Fix a crash when "yield from" is present in source code. Not correctly implemented though. diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -870,6 +870,12 @@ self.load_const(self.space.w_None) self.emit_op(ops.YIELD_VALUE) + def visit_YieldFrom(self, yie): + # XXX not correctly implemented. 
+ self.update_position(yie.lineno) + yie.value.walkabout(self) + self.emit_op(ops.YIELD_VALUE) + def visit_Num(self, num): self.update_position(num.lineno) self.load_const(num.n) From noreply at buildbot.pypy.org Mon Apr 21 18:36:36 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 21 Apr 2014 18:36:36 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fix import of _sqlite3 (needed by the buildbot "compress" step) Message-ID: <20140421163636.60D5C1C0721@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70821:84aebbaf3a3c Date: 2014-04-21 18:35 +0200 http://bitbucket.org/pypy/pypy/changeset/84aebbaf3a3c/ Log: Fix import of _sqlite3 (needed by the buildbot "compress" step) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -29,7 +29,8 @@ import string import sys import weakref -from threading import _get_ident as _thread_get_ident +import threading + try: from __pypy__ import newlist_hint except ImportError: @@ -458,7 +459,7 @@ self.__aggregate_instances = {} self.__collations = {} if check_same_thread: - self.__thread_ident = _thread_get_ident() + self.__thread_ident = threading.get_ident() self.Error = Error self.Warning = Warning @@ -501,7 +502,7 @@ def _check_thread(self): try: - if self.__thread_ident == _thread_get_ident(): + if self.__thread_ident == threading.get_ident(): return except AttributeError: pass @@ -509,7 +510,7 @@ raise ProgrammingError( "SQLite objects created in a thread can only be used in that " "same thread. 
The object was created in thread id %d and this " - "is thread id %d", self.__thread_ident, _thread_get_ident()) + "is thread id %d", self.__thread_ident, threading.get_ident()) def _check_thread_wrap(func): @wraps(func) From noreply at buildbot.pypy.org Mon Apr 21 20:28:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 20:28:32 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: start release branch Message-ID: <20140421182832.82D9E1C06C3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r70822:5e488affe59e Date: 2014-04-21 20:42 +0300 http://bitbucket.org/pypy/pypy/changeset/5e488affe59e/ Log: start release branch diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,88 @@ +======================================= +PyPy 2.3 - XXXX TODO +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. + +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. 
It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. + You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. 
+ +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. + +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski From noreply at buildbot.pypy.org Mon Apr 21 20:28:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 20:28:33 +0200 (CEST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20140421182833.AAACF1C06C3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70823:ba569fe1efdb Date: 2014-04-21 21:25 +0300 http://bitbucket.org/pypy/pypy/changeset/ba569fe1efdb/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -146,3 +146,6 @@ .. branch: numpy-searchsorted Implement searchsorted without sorter kwarg + +.. 
branch: openbsd-lib-prefix +add 'lib' prefix to link libraries on OpenBSD From noreply at buildbot.pypy.org Mon Apr 21 20:28:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 20:28:34 +0200 (CEST) Subject: [pypy-commit] pypy openbsd-lib-prefix: close merged branch Message-ID: <20140421182834.BC6BE1C06C3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: openbsd-lib-prefix Changeset: r70824:81323a128386 Date: 2014-04-21 21:26 +0300 http://bitbucket.org/pypy/pypy/changeset/81323a128386/ Log: close merged branch From noreply at buildbot.pypy.org Mon Apr 21 20:28:35 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 20:28:35 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into branch Message-ID: <20140421182835.F251A1C06C3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r70825:2e69f5fd041f Date: 2014-04-21 21:26 +0300 http://bitbucket.org/pypy/pypy/changeset/2e69f5fd041f/ Log: merge default into branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -146,3 +146,6 @@ .. branch: numpy-searchsorted Implement searchsorted without sorter kwarg + +.. branch: openbsd-lib-prefix +add 'lib' prefix to link libraries on OpenBSD diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -22,4 +22,3 @@ arr[j] = i i += step return arr - diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -677,23 +677,23 @@ def descr_round(self, space, decimals=0, w_out=None): if space.is_none(w_out): if self.get_dtype().is_bool(): - #numpy promotes bool.round() to float16. Go figure. + # numpy promotes bool.round() to float16. Go figure. 
w_out = W_NDimArray.from_shape(space, self.get_shape(), - descriptor.get_dtype_cache(space).w_float16dtype) + descriptor.get_dtype_cache(space).w_float16dtype) else: w_out = None elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) out = descriptor.dtype_agreement(space, [self], self.get_shape(), - w_out) + w_out) if out.get_dtype().is_bool() and self.get_dtype().is_bool(): calc_dtype = descriptor.get_dtype_cache(space).w_longdtype else: calc_dtype = out.get_dtype() if decimals == 0: - out = out.descr_view(space,space.type(self)) + out = out.descr_view(space, space.type(self)) loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out @@ -711,16 +711,16 @@ side = 'r' else: raise oefmt(space.w_ValueError, - "'%s' is an invalid value for keyword 'side'", side) + "'%s' is an invalid value for keyword 'side'", side) if len(self.get_shape()) > 1: raise OperationError(space.w_ValueError, space.wrap( - "a must be a 1-d array")) + "a must be a 1-d array") v = convert_to_array(space, w_v) - if len(v.get_shape()) >1: + if len(v.get_shape()) > 1: raise OperationError(space.w_ValueError, space.wrap( - "v must be a 1-d array-like")) - ret = W_NDimArray.from_shape(space, v.get_shape(), - descriptor.get_dtype_cache(space).w_longdtype) + "v must be a 1-d array-like") + ret = W_NDimArray.from_shape( + space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) app_searchsort(space, self, v, space.wrap(side), ret) return ret @@ -1277,35 +1277,26 @@ app_searchsort = applevel(r""" def searchsort(arr, v, side, result): - def left_find_index(a, val): + import operator + def func(a, op, val): imin = 0 imax = a.size while imin < imax: imid = imin + ((imax - imin) >> 1) - if a[imid] < val: - imin = imid +1 - else: - imax = imid - return imin - def right_find_index(a, val): - imin = 0 - imax = a.size - while imin < imax: - imid = imin + ((imax - imin) >> 1) - if a[imid] <= val: + if 
op(a[imid], val): imin = imid +1 else: imax = imid return imin if side == 'l': - func = left_find_index + op = operator.lt else: - func = right_find_index + op = operator.le if v.size < 2: - result[...] = func(arr, v) + result[...] = func(arr, op, v) else: for i in range(v.size): - result[i] = func(arr, v[i]) + result[i] = func(arr, op, v[i]) return result """, filename=__file__).interphook('searchsort') diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -362,4 +362,3 @@ assert (ret == [0, 5, 1, 2]).all() if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, "a.searchsorted(3, sorter=range(6))") - diff --git a/rpython/translator/platform/bsd.py b/rpython/translator/platform/bsd.py --- a/rpython/translator/platform/bsd.py +++ b/rpython/translator/platform/bsd.py @@ -6,6 +6,7 @@ DEFAULT_CC = 'clang' so_ext = 'so' + so_prefixes = ('lib', '') make_cmd = 'gmake' standalone_only = [] From noreply at buildbot.pypy.org Mon Apr 21 20:48:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 21 Apr 2014 20:48:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140421184843.E46D61C0543@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70826:389b28ee75ee Date: 2014-04-21 19:45 +0200 http://bitbucket.org/pypy/pypy/changeset/389b28ee75ee/ Log: in-progress diff --git a/rpython/jit/backend/llsupport/gcmap.py b/rpython/jit/backend/llsupport/gcmap.py --- a/rpython/jit/backend/llsupport/gcmap.py +++ b/rpython/jit/backend/llsupport/gcmap.py @@ -4,15 +4,29 @@ from rpython.rlib.rarithmetic import r_uint from rpython.jit.backend.llsupport.symbolic import WORD -def allocate_gcmap(assembler, frame_depth, fixed_size): +GCMAP_STM_LOCATION = 2 # xxx add this only if stm + +def allocate_gcmap(assembler, frame_depth, fixed_size, stm_location=None): size = 
frame_depth + fixed_size - malloc_size = (size // WORD // 8 + 1) + 1 + malloc_size = (size // WORD // 8 + 1) + GCMAP_STM_LOCATION + 1 rawgcmap = assembler.datablockwrapper.malloc_aligned(WORD * malloc_size, WORD) # set the length field rffi.cast(rffi.CArrayPtr(lltype.Signed), rawgcmap)[0] = malloc_size - 1 gcmap = rffi.cast(lltype.Ptr(jitframe.GCMAP), rawgcmap) # zero the area - for i in range(malloc_size - 1): + for i in range(malloc_size - 3): gcmap[i] = r_uint(0) + # write the stm_location in the last two words + raw_stm_location = extract_raw_stm_location(stm_location) + gcmap[malloc_size - 3], gcmap[malloc_size - 2] = raw_stm_location return gcmap + +def extract_raw_stm_location(stm_location): + if stm_location is not None: + num = rffi.cast(lltype.Unsigned, stm_location.num) + ref = rffi.cast(lltype.Unsigned, stm_location.ref) + else: + num = r_uint(0) + ref = r_uint(0) + return (num, ref) diff --git a/rpython/jit/backend/llsupport/jitframe.py b/rpython/jit/backend/llsupport/jitframe.py --- a/rpython/jit/backend/llsupport/jitframe.py +++ b/rpython/jit/backend/llsupport/jitframe.py @@ -160,6 +160,10 @@ MAX = 64 gcmap = (obj_addr + getofs('jf_gcmap')).address[0] gcmap_lgt = (gcmap + GCMAPLENGTHOFS).signed[0] + # + from rpython.jit.backend.llsupport.gcmap import GCMAP_STM_LOCATION + gcmap_lgt -= GCMAP_STM_LOCATION + # while no < gcmap_lgt: cur = (gcmap + GCMAPBASEOFS + UNSIGN_SIZE * no).unsigned[0] while not (cur & (1 << state)): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -5,7 +5,7 @@ from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, DEBUG_COUNTER, debug_bridge) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper -from rpython.jit.backend.llsupport.gcmap import allocate_gcmap +from rpython.jit.backend.llsupport.gcmap import extract_raw_stm_location from 
rpython.jit.metainterp.history import Const, Box, VOID from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory @@ -396,6 +396,29 @@ # if not for_frame: self._push_all_regs_to_frame(mc, [], withfloats, callee_only=True) + # + if self.cpu.gc_ll_descr.stm: + # We are in the slow-path of write barriers, which is + # supposed to be called rarely. We have to save the + # current 'stm_location' so that it is found. The easiest + # is to simply push it on the shadowstack, from its source + # location as two extra arguments on the machine stack. + # 'r14' is kept around as the original value of + # shadowstack_top, ready to be stored back below. + # XXX this should also be done if 'for_frame' is true... + assert IS_X86_64 + mc.MOV(r14, self.heap_shadowstack_top()) + mc.MOV_rs(edi.value, 3 * WORD) + # do here the 'num = (num<<1) + 1' rather than at the caller + # site, to increase the chances that it can use PUSH_i8 + mc.LEA_ra(edi.value, (self.SEGMENT_NO, rx86.NO_BASE_REGISTER, + edi.value, 1, +1)) + mc.MOV_mr((self.SEGMENT_NO, r14.value, 0), edi.value) + mc.MOV_rs(edi.value, 2 * WORD) + mc.MOV_mr((self.SEGMENT_NO, r14.value, WORD), edi.value) + mc.LEA_rm(edi.value, (self.SEGMENT_NO, r14.value, 2 * WORD)) + mc.MOV(self.heap_shadowstack_top(), edi) + # if IS_X86_32: # we have 2 extra words on stack for retval and we pass 1 extra # arg, so we need to substract 2 words @@ -440,11 +463,16 @@ # if not for_frame: + if self.cpu.gc_ll_descr.stm: + mc.MOV(self.heap_shadowstack_top(), r14) if IS_X86_32: # ADD touches CPU flags mc.LEA_rs(esp.value, 2 * WORD) self._pop_all_regs_from_frame(mc, [], withfloats, callee_only=True) - mc.RET16_i(WORD) + if self.cpu.gc_ll_descr.stm: + mc.RET16_i(3 * WORD) + else: + mc.RET16_i(WORD) else: if IS_X86_32: mc.MOV_rs(edx.value, 4 * WORD) @@ -1863,7 +1891,7 @@ guard_opnum == rop.GUARD_NOT_FORCED) is_guard_not_invalidated = guard_opnum == 
rop.GUARD_NOT_INVALIDATED is_guard_not_forced = guard_opnum == rop.GUARD_NOT_FORCED - gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + gcmap = self._regalloc.get_empty_gcmap(frame_depth) return GuardToken(self.cpu, gcmap, faildescr, failargs, fail_locs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced) @@ -2229,6 +2257,16 @@ assert self.wb_slowpath[helper_num] != 0 # if not is_frame: + if self.cpu.gc_ll_descr.stm: + # get the num and ref components of the stm_location, and + # push them to the stack. It's 16 bytes, so alignment is + # still ok. The one or three words pushed here are removed + # by the callee. + assert IS_X86_64 + num, ref = extract_raw_stm_location( + self._regalloc.stm_location) + mc.PUSH_i(num) + mc.PUSH_i(ref) mc.PUSH(loc_base) if is_frame and align_stack: mc.SUB_ri(esp.value, 16 - WORD) # erase the return address diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -7,6 +7,7 @@ from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) from rpython.jit.backend.llsupport.gcmap import allocate_gcmap +from rpython.jit.backend.llsupport.jiframe import GCMAP from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempBox, compute_vars_longevity, is_comparison_or_ovf_op) from rpython.jit.backend.x86 import rx86 @@ -22,7 +23,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr, ConstFloat, BoxInt, BoxFloat, INT, REF, FLOAT, TargetToken) -from rpython.jit.metainterp.resoperation import rop, ResOperation +from rpython.jit.metainterp.resoperation import rop, ResOperation, StmLocation from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import 
r_longlong, r_uint @@ -150,6 +151,9 @@ assembler = self.assembler) self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm, assembler = self.assembler) + # 'self.stm_location' is the StmLocation of the current operation + # (or the last one that actually had a StmLocation) + self.stm_location = None return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): @@ -318,6 +322,12 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.xrm.position = i + # + if op.stm_location is not None: + if (self.stm_location.num != op.stm_location.num or + self.stm_location.ref != op.stm_location.ref): + self.stm_location = op.stm_location + # if op.has_no_side_effect() and op.result not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) @@ -899,9 +909,13 @@ gc_ll_descr.get_nursery_top_addr(), lengthloc, itemsize, maxlength, gcmap, arraydescr) + def get_empty_gcmap(self, frame_depth): + return allocate_gcmap(self.assembler, frame_depth, + JITFRAME_FIXED_SIZE, self.stm_location) + def get_gcmap(self, forbidden_regs=[], noregs=False): frame_depth = self.fm.get_frame_depth() - gcmap = allocate_gcmap(self.assembler, frame_depth, JITFRAME_FIXED_SIZE) + gcmap = self.get_empty_gcmap(frame_depth) for box, loc in self.rm.reg_bindings.iteritems(): if loc in forbidden_regs: continue diff --git a/rpython/jit/backend/x86/stmtlocal.py b/rpython/jit/backend/x86/stmtlocal.py --- a/rpython/jit/backend/x86/stmtlocal.py +++ b/rpython/jit/backend/x86/stmtlocal.py @@ -9,6 +9,7 @@ _instruction = "movq %%fs:0, %0" eci = ExternalCompilationInfo(post_include_bits=[''' +#define RPY_STM_JIT 1 static long pypy__threadlocal_base(void) { /* XXX ONLY LINUX WITH GCC/CLANG FOR NOW XXX */ diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -56,7 +56,7 @@ return *(long *)(src + ofs); } -static RPyStringSpace0 
*_fetch_rpyspace0(char *seg_base, object_t *base, +static RPyStringSpace0 *_fetch_rpsspace0(char *seg_base, object_t *base, long ofs) { char *src = seg_base + (uintptr_t)base; @@ -74,14 +74,24 @@ RPyStringSpace0 *co_name; RPyStringSpace0 *co_lnotab; char *ntrunc = "", *fntrunc = ""; - long fnlen, nlen, line; - char *fn, *name; + long fnlen = 1, nlen = 1, line = 0; + char *fn = "?", *name = "?"; + +#ifdef RPY_STM_JIT + if (odd_number == STM_STACK_MARKER_NEW || + odd_number == STM_STACK_MARKER_OLD) { + assert(o); + /* XXX ji_jf_forward */ + /* XXX */ + o = NULL; + } +#endif if (o) { - co_filename =_fetch_rpyspace0(segment_base, o, g_co_filename_ofs); - co_name =_fetch_rpyspace0(segment_base, o, g_co_name_ofs); + co_filename =_fetch_rpsspace0(segment_base, o, g_co_filename_ofs); + co_name =_fetch_rpsspace0(segment_base, o, g_co_name_ofs); co_firstlineno=_fetch_lngspace0(segment_base, o, g_co_firstlineno_ofs); - co_lnotab =_fetch_rpyspace0(segment_base, o, g_co_lnotab_ofs); + co_lnotab =_fetch_rpsspace0(segment_base, o, g_co_lnotab_ofs); long remaining = outputbufsize - 32; nlen = RPyString_Size(co_name); @@ -112,13 +122,6 @@ line += ((unsigned char *)lnotab)[i + 1]; } } - else { - fnlen = 1; - fn = "?"; - nlen = 1; - name = "?"; - line = 0; - } snprintf(outputbuf, outputbufsize, "File \"%s%.*s\", line %ld, in %.*s%s", fntrunc, (int)fnlen, fn, line, (int)nlen, name, ntrunc); From noreply at buildbot.pypy.org Mon Apr 21 20:48:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 21 Apr 2014 20:48:45 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Small fixes, and translation fixes (still in-progress) Message-ID: <20140421184845.1F3B41C0543@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70827:3a5604db65d7 Date: 2014-04-21 20:48 +0200 http://bitbucket.org/pypy/pypy/changeset/3a5604db65d7/ Log: Small fixes, and translation fixes (still in-progress) diff --git a/rpython/jit/backend/x86/assembler.py 
b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -395,6 +395,10 @@ mc = codebuf.MachineCodeBlockWrapper() # if not for_frame: + if self.cpu.gc_ll_descr.stm: + assert IS_X86_64 + mc.PUSH_r(X86_64_SCRATCH_REG.value) + # self._push_all_regs_to_frame(mc, [], withfloats, callee_only=True) # if self.cpu.gc_ll_descr.stm: @@ -402,24 +406,23 @@ # supposed to be called rarely. We have to save the # current 'stm_location' so that it is found. The easiest # is to simply push it on the shadowstack, from its source - # location as two extra arguments on the machine stack. - # 'r14' is kept around as the original value of - # shadowstack_top, ready to be stored back below. + # location as two extra arguments on the machine stack + # (at this point containing: [ref][retaddr][num][obj]...) # XXX this should also be done if 'for_frame' is true... - assert IS_X86_64 - mc.MOV(r14, self.heap_shadowstack_top()) - mc.MOV_rs(edi.value, 3 * WORD) + mc.MOV(esi, self.heap_shadowstack_top()) + mc.MOV_rs(edi.value, 2 * WORD) # [num] # do here the 'num = (num<<1) + 1' rather than at the caller # site, to increase the chances that it can use PUSH_i8 mc.LEA_ra(edi.value, (self.SEGMENT_NO, rx86.NO_BASE_REGISTER, edi.value, 1, +1)) - mc.MOV_mr((self.SEGMENT_NO, r14.value, 0), edi.value) - mc.MOV_rs(edi.value, 2 * WORD) - mc.MOV_mr((self.SEGMENT_NO, r14.value, WORD), edi.value) - mc.LEA_rm(edi.value, (self.SEGMENT_NO, r14.value, 2 * WORD)) - mc.MOV(self.heap_shadowstack_top(), edi) - # - if IS_X86_32: + mc.MOV_mr((self.SEGMENT_NO, esi.value, 0), edi.value) + mc.MOV_rs(edi.value, 0 * WORD) # [ref] + mc.MOV_mr((self.SEGMENT_NO, esi.value, WORD), edi.value) + mc.MOV_sr(0 * WORD, esi.value) # save org shadowstack_top + mc.LEA_rm(esi.value, (self.SEGMENT_NO, esi.value, 2 * WORD)) + mc.MOV(self.heap_shadowstack_top(), esi) + mc.MOV_rs(edi.value, 3 * WORD) # [obj] + elif IS_X86_32: # we have 2 extra words on stack for retval and 
we pass 1 extra # arg, so we need to substract 2 words mc.SUB_ri(esp.value, 2 * WORD) @@ -463,14 +466,13 @@ # if not for_frame: - if self.cpu.gc_ll_descr.stm: - mc.MOV(self.heap_shadowstack_top(), r14) if IS_X86_32: # ADD touches CPU flags mc.LEA_rs(esp.value, 2 * WORD) self._pop_all_regs_from_frame(mc, [], withfloats, callee_only=True) if self.cpu.gc_ll_descr.stm: - mc.RET16_i(3 * WORD) + mc.POP(self.heap_shadowstack_top()) + mc.RET16_i(2 * WORD) else: mc.RET16_i(WORD) else: @@ -2257,6 +2259,7 @@ assert self.wb_slowpath[helper_num] != 0 # if not is_frame: + mc.PUSH(loc_base) if self.cpu.gc_ll_descr.stm: # get the num and ref components of the stm_location, and # push them to the stack. It's 16 bytes, so alignment is @@ -2265,9 +2268,8 @@ assert IS_X86_64 num, ref = extract_raw_stm_location( self._regalloc.stm_location) - mc.PUSH_i(num) - mc.PUSH_i(ref) - mc.PUSH(loc_base) + mc.PUSH(imm(rffi.cast(lltype.Signed, num))) + mc.MOV(X86_64_SCRATCH_REG, imm(rffi.cast(lltype.Signed, ref))) if is_frame and align_stack: mc.SUB_ri(esp.value, 16 - WORD) # erase the return address mc.CALL(imm(self.wb_slowpath[helper_num])) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -7,7 +7,7 @@ from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) from rpython.jit.backend.llsupport.gcmap import allocate_gcmap -from rpython.jit.backend.llsupport.jiframe import GCMAP +from rpython.jit.backend.llsupport.jitframe import GCMAP from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempBox, compute_vars_longevity, is_comparison_or_ovf_op) from rpython.jit.backend.x86 import rx86 @@ -324,7 +324,8 @@ self.xrm.position = i # if op.stm_location is not None: - if (self.stm_location.num != op.stm_location.num or + if (self.stm_location is None or + 
self.stm_location.num != op.stm_location.num or self.stm_location.ref != op.stm_location.ref): self.stm_location = op.stm_location # diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -515,6 +515,9 @@ TY_REF = 0x04 TY_FLOAT = 0x06 + stm_location_int = 0 + stm_location_ref = lltype.nullptr(llmemory.GCREF.TO) + def store_final_boxes(self, guard_op, boxes, metainterp_sd): guard_op.setfailargs(boxes) self.rd_count = len(boxes) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -25,7 +25,7 @@ stm_location = None _cls_has_bool_result = False - _attrs_ = ('result',) + _attrs_ = ('result', 'stm_location') def __init__(self, result): self.result = result From noreply at buildbot.pypy.org Mon Apr 21 20:55:24 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 20:55:24 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: update release versioning Message-ID: <20140421185524.CA8D61C0543@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r70828:26d2174181ed Date: 2014-04-21 21:42 +0300 http://bitbucket.org/pypy/pypy/changeset/26d2174181ed/ Log: update release versioning diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.2' +version = '2.3' # The full version, including alpha/beta/rc tags. -release = '2.2.1' +release = '2.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. 
-* `Release 2.2.1`_: the latest official release +* `Release 2.3.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.1`: http://pypy.org/download.html +.. _`Release 2.3.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -21,7 +21,7 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 -#define PY_MICRO_VERSION 3 +#define PY_MICRO_VERSION 6 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.6" /* PyPy version as a string */ -#define PYPY_VERSION "2.3.0-alpha0" +#define PYPY_VERSION "2.3.0-final0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 3, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 3, 0, "final", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Mon Apr 21 20:55:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 20:55:26 +0200 (CEST) Subject: [pypy-commit] pypy default: whoops Message-ID: <20140421185526.094C21C0543@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70829:e54a3b9abdc0 Date: 2014-04-21 21:54 +0300 http://bitbucket.org/pypy/pypy/changeset/e54a3b9abdc0/ Log: whoops diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -713,11 +713,11 @@ raise oefmt(space.w_ValueError, "'%s' is an invalid value for keyword 'side'", side) if len(self.get_shape()) > 1: - raise OperationError(space.w_ValueError, space.wrap( + raise oefmt(space.w_ValueError, "a must be a 1-d array") v = convert_to_array(space, w_v) if len(v.get_shape()) > 1: - raise OperationError(space.w_ValueError, space.wrap( + raise oefmt(space.w_ValueError, "v must be a 1-d array-like") ret = W_NDimArray.from_shape( space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) From noreply at buildbot.pypy.org Mon Apr 21 21:25:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 21 Apr 2014 21:25:19 +0200 (CEST) Subject: [pypy-commit] pypy default: pep8/cleanup Message-ID: <20140421192519.D2C421D2BEB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70830:e9d10a77fab6 Date: 2014-04-21 15:21 -0400 
http://bitbucket.org/pypy/pypy/changeset/e9d10a77fab6/ Log: pep8/cleanup diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1,7 +1,8 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ - WrappedDefault -from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr + WrappedDefault +from pypy.interpreter.typedef import TypeDef, GetSetProperty, \ + make_weakref_descr from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder from rpython.rlib.rawstorage import RAW_STORAGE_PTR @@ -12,10 +13,10 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.arrayops import repeat, choose, put from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, \ - ArrayArgumentException, wrap_impl + ArrayArgumentException, wrap_impl from pypy.module.micronumpy.concrete import BaseConcreteArray -from pypy.module.micronumpy.converters import order_converter, shape_converter, \ - multi_axis_converter +from pypy.module.micronumpy.converters import multi_axis_converter, \ + order_converter, shape_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.flatiter import W_FlatIterator from pypy.module.micronumpy.strides import get_shape_from_iterable, \ @@ -33,15 +34,14 @@ right_critical_dim = len(right_shape) - 2 right_critical_dim_size = right_shape[right_critical_dim] assert right_critical_dim >= 0 - out_shape = out_shape + left_shape[:-1] + \ - right_shape[0:right_critical_dim] + \ - right_shape[right_critical_dim + 1:] + out_shape = (out_shape + left_shape[:-1] + + right_shape[0:right_critical_dim] + + right_shape[right_critical_dim + 1:]) elif len(right_shape) > 0: #dot does not reduce for scalars out_shape = out_shape + left_shape[:-1] if my_critical_dim_size != 
right_critical_dim_size: - raise OperationError(space.w_ValueError, space.wrap( - "objects are not aligned")) + raise oefmt(space.w_ValueError, "objects are not aligned") return out_shape, right_critical_dim @@ -55,8 +55,8 @@ return self.implementation.get_shape() def descr_set_shape(self, space, w_new_shape): - self.implementation = self.implementation.set_shape(space, self, - get_shape_from_iterable(space, self.get_size(), w_new_shape)) + shape = get_shape_from_iterable(space, self.get_size(), w_new_shape) + self.implementation = self.implementation.set_shape(space, self, shape) def descr_get_strides(self, space): strides = self.implementation.get_strides() @@ -72,8 +72,8 @@ return self.implementation.dtype def descr_set_dtype(self, space, w_dtype): - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) if (dtype.elsize != self.get_dtype().elsize or dtype.is_flexible() or self.get_dtype().is_flexible()): raise OperationError(space.w_ValueError, space.wrap( @@ -115,7 +115,8 @@ res_shape = [size] + self.get_shape()[1:] else: res_shape = [size] - w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_instance=self) + w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), + w_instance=self) return loop.getitem_filter(w_res, self, arr) def setitem_filter(self, space, idx, val): @@ -128,10 +129,10 @@ size = loop.count_all_true(idx) if size > val.get_size() and val.get_size() != 1: raise oefmt(space.w_ValueError, - "NumPy boolean array indexing assignment " - "cannot assign %d input values to " - "the %d output values where the mask is true", - val.get_size(), size) + "NumPy boolean array indexing assignment " + "cannot assign %d input values to " + "the %d output values where the mask is true", + val.get_size(), size) loop.setitem_filter(space, self, idx, val) def 
_prepare_array_index(self, space, w_index): @@ -151,7 +152,7 @@ prefix = [] for i, w_item in enumerate(w_lst): if (isinstance(w_item, W_NDimArray) or - space.isinstance_w(w_item, space.w_list)): + space.isinstance_w(w_item, space.w_list)): w_item = convert_to_array(space, w_item) if shape is None: shape = w_item.get_shape() @@ -163,7 +164,7 @@ arr_index_in_shape = True else: if space.isinstance_w(w_item, space.w_slice): - _, _, _, lgt = space.decode_index4(w_item, self.get_shape()[i]) + lgt = space.decode_index4(w_item, self.get_shape()[i])[3] if not arr_index_in_shape: prefix.append(w_item) res_shape.append(lgt) @@ -178,7 +179,7 @@ def getitem_array_int(self, space, w_index): prefix, res_shape, iter_shape, indexes = \ - self._prepare_array_index(space, w_index) + self._prepare_array_index(space, w_index) if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) @@ -194,7 +195,7 @@ def setitem_array_int(self, space, w_index, w_value): val_arr = convert_to_array(space, w_value) prefix, _, iter_shape, indexes = \ - self._prepare_array_index(space, w_index) + self._prepare_array_index(space, w_index) if iter_shape is None: # w_index is a list of slices chunks = self.implementation._prepare_slice_args(space, w_index) @@ -331,8 +332,8 @@ def descr_set_imag(self, space, w_value): # if possible, copy (broadcast) values into self if not self.get_dtype().is_complex(): - raise OperationError(space.w_TypeError, - space.wrap('array does not have imaginary part to set')) + raise oefmt(space.w_TypeError, + 'array does not have imaginary part to set') self.implementation.set_imag(space, self, w_value) def reshape(self, space, w_shape): @@ -481,7 +482,7 @@ assert isinstance(w_obj, boxes.W_GenericBox) return w_obj.item(space) raise oefmt(space.w_ValueError, - "can only convert an array of size 1 to a Python scalar") + "can only convert an array of size 1 to a Python scalar") elif len(args_w) == 1 and 
len(shape) != 1: value = support.index_w(space, args_w[0]) value = support.check_and_adjust_index(space, value, self.get_size(), -1) @@ -533,6 +534,7 @@ return w_d w_pypy_data = None + def fget___pypy_data__(self, space): return self.w_pypy_data @@ -556,16 +558,16 @@ def descr_astype(self, space, w_dtype): cur_dtype = self.get_dtype() - new_dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + new_dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) if new_dtype.num == NPY.VOID: raise oefmt(space.w_NotImplementedError, "astype(%s) not implemented yet", new_dtype.get_name()) if new_dtype.num == NPY.STRING and new_dtype.elsize == 0: if cur_dtype.num == NPY.STRING: - new_dtype = descriptor.variable_dtype(space, - 'S' + str(cur_dtype.elsize)) + new_dtype = descriptor.variable_dtype( + space, 'S' + str(cur_dtype.elsize)) impl = self.implementation new_impl = impl.astype(space, new_dtype) return wrap_impl(space, space.type(self), self, new_impl) @@ -583,7 +585,8 @@ loop.byteswap(self.implementation, self.implementation) return self else: - w_res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), w_instance=self) + w_res = W_NDimArray.from_shape(space, self.get_shape(), + self.get_dtype(), w_instance=self) loop.byteswap(self.implementation, w_res.implementation) return w_res @@ -599,8 +602,7 @@ min = convert_to_array(space, w_min) max = convert_to_array(space, w_max) shape = shape_agreement_multiple(space, [self, min, max, w_out]) - out = descriptor.dtype_agreement(space, [self, min, max], shape, - w_out) + out = descriptor.dtype_agreement(space, [self, min, max], shape, w_out) loop.clip(space, self, shape, min, max, out) return out @@ -620,15 +622,14 @@ raise OperationError(space.w_ValueError, space.wrap( "need at least 2 dimensions for diagonal")) if (axis1 < 0 or axis2 < 0 or axis1 >= len(self.get_shape()) or - axis2 >= 
len(self.get_shape())): + axis2 >= len(self.get_shape())): raise oefmt(space.w_ValueError, "axis1(=%d) and axis2(=%d) must be withing range " "(ndim=%d)", axis1, axis2, len(self.get_shape())) if axis1 == axis2: raise OperationError(space.w_ValueError, space.wrap( "axis1 and axis2 cannot be the same")) - return arrayops.diagonal(space, self.implementation, offset, - axis1, axis2) + return arrayops.diagonal(space, self.implementation, offset, axis1, axis2) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_trace(self, space, offset=0, axis1=0, axis2=1, @@ -645,6 +646,7 @@ "dumps not implemented yet")) w_flags = None + def descr_get_flags(self, space): if self.w_flags is None: self.w_flags = W_FlagsObject(self) @@ -657,8 +659,8 @@ @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY.SWAP): - return self.descr_view(space, - self.get_dtype().descr_newbyteorder(space, new_order)) + return self.descr_view( + space, self.get_dtype().descr_newbyteorder(space, new_order)) @unwrap_spec(w_axis=WrappedDefault(None), w_out=WrappedDefault(None)) @@ -685,8 +687,7 @@ elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) - out = descriptor.dtype_agreement(space, [self], self.get_shape(), - w_out) + out = descriptor.dtype_agreement(space, [self], self.get_shape(), w_out) if out.get_dtype().is_bool() and self.get_dtype().is_bool(): calc_dtype = descriptor.get_dtype_cache(space).w_longdtype else: @@ -713,12 +714,10 @@ raise oefmt(space.w_ValueError, "'%s' is an invalid value for keyword 'side'", side) if len(self.get_shape()) > 1: - raise oefmt(space.w_ValueError, - "a must be a 1-d array") + raise oefmt(space.w_ValueError, "a must be a 1-d array") v = convert_to_array(space, w_v) if len(v.get_shape()) > 1: - raise oefmt(space.w_ValueError, - "v must be a 1-d array-like") + raise oefmt(space.w_ValueError, "v must be a 1-d array-like") ret = W_NDimArray.from_shape( space, 
v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) app_searchsort(space, self, v, space.wrap(side), ret) @@ -753,7 +752,7 @@ if axes[i]: if cur_shape[i] != 1: raise OperationError(space.w_ValueError, space.wrap( - "cannot select an axis to squeeze out " \ + "cannot select an axis to squeeze out " "which has size greater than one")) else: new_shape.append(cur_shape[i]) @@ -786,9 +785,8 @@ else: raise if w_dtype: - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), - w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) else: dtype = self.get_dtype() old_itemsize = self.get_dtype().elsize @@ -830,8 +828,8 @@ def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -844,14 +842,15 @@ def descr___nonzero__(self, space): if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( - "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) + "The truth value of an array with more than one element " + "is ambiguous. 
Use a.any() or a.all()")) iter, state = self.create_iter() return space.wrap(space.is_true(iter.getitem(state))) def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_other, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -915,7 +914,8 @@ def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) - return getattr(ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -941,8 +941,7 @@ if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): - raise OperationError(space.w_TypeError, space.wrap( - 'output must be an array')) + raise oefmt(space.w_TypeError, 'output must be an array') else: out = w_out other = convert_to_array(space, w_other) @@ -954,7 +953,7 @@ assert isinstance(w_res, W_NDimArray) return w_res.descr_sum(space, space.wrap(-1), out) dtype = ufuncs.find_binop_result_dtype(space, self.get_dtype(), - other.get_dtype()) + other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: # numpy compatability return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) @@ -986,16 +985,16 @@ other_critical_dim) def descr_mean(self, space, __args__): - return get_appbridge_cache(space).call_method(space, - 'numpy.core._methods', '_mean', __args__.prepend(self)) + return get_appbridge_cache(space).call_method( + space, 'numpy.core._methods', '_mean', __args__.prepend(self)) def descr_var(self, space, __args__): - return get_appbridge_cache(space).call_method(space, - 'numpy.core._methods', '_var', __args__.prepend(self)) + return 
get_appbridge_cache(space).call_method( + space, 'numpy.core._methods', '_var', __args__.prepend(self)) def descr_std(self, space, __args__): - return get_appbridge_cache(space).call_method(space, - 'numpy.core._methods', '_std', __args__.prepend(self)) + return get_appbridge_cache(space).call_method( + space, 'numpy.core._methods', '_std', __args__.prepend(self)) # ----------------------- reduce ------------------------------- @@ -1005,8 +1004,7 @@ if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): - raise OperationError(space.w_TypeError, space.wrap( - 'output must be an array')) + raise oefmt(space.w_TypeError, 'output must be an array') else: out = w_out return getattr(ufuncs.get(space), ufunc_name).reduce( @@ -1028,13 +1026,13 @@ def impl(self, space, w_axis=None, w_out=None): if not space.is_none(w_axis): raise oefmt(space.w_NotImplementedError, - "axis unsupported for %s", op_name) + "axis unsupported for %s", op_name) if not space.is_none(w_out): raise oefmt(space.w_NotImplementedError, - "out unsupported for %s", op_name) + "out unsupported for %s", op_name) if self.get_size() == 0: raise oefmt(space.w_ValueError, - "Can't call %s on zero-size arrays", op_name) + "Can't call %s on zero-size arrays", op_name) try: getattr(self.get_dtype().itemtype, raw_name) except AttributeError: @@ -1117,8 +1115,8 @@ multiarray = numpypy.get("multiarray") assert isinstance(multiarray, MixedModule) reconstruct = multiarray.get("_reconstruct") - parameters = space.newtuple([self.getclass(space), - space.newtuple([space.wrap(0)]), space.wrap("b")]) + parameters = space.newtuple([self.getclass(space), space.newtuple( + [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() if isinstance(self.implementation, SliceArray): @@ -1128,15 +1126,16 @@ builder.append(box.raw_str()) state = iter.next(state) else: - builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) + 
builder.append_charpsize(self.implementation.get_storage(), + self.implementation.get_storage_size()) state = space.newtuple([ - space.wrap(1), # version - self.descr_get_shape(space), - self.get_dtype(), - space.wrap(False), # is_fortran - space.wrap(builder.build()), - ]) + space.wrap(1), # version + self.descr_get_shape(space), + self.get_dtype(), + space.wrap(False), # is_fortran + space.wrap(builder.build()), + ]) return space.newtuple([reconstruct, parameters, state]) @@ -1149,19 +1148,20 @@ base_index = 0 else: raise oefmt(space.w_ValueError, - "__setstate__ called with len(args[1])==%d, not 5 or 4", lens) + "__setstate__ called with len(args[1])==%d, not 5 or 4", + lens) shape = space.getitem(w_state, space.wrap(base_index)) dtype = space.getitem(w_state, space.wrap(base_index+1)) #isfortran = space.getitem(w_state, space.wrap(base_index+2)) storage = space.getitem(w_state, space.wrap(base_index+3)) if not isinstance(dtype, descriptor.W_Dtype): raise oefmt(space.w_ValueError, - "__setstate__(self, (shape, dtype, .. called with " - "improper dtype '%R'", dtype) - self.implementation = W_NDimArray.from_shape_and_storage(space, - [space.int_w(i) for i in space.listview(shape)], - rffi.str2charp(space.str_w(storage), track_allocation=False), - dtype, owning=True).implementation + "__setstate__(self, (shape, dtype, .. 
called with " + "improper dtype '%R'", dtype) + self.implementation = W_NDimArray.from_shape_and_storage( + space, [space.int_w(i) for i in space.listview(shape)], + rffi.str2charp(space.str_w(storage), track_allocation=False), + dtype, owning=True).implementation def descr___array_finalize__(self, space, w_obj): pass @@ -1179,8 +1179,8 @@ offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy.strides import calc_strides - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = shape_converter(space, w_shape, dtype) if not space.is_none(w_buffer): @@ -1216,8 +1216,7 @@ if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) strides, backstrides = calc_strides(shape, dtype.base, order) - impl = ConcreteArray(shape, dtype.base, order, strides, - backstrides) + impl = ConcreteArray(shape, dtype.base, order, strides, backstrides) w_ret = space.allocate_instance(W_NDimArray, w_subtype) W_NDimArray.__init__(w_ret, impl) space.call_function(space.getattr(w_ret, @@ -1232,16 +1231,15 @@ PyPy-only implementation detail. 
""" storage = rffi.cast(RAW_STORAGE_PTR, addr) - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), - w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = shape_converter(space, w_shape, dtype) if w_subtype: if not space.isinstance_w(w_subtype, space.w_type): raise OperationError(space.w_ValueError, space.wrap( "subtype must be a subtype of ndarray, not a class instance")) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - 'C', False, w_subtype) + 'C', False, w_subtype) else: return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) From noreply at buildbot.pypy.org Mon Apr 21 21:30:52 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 21:30:52 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: update contributors, lots of new people doing good stuff, thanks! Message-ID: <20140421193052.77A511D2BEB@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r70831:20e51c4389ed Date: 2014-04-21 22:28 +0300 http://bitbucket.org/pypy/pypy/changeset/20e51c4389ed/ Log: update contributors, lots of new people doing good stuff, thanks! 
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -15,21 +15,21 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer - Matti Picus Hakan Ardo Benjamin Peterson - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns - Manuel Jacob Eric van Riet Paap Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen - Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann @@ -38,23 +38,23 @@ Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak - Romain Guillebert Guido Wesdorp Lawrence Oluyede - Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen Jason Creighton Alex Martelli @@ -71,6 +71,7 @@ Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas @@ -87,6 +88,7 @@ Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin Stefano Rivera @@ -95,13 +97,17 @@ Georg Brandl Bert Freudenberg Stian Andreassen + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. 
Muller - Laurence Tratt + Jeremy Thurgood + Gregor Wegberg Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -110,9 +116,7 @@ David Ripton Dusty Phillips Lukas Renggli - Edd Barrett Guenter Jantzen - Tobias Oberstein Ned Batchelder Amit Regmi Ben Young @@ -123,7 +127,6 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -134,7 +137,6 @@ Olivier Dormond Jared Grubb Karl Bartel - Tobias Pape Brian Dorsey Victor Stinner Andrews Medina @@ -146,9 +148,9 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen Jonathan David Riehl Stanislaw Halik @@ -161,7 +163,9 @@ Alexander Sedov Corbin Simpson Christopher Pope + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan Alexis Daboville @@ -170,6 +174,7 @@ Karl Ramm Pieter Zieschang Gabriel + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -180,6 +185,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -200,7 +206,6 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -212,28 +217,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -241,6 +253,7 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths Mike Bayer Flavio Percoco Kristoffer 
Kleine From noreply at buildbot.pypy.org Mon Apr 21 21:31:53 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 21 Apr 2014 21:31:53 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Convert ast ValidationError to applevel ValueError. Message-ID: <20140421193153.77A1E1D2BEB@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70832:a1adffb9acd7 Date: 2014-04-21 21:29 +0200 http://bitbucket.org/pypy/pypy/changeset/a1adffb9acd7/ Log: Convert ast ValidationError to applevel ValueError. diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -11,7 +11,8 @@ class ValidationError(Exception): - pass + def __init__(self, message): + self.message = message def expr_context_name(ctx): diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -137,7 +137,11 @@ return code def validate_ast(self, node): - validate.validate_ast(self.space, node) + try: + validate.validate_ast(self.space, node) + except validate.ValidationError as e: + raise OperationError(self.space.w_ValueError, + self.space.wrap(e.message)) def compile_to_ast(self, source, filename, mode, flags): info = pyparse.CompileInfo(filename, mode, flags) diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -878,6 +878,15 @@ # the code object's filename comes from the second compilation step assert co2.co_filename == '%s3' % fname + def test_invalid_ast(self): + import _ast + delete = _ast.Delete([]) + delete.lineno = 0 + delete.col_offset = 0 + mod = _ast.Module([delete]) + exc = raises(ValueError, compile, mod, 'filename', 'exec') + assert str(exc.value) == "empty targets on Delete" + class AppTestOptimizer: From 
noreply at buildbot.pypy.org Mon Apr 21 21:31:54 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 21 Apr 2014 21:31:54 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: This test was also skipped previously Message-ID: <20140421193154.A54721D2BEB@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r70833:5ec636950a44 Date: 2014-04-21 21:29 +0200 http://bitbucket.org/pypy/pypy/changeset/5ec636950a44/ Log: This test was also skipped previously diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py --- a/lib-python/3/test/test_sys.py +++ b/lib-python/3/test/test_sys.py @@ -307,6 +307,7 @@ ) # sys._current_frames() is a CPython-only gimmick. + @test.support.impl_detail("current_frames") def test_current_frames(self): have_threads = True try: From noreply at buildbot.pypy.org Mon Apr 21 22:37:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 21 Apr 2014 22:37:01 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup some tests in test_rsocket Message-ID: <20140421203701.781131C0721@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70834:fc668990004b Date: 2014-04-21 13:36 -0700 http://bitbucket.org/pypy/pypy/changeset/fc668990004b/ Log: cleanup some tests in test_rsocket diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -70,12 +70,14 @@ try: cpy_socket.gethostbyaddr("::1") except cpy_socket.herror: - ipv6 = False + ipv6 = HSocketError + except cpy_socket.gaierror: + ipv6 = GAIError else: - ipv6 = True + ipv6 = None for host in ["localhost", "127.0.0.1", "::1"]: - if host == "::1" and not ipv6: - with py.test.raises(HSocketError): + if host == "::1" and ipv6: + with py.test.raises(ipv6): gethostbyaddr(host) continue name, aliases, address_list = gethostbyaddr(host) @@ -162,10 +164,7 @@ assert addr.eq(sock.getsockname()) sock.listen(1) s2 = RSocket(AF_INET, 
SOCK_STREAM) - if sys.platform != 'win32': - # test one side with timeouts so select is used - # XXX fix on win32 - s2.settimeout(10.0) + s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test def connecting(): try: s2.connect(addr) @@ -381,26 +380,35 @@ assert value != 0 def test_dup(): - if sys.platform == "win32": - skip("dup does not work on Windows") s = RSocket(AF_INET, SOCK_STREAM) - s.bind(INETAddress('localhost', 50007)) - s2 = s.dup() - assert s.fd != s2.fd - assert s.getsockname().eq(s2.getsockname()) - s.close() - s2.close() + try: + s.bind(INETAddress('localhost', 50007)) + if sys.platform == "win32": + assert not hasattr(s, 'dup') + return + s2 = s.dup() + try: + assert s.fd != s2.fd + assert s.getsockname().eq(s2.getsockname()) + finally: + s2.close() + finally: + s.close() def test_c_dup(): # rsocket.dup() duplicates fd, it also works on Windows # (but only on socket handles!) s = RSocket(AF_INET, SOCK_STREAM) - s.bind(INETAddress('localhost', 50007)) - s2 = RSocket(fd=dup(s.fd)) - assert s.fd != s2.fd - assert s.getsockname().eq(s2.getsockname()) - s.close() - s2.close() + try: + s.bind(INETAddress('localhost', 50007)) + s2 = RSocket(fd=dup(s.fd)) + try: + assert s.fd != s2.fd + assert s.getsockname().eq(s2.getsockname()) + finally: + s2.close() + finally: + s.close() def test_inet_aton(): assert inet_aton('1.2.3.4') == '\x01\x02\x03\x04' From noreply at buildbot.pypy.org Mon Apr 21 22:49:55 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 21 Apr 2014 22:49:55 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: shortcut to bytes_w Message-ID: <20140421204955.644541C06C3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70835:9b05efa1e2d2 Date: 2014-04-21 13:25 -0700 http://bitbucket.org/pypy/pypy/changeset/9b05efa1e2d2/ Log: shortcut to bytes_w diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- 
a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1658,7 +1658,7 @@ return self.space.wrapbytes(stringval) def unwrap(self, w_string): - return self.space.str_w(w_string) + return self.space.bytes_w(w_string) erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) From noreply at buildbot.pypy.org Mon Apr 21 22:49:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 21 Apr 2014 22:49:56 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: another bytes strat fix Message-ID: <20140421204956.A2D261C06C3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70836:1a810f29fb28 Date: 2014-04-21 13:25 -0700 http://bitbucket.org/pypy/pypy/changeset/1a810f29fb28/ Log: another bytes strat fix diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -821,7 +821,7 @@ unerase = staticmethod(unerase) def wrap(self, unwrapped): - return self.space.wrap(unwrapped) + return self.space.wrapbytes(unwrapped) def unwrap(self, wrapped): return self.space.bytes_w(wrapped) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1015,6 +1015,11 @@ # gives us (1, 2), but 1 is not in the dict any longer. 
#raises(RuntimeError, list, it) + def test_bytes_to_object(self): + d = {b'a': 'b'} + d[object()] = None + assert b'a' in list(d) + class FakeString(str): From noreply at buildbot.pypy.org Mon Apr 21 23:02:59 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 21 Apr 2014 23:02:59 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_streamio on win32 Message-ID: <20140421210259.AE16B1C13AA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70837:7a7431b12694 Date: 2014-04-21 14:02 -0700 http://bitbucket.org/pypy/pypy/changeset/7a7431b12694/ Log: fix test_streamio on win32 diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -3,15 +3,15 @@ import os import time import random + +import pytest + +from rpython.rlib import streamio +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir -from rpython.rlib import streamio - -from rpython.rtyper.test.tool import BaseRtypingTest - class TSource(streamio.Stream): - def __init__(self, packets, tell=True, seek=True): for x in packets: assert x @@ -1066,8 +1066,8 @@ def test_read_interrupted(self): try: from signal import alarm, signal, SIG_DFL, SIGALRM - except: - skip('no alarm on this platform') + except ImportError: + pytest.skip('no alarm on this platform') try: read_fd, write_fd = os.pipe() file = streamio.DiskFile(read_fd) @@ -1082,8 +1082,8 @@ def test_write_interrupted(self): try: from signal import alarm, signal, SIG_DFL, SIGALRM - except: - skip('no alarm on this platform') + except ImportError: + pytest.skip('no alarm on this platform') try: read_fd, write_fd = os.pipe() file = streamio.DiskFile(write_fd) @@ -1123,15 +1123,14 @@ def speed_main(): def diskopen(fn, mode): filemode = 0 - import mmap if "r" in mode: filemode = os.O_RDONLY if "w" in mode: filemode |= os.O_WRONLY - fd = os.open(fn, filemode) base = streamio.DiskFile(fd) 
return streamio.BufferingInputStream(base) + def mmapopen(fn, mode): mmapmode = 0 filemode = 0 @@ -1144,7 +1143,7 @@ filemode |= os.O_WRONLY fd = os.open(fn, filemode) return streamio.MMapFile(fd, mmapmode) + timeit(opener=diskopen) timeit(opener=mmapopen) timeit(opener=open) - From noreply at buildbot.pypy.org Mon Apr 21 23:11:05 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 23:11:05 +0200 (CEST) Subject: [pypy-commit] pypy default: restart whatnew after release-2.3 Message-ID: <20140421211105.7AA1E1C0185@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70838:b6c402a43ae2 Date: 2014-04-22 00:03 +0300 http://bitbucket.org/pypy/pypy/changeset/b6c402a43ae2/ Log: restart whatnew after release-2.3 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.3.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.3.0.rst diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,151 +1,7 @@ ======================= -What's new in PyPy 2.2+ +What's new in PyPy 2.3+ ======================= -.. this is a revision shortly after release-2.2.x -.. startrev: 4cd1bc8b3111 +.. this is a revision shortly after release-2.3.x +.. startrev: 20e51c4389ed -.. branch: release-2.2.x - -.. branch: numpy-newbyteorder -Clean up numpy types, add newbyteorder functionality - -.. branch: windows-packaging -Package tk/tcl runtime with win32 - -.. branch: armhf-singlefloat -JIT support for singlefloats on ARM using the hardfloat ABI - -.. branch: voidtype_strformat -Better support for record numpy arrays - -.. branch: osx-eci-frameworks-makefile -OSX: Ensure frameworks end up in Makefile when specified in External compilation info - -.. branch: less-stringly-ops -Use subclasses of SpaceOperation instead of SpaceOperator objects. -Random cleanups in flowspace and annotator. - -.. 
branch: ndarray-buffer -adds support for the buffer= argument to the ndarray ctor - -.. branch: better_ftime_detect2 -On OpenBSD do not pull in libcompat.a as it is about to be removed. -And more generally, if you have gettimeofday(2) you will not need ftime(3). - -.. branch: timeb_h -Remove dependency upon on OpenBSD. This will be disappearing -along with libcompat.a. - -.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 -Fix 3 broken links on PyPy published papers in docs. - -.. branch: jit-ordereddict - -.. branch: refactor-str-types -Remove multimethods on str/unicode/bytearray and make the implementations share code. - -.. branch: remove-del-from-generatoriterator -Speed up generators that don't yield inside try or wait blocks by skipping -unnecessary cleanup. - -.. branch: annotator -Remove FlowObjSpace. -Improve cohesion between rpython.flowspace and rpython.annotator. - -.. branch: detect-immutable-fields -mapdicts keep track of whether or not an attribute is every assigned to -multiple times. If it's only assigned once then an elidable lookup is used when -possible. - -.. branch: precompiled-headers -Create a Makefile using precompiled headers for MSVC platforms. -The downside is a messy nmake-compatible Makefile. Since gcc shows minimal -speedup, it was not implemented. - -.. branch: camelot -With a properly configured 256-color terminal (TERM=...-256color), the -Mandelbrot set shown during translation now uses a range of 50 colours. -Essential! - -.. branch: NonConstant -Simplify implementation of NonConstant. - -.. branch: array-propagate-len -Kill some guards and operations in JIT traces by adding integer bounds -propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). - -.. branch: optimize-int-and -Optimize away INT_AND with constant mask of 1s that fully cover the bitrange -of other operand. - -.. 
branch: bounds-int-add-or -Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the -operands are positive to kill some guards - -.. branch: remove-intlong-smm -kills int/long/smalllong/bool multimethods - -.. branch: numpy-refactor -Cleanup micronumpy module - -.. branch: int_w-refactor -In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. - -.. branch: test-58c3d8552833 -Fix for getarrayitem_gc_pure optimization - -.. branch: simple-range-strategy -Implements SimpleRangeListStrategy for case range(n) where n is a positive number. -Makes some traces nicer by getting rid of multiplication for calculating loop counter -and propagates that n > 0 further to get rid of guards. - -.. branch: popen-pclose -Provide an exit status for popen'ed RFiles via pclose - -.. branch: stdlib-2.7.6 -Update stdlib to v2.7.6 - -.. branch: virtual-raw-store-load -Support for virtualizing raw_store/raw_load operations - -.. branch: refactor-buffer-api -Separate the interp-level buffer API from the buffer type exposed to -app-level. The `Buffer` class is now used by `W_MemoryView` and -`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was -an alias to `Buffer`, which was wrappable itself. - -.. branch: improve-consecutive-dict-lookups -Improve the situation when dict lookups of the same key are performed in a chain - -.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 -.. branch: test_SetFromErrnoWithFilename_NULL -.. branch: test_SetFromErrnoWithFilename__tweaks - -.. branch: refactor_PyErr_SetFromErrnoWithFilename -Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext - -.. branch: win32-fixes4 -fix more tests for win32 - -.. branch: latest-improve-doc -Fix broken links in documentation - -.. 
branch: ast-issue1673 -fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field - -.. branch: issue1514 -Fix issues with reimporting builtin modules - -.. branch: numpypy-nditer -Implement the core of nditer, without many of the fancy flags (external_loop, buffered) - -.. branch: numpy-speed -Separate iterator from its state so jit can optimize better - -.. branch: numpy-searchsorted -Implement searchsorted without sorter kwarg - -.. branch: openbsd-lib-prefix -add 'lib' prefix to link libraries on OpenBSD From noreply at buildbot.pypy.org Mon Apr 21 23:11:06 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 23:11:06 +0200 (CEST) Subject: [pypy-commit] pypy default: node should be on default branch Message-ID: <20140421211106.A490F1C0185@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70839:d41b6079f8f5 Date: 2014-04-22 00:08 +0300 http://bitbucket.org/pypy/pypy/changeset/d41b6079f8f5/ Log: node should be on default branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 20e51c4389ed +.. 
startrev: f659fa9a253f From noreply at buildbot.pypy.org Mon Apr 21 23:11:07 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 21 Apr 2014 23:11:07 +0200 (CEST) Subject: [pypy-commit] pypy default: changeset 20e51c4389ed, update list of contributors Message-ID: <20140421211107.BE7811C0185@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70840:4d1a65dedd97 Date: 2014-04-22 00:09 +0300 http://bitbucket.org/pypy/pypy/changeset/4d1a65dedd97/ Log: changeset 20e51c4389ed, update list of contributors diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -15,21 +15,21 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer - Matti Picus Hakan Ardo Benjamin Peterson - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns - Manuel Jacob Eric van Riet Paap Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen - Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann @@ -38,23 +38,23 @@ Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak - Romain Guillebert Guido Wesdorp Lawrence Oluyede - Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen Jason Creighton Alex Martelli @@ -71,6 +71,7 @@ Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas @@ -87,6 +88,7 @@ Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin Stefano Rivera @@ -95,13 +97,17 @@ Georg Brandl Bert Freudenberg Stian Andreassen + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. 
Muller - Laurence Tratt + Jeremy Thurgood + Gregor Wegberg Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -110,9 +116,7 @@ David Ripton Dusty Phillips Lukas Renggli - Edd Barrett Guenter Jantzen - Tobias Oberstein Ned Batchelder Amit Regmi Ben Young @@ -123,7 +127,6 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -134,7 +137,6 @@ Olivier Dormond Jared Grubb Karl Bartel - Tobias Pape Brian Dorsey Victor Stinner Andrews Medina @@ -146,9 +148,9 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen Jonathan David Riehl Stanislaw Halik @@ -161,7 +163,9 @@ Alexander Sedov Corbin Simpson Christopher Pope + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan Alexis Daboville @@ -170,6 +174,7 @@ Karl Ramm Pieter Zieschang Gabriel + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -180,6 +185,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -200,7 +206,6 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -212,28 +217,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -241,6 +253,7 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths Mike Bayer Flavio Percoco Kristoffer 
Kleine From noreply at buildbot.pypy.org Tue Apr 22 00:05:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 00:05:02 +0200 (CEST) Subject: [pypy-commit] pypy default: show where test_simple_tcp fails Message-ID: <20140421220502.C8FAB1C0543@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70841:52e1124ad331 Date: 2014-04-21 14:48 -0700 http://bitbucket.org/pypy/pypy/changeset/52e1124ad331/ Log: show where test_simple_tcp fails diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -165,11 +165,11 @@ sock.listen(1) s2 = RSocket(AF_INET, SOCK_STREAM) s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test + connected = False def connecting(): try: s2.connect(addr) - except: - s2.close() + connected = True finally: lock.release() lock = thread.allocate_lock() @@ -180,6 +180,7 @@ s1 = RSocket(fd=fd1) print 'connection accepted' lock.acquire() + assert connected print 'connecting side knows that the connection was accepted too' assert addr.eq(s2.getpeername()) #assert addr2.eq(s2.getsockname()) From noreply at buildbot.pypy.org Tue Apr 22 00:05:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 00:05:04 +0200 (CEST) Subject: [pypy-commit] pypy default: only skip poll tests on win32 Message-ID: <20140421220504.2E7031C0543@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70842:28198ceccc4a Date: 2014-04-21 15:04 -0700 http://bitbucket.org/pypy/pypy/changeset/28198ceccc4a/ Log: only skip poll tests on win32 diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -3,26 +3,42 @@ import py from rpython.rlib.rsocket import * -from rpython.rlib.rpoll import select -try: - from rpython.rlib.rpoll import poll -except ImportError: - 
py.test.skip('no poll available on this platform') +from rpython.rlib.rpoll import * from rpython.rtyper.test.test_llinterp import interpret +if os.name == 'nt': + has_poll = False +else: + has_poll = True + + def setup_module(mod): rsocket_startup() + def one_in_event(events, fd): assert len(events) == 1 assert events[0][0] == fd assert events[0][1] & POLLIN + def one_out_event(events, fd): assert len(events) == 1 assert events[0][0] == fd assert events[0][1] & POLLOUT + + at py.test.mark.skipif('has_poll') +def test_no_poll(): + try: + poll + except NameError: + pass + else: + assert False + + + at py.test.mark.skipif('not has_poll') def test_simple(): serv = RSocket(AF_INET, SOCK_STREAM) serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) @@ -65,9 +81,9 @@ servconn.close() serv.close() + + at py.test.mark.skipif('not has_poll') def test_exchange(): - if not poll: - py.test.skip('poll not available for this platform') serv = RSocket(AF_INET, SOCK_STREAM) serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) serv.listen(1) @@ -150,6 +166,7 @@ f() interpret(f, []) + def test_select_timeout(): if os.name == 'nt': py.test.skip('cannot select on file handles on windows') @@ -163,10 +180,16 @@ interpret(f, []) -def test_translate(): +def test_translate_select(): from rpython.translator.c.test.test_genc import compile + def func(): + select([], [], [], 0.0) + compile(func, []) + + at py.test.mark.skipif('not has_poll') +def test_translate_poll(): + from rpython.translator.c.test.test_genc import compile def func(): poll({}) - compile(func, []) From noreply at buildbot.pypy.org Tue Apr 22 07:38:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 07:38:14 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_urandom_failure by eliminating unnecessary dependency ctypes_support -> ctypes.util -> tempfile -> random -> urandom Message-ID: <20140422053814.138D61C0543@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70843:ad57911bdbc4 
Date: 2014-04-22 01:36 -0400 http://bitbucket.org/pypy/pypy/changeset/ad57911bdbc4/ Log: fix test_urandom_failure by eliminating unnecessary dependency ctypes_support -> ctypes.util -> tempfile -> random -> urandom diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -1,4 +1,3 @@ - """ This file provides some support for things like standard_c_lib and errno access, as portable as possible """ @@ -22,7 +21,7 @@ standard_c_lib._errno.argtypes = None def _where_is_errno(): return standard_c_lib._errno() - + elif sys.platform in ('linux2', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None @@ -42,5 +41,3 @@ def set_errno(value): errno_p = _where_is_errno() errno_p.contents.value = value - - From noreply at buildbot.pypy.org Tue Apr 22 07:40:41 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 22 Apr 2014 07:40:41 +0200 (CEST) Subject: [pypy-commit] pypy default: whoops Message-ID: <20140422054041.593AE1C0543@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70844:632b90b2a5af Date: 2014-04-22 08:38 +0300 http://bitbucket.org/pypy/pypy/changeset/632b90b2a5af/ Log: whoops diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: f659fa9a253f +.. 
startrev: ba569fe1efdb From noreply at buildbot.pypy.org Tue Apr 22 10:26:43 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 22 Apr 2014 10:26:43 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: at least a minimal test for VArrayStructStateInfo Message-ID: <20140422082643.815281D281D@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70845:2da3eefd5c41 Date: 2014-04-22 10:00 +0200 http://bitbucket.org/pypy/pypy/changeset/2da3eefd5c41/ Log: at least a minimal test for VArrayStructStateInfo diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -2,7 +2,7 @@ import py from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \ VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes, GenerateGuardState, \ - VirtualStatesCantMatch + VirtualStatesCantMatch, VArrayStructStateInfo from rpython.jit.metainterp.optimizeopt.optimizer import OptValue from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr from rpython.rtyper.lltypesystem import lltype, llmemory @@ -170,9 +170,12 @@ self.check_invalid(info2, info1) self.check_no_guards(info1, info1) self.check_no_guards(info2, info2) - fldtst(VArrayStateInfo(42), VArrayStateInfo(42)) - fldtst(VStructStateInfo(42, [7]), VStructStateInfo(42, [7])) - fldtst(VirtualStateInfo(ConstInt(42), [7]), VirtualStateInfo(ConstInt(42), [7])) + fakedescr = object() + fielddescr = object() + fldtst(VArrayStateInfo(fakedescr), VArrayStateInfo(fakedescr)) + fldtst(VStructStateInfo(fakedescr, [fielddescr]), VStructStateInfo(fakedescr, [fielddescr])) + fldtst(VirtualStateInfo(ConstInt(42), [fielddescr]), VirtualStateInfo(ConstInt(42), [fielddescr])) + 
fldtst(VArrayStructStateInfo(fakedescr, [[fielddescr]]), VArrayStructStateInfo(fakedescr, [[fielddescr]])) def test_known_class_generalization(self): knownclass1 = OptValue(BoxPtr()) From noreply at buildbot.pypy.org Tue Apr 22 10:26:44 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 22 Apr 2014 10:26:44 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: properly generate guards for virtual dicts Message-ID: <20140422082644.AEFC51D281D@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70846:e973dd1b60d5 Date: 2014-04-22 10:25 +0200 http://bitbucket.org/pypy/pypy/changeset/e973dd1b60d5/ Log: properly generate guards for virtual dicts extremely advanced technology diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -10,7 +10,7 @@ equaloplists from rpython.jit.metainterp.optimizeopt.intutils import IntBound from rpython.jit.metainterp.optimizeopt.virtualize import (VirtualValue, - VArrayValue, VStructValue) + VArrayValue, VStructValue, VArrayStructValue) from rpython.jit.metainterp.history import TreeLoop, JitCellToken from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import ResOperation, rop @@ -535,6 +535,34 @@ """ self.guards(info1, info2, value1, expected, [self.nodebox]) + def test_generate_guards_on_virtual_fields_matches_arraystruct(self): + innervalue1 = OptValue(self.nodebox) + constclassbox = self.cpu.ts.cls_of_box(self.nodebox) + innervalue1.make_constant_class(constclassbox, -1) + innerinfo1 = NotVirtualStateInfo(innervalue1) + innerinfo1.position = 1 + innerinfo2 = NotVirtualStateInfo(OptValue(self.nodebox)) + innerinfo2.position = 1 + + arraydescr = object() + fielddescr = 
object() + + info1 = VArrayStructStateInfo(arraydescr, [[fielddescr]]) + info1.fieldstate = [innerinfo1] + + info2 = VArrayStructStateInfo(arraydescr, [[fielddescr]]) + info2.fieldstate = [innerinfo2] + + value1 = VArrayStructValue(arraydescr, 1, self.nodebox) + value1._items[0][fielddescr] = OptValue(self.nodebox) + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, value1, expected, [self.nodebox]) + # _________________________________________________________________________ # the below tests don't really have anything to do with guard generation diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -223,7 +223,6 @@ self.fielddescrs = fielddescrs def _generate_guards(self, other, value, state): - # XXX this needs a test in test_virtualstate!!! 
if not isinstance(other, VArrayStructStateInfo): raise VirtualStatesCantMatch("other is not an VArrayStructStateInfo") if self.arraydescr is not other.arraydescr: @@ -233,14 +232,19 @@ raise VirtualStatesCantMatch("other has a different length") p = 0 + v = None for i in range(len(self.fielddescrs)): if len(self.fielddescrs[i]) != len(other.fielddescrs[i]): raise VirtualStatesCantMatch("other has a different length") for j in range(len(self.fielddescrs[i])): - if self.fielddescrs[i][j] is not other.fielddescrs[i][j]: + descr = self.fielddescrs[i][j] + if descr is not other.fielddescrs[i][j]: raise VirtualStatesCantMatch("other is a different kind of array") + if value is not None: + assert isinstance(value, virtualize.VArrayStructValue) + v = value._items[i][descr] self.fieldstate[p].generate_guards(other.fieldstate[p], - None, # XXX + v, state) p += 1 From noreply at buildbot.pypy.org Tue Apr 22 10:26:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 22 Apr 2014 10:26:47 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Update these two numbers Message-ID: <20140422082647.85F061D281D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r488:45ee5c962040 Date: 2014-04-22 10:26 +0200 http://bitbucket.org/pypy/pypy.org/changeset/45ee5c962040/ Log: Update these two numbers diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $46527 of $105000 (44.3%) + $50742 of $105000 (48.3%)

      diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -15,7 +15,7 @@ - $45872 of $60000 (76.5%) + $47796 of $60000 (79.6%)
      From noreply at buildbot.pypy.org Tue Apr 22 10:34:21 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 22 Apr 2014 10:34:21 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: a whatsnew entry Message-ID: <20140422083421.7DCFB1D281D@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70847:12df2e35d942 Date: 2014-04-22 10:29 +0200 http://bitbucket.org/pypy/pypy/changeset/12df2e35d942/ Log: a whatsnew entry diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -133,4 +133,7 @@ .. branch: ast-issue1673 fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field \ No newline at end of file +there is missing field + +.. branch: small-unroll-improvements +Improve optimiziation of small allocation-heavy loops in the JIT From noreply at buildbot.pypy.org Tue Apr 22 10:34:25 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 22 Apr 2014 10:34:25 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: merge default Message-ID: <20140422083425.E0FE01D281D@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70848:3f7f792e6224 Date: 2014-04-22 10:33 +0200 http://bitbucket.org/pypy/pypy/changeset/3f7f792e6224/ Log: merge default diff too long, truncating to 2000 out of 7314 lines diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib_pypy/ctypes_support.py 
b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -1,4 +1,3 @@ - """ This file provides some support for things like standard_c_lib and errno access, as portable as possible """ @@ -22,7 +21,7 @@ standard_c_lib._errno.argtypes = None def _where_is_errno(): return standard_c_lib._errno() - + elif sys.platform in ('linux2', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None @@ -42,5 +41,3 @@ def set_errno(value): errno_p = _where_is_errno() errno_p.contents.value = value - - diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -217,7 +217,7 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withmethodcache", True), + ("objspace.std.withtypeversion", True), ]), BoolOption("withrangelist", diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -12,9 +12,9 @@ assert conf.objspace.usemodules.gc conf.objspace.std.withmapdict = True - assert conf.objspace.std.withmethodcache + assert conf.objspace.std.withtypeversion conf = get_pypy_config() - conf.objspace.std.withmethodcache = False + conf.objspace.std.withtypeversion = False py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") def test_conflicting_gcrootfinder(): diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,10 +1,12 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. 
_`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py -.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ .. _`pypy/bin/pyinteractive.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/pyinteractive.py @@ -35,7 +37,6 @@ .. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py .. _`pypy/interpreter/pyparser`: @@ -49,21 +50,21 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. _`pypy/module/cppyy/capi/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/__init__.py +.. _`pypy/module/cppyy/capi/builtin_capi.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/builtin_capi.py +.. 
_`pypy/module/cppyy/include/capi.h`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/include/capi.h +.. _`pypy/module/test_lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/test_lib_pypy/ .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ -.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ .. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ -.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. _`pypy/objspace/std/bytesobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/bytesobject.py .. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py .. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py -.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. _`pypy/objspace/std/strbufobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/strbufobject.py .. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py -.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py -.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py .. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ -.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. 
_`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`rpython/annotator`: .. _`rpython/annotator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/annotator/ @@ -75,6 +76,11 @@ .. _`rpython/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/config/translationoption.py .. _`rpython/flowspace/`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/ .. _`rpython/flowspace/model.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/model.py +.. _`rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/ +.. _`rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/generation.py +.. _`rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/hybrid.py +.. _`rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/minimarkpage.py +.. _`rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/semispace.py .. _`rpython/rlib`: .. _`rpython/rlib/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/ .. _`rpython/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/listsort.py @@ -93,16 +99,12 @@ .. _`rpython/rtyper/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ .. _`rpython/rtyper/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/ .. _`rpython/rtyper/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/lltype.py -.. _`rpython/rtyper/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/ -.. _`rpython/rtyper/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/generation.py -.. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py -.. 
_`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py -.. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py .. _`rpython/rtyper/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rtyper.py .. _`rpython/rtyper/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/test/test_llinterp.py +.. _`rpython/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/rpython/tool/algo/ .. _`rpython/translator`: .. _`rpython/translator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/ .. _`rpython/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/backendopt/ diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -9,9 +9,3 @@ distribution.rst - dot-net.rst - - - - - diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -742,9 +742,9 @@ Testing modules in ``lib_pypy/`` -------------------------------- -You can go to the `lib_pypy/pypy_test/`_ directory and invoke the testing tool +You can go to the `pypy/module/test_lib_pypy/`_ directory and invoke the testing tool ("py.test" or "python ../../pypy/test_all.py") to run tests against the -lib_pypy hierarchy. Note, that tests in `lib_pypy/pypy_test/`_ are allowed +lib_pypy hierarchy. 
Note, that tests in `pypy/module/test_lib_pypy/`_ are allowed and encouraged to let their tests run at interpreter level although `lib_pypy/`_ modules eventually live at PyPy's application level. This allows us to quickly test our python-coded reimplementations @@ -835,15 +835,6 @@ web interface. .. _`development tracker`: https://bugs.pypy.org/ - -use your codespeak login or register ------------------------------------- - -If you have an existing codespeak account, you can use it to login within the -tracker. Else, you can `register with the tracker`_ easily. - - -.. _`register with the tracker`: https://bugs.pypy.org/user?@template=register .. _`roundup`: http://roundup.sourceforge.net/ diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst --- a/pypy/doc/config/opt.rst +++ b/pypy/doc/config/opt.rst @@ -46,5 +46,5 @@ The default level is `2`. -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser garbage collector`: http://hboehm.info/gc/ .. _`custom garbage collectors`: ../garbage_collection.html diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.txt --- a/pypy/doc/config/translation.backendopt.txt +++ b/pypy/doc/config/translation.backendopt.txt @@ -1,5 +1,5 @@ This group contains options about various backend optimization passes. Most of them are described in the `EU report about optimization`_ -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. 
_`EU report about optimization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -15,21 +15,21 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer - Matti Picus Hakan Ardo Benjamin Peterson - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns - Manuel Jacob Eric van Riet Paap Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen - Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann @@ -38,23 +38,23 @@ Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak - Romain Guillebert Guido Wesdorp Lawrence Oluyede - Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen Jason Creighton Alex Martelli @@ -71,6 +71,7 @@ Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas @@ -87,6 +88,7 @@ Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin Stefano Rivera @@ -95,13 +97,17 @@ Georg Brandl Bert Freudenberg Stian Andreassen + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. 
Muller - Laurence Tratt + Jeremy Thurgood + Gregor Wegberg Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -110,9 +116,7 @@ David Ripton Dusty Phillips Lukas Renggli - Edd Barrett Guenter Jantzen - Tobias Oberstein Ned Batchelder Amit Regmi Ben Young @@ -123,7 +127,6 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -134,7 +137,6 @@ Olivier Dormond Jared Grubb Karl Bartel - Tobias Pape Brian Dorsey Victor Stinner Andrews Medina @@ -146,9 +148,9 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen Jonathan David Riehl Stanislaw Halik @@ -161,7 +163,9 @@ Alexander Sedov Corbin Simpson Christopher Pope + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan Alexis Daboville @@ -170,6 +174,7 @@ Karl Ramm Pieter Zieschang Gabriel + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -180,6 +185,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -200,7 +206,6 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -212,28 +217,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -241,6 +253,7 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths Mike Bayer Flavio Percoco Kristoffer 
Kleine diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst --- a/pypy/doc/cppyy_backend.rst +++ b/pypy/doc/cppyy_backend.rst @@ -51,3 +51,6 @@ to ``PATH``). In case of the former, include files are expected under ``$ROOTSYS/include`` and libraries under ``$ROOTSYS/lib``. + + +.. include:: _ref.txt diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -106,23 +106,43 @@ Differences related to garbage collection strategies ---------------------------------------------------- -Most of the garbage collectors used or implemented by PyPy are not based on +The garbage collectors used or implemented by PyPy are not based on reference counting, so the objects are not freed instantly when they are no longer reachable. The most obvious effect of this is that files are not promptly closed when they go out of scope. For files that are opened for writing, data can be left sitting in their output buffers for a while, making -the on-disk file appear empty or truncated. +the on-disk file appear empty or truncated. Moreover, you might reach your +OS's limit on the number of concurrently opened files. -Fixing this is essentially not possible without forcing a +Fixing this is essentially impossible without forcing a reference-counting approach to garbage collection. The effect that you get in CPython has clearly been described as a side-effect of the implementation and not a language design decision: programs relying on this are basically bogus. It would anyway be insane to try to enforce CPython's behavior in a language spec, given that it has no chance to be adopted by Jython or IronPython (or any other port of Python to Java or -.NET, like PyPy itself). +.NET). 
-This affects the precise time at which ``__del__`` methods are called, which +Even the naive idea of forcing a full GC when we're getting dangerously +close to the OS's limit can be very bad in some cases. If your program +leaks open files heavily, then it would work, but force a complete GC +cycle every n'th leaked file. The value of n is a constant, but the +program can take an arbitrary amount of memory, which makes a complete +GC cycle arbitrarily long. The end result is that PyPy would spend an +arbitrarily large fraction of its run time in the GC --- slowing down +the actual execution, not by 10% nor 100% nor 1000% but by essentially +any factor. + +To the best of our knowledge this problem has no better solution than +fixing the programs. If it occurs in 3rd-party code, this means going +to the authors and explaining the problem to them: they need to close +their open files in order to run on any non-CPython-based implementation +of Python. + +--------------------------------- + +Here are some more technical details. This issue affects the precise +time at which ``__del__`` methods are called, which is not reliable in PyPy (nor Jython nor IronPython). It also means that weak references may stay alive for a bit longer than expected. This makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less @@ -315,6 +335,15 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* on CPython, ``[].__add__`` is a ``method-wrapper``, and + ``list.__add__`` is a ``slot wrapper``. On PyPy these are normal + bound or unbound method objects. This can occasionally confuse some + tools that inspect built-in types. For example, the standard + library ``inspect`` module has a function ``ismethod()`` that returns + True on unbound method objects but False on method-wrappers or slot + wrappers. On PyPy we can't tell the difference, so + ``ismethod([].__add__) == ismethod(list.__add__) == True``. 
+ * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. Mutating the dict will change the type and vice versa. For builtin types, a dictionary will be returned that diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -47,7 +47,7 @@ `pypy/tool/`_ various utilities and hacks used from various places -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic +`rpython/tool/algo/`_ general-purpose algorithmic and mathematic tools `pypy/tool/pytest/`_ support code for our `testing methods`_ @@ -129,3 +129,4 @@ .. _Mono: http://www.mono-project.com/ .. _`"standard library"`: rlib.html .. _`graph viewer`: getting-started-dev.html#try-out-the-translator +.. include:: _ref.txt diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,5 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - ============================= lib_pypy/distributed features ============================= diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -17,7 +17,7 @@ Read more in the `EuroPython 2006 sprint report`_. -.. _`EuroPython 2006 sprint report`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt +.. 
_`EuroPython 2006 sprint report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt PyPy at XP 2006 and Agile 2006 ================================================================== @@ -41,8 +41,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. _report: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.html -.. _announcement: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.txt PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -114,7 +114,7 @@ said they were interested in the outcome and would keep an eye on its progress. Read the `talk slides`_. -.. _`talk slides`: http://codespeak.net/pypy/extradoc/talk/solutions-linux-paris-2006.html +.. _`talk slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/solutions-linux-paris-2006.html PyPy Sprint in Palma De Mallorca 23rd - 29th January 2006 @@ -129,9 +129,9 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/sprint-announcement.html -.. 
_`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q1/002746.html -.. _`one for the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2006q1/002749.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.txt +.. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html +.. _`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html Preliminary EU reports released =============================== @@ -155,8 +155,8 @@ Michael and Carl have written a `report about the first half`_ and `one about the second half`_ of the sprint. *(12/18/2005)* -.. _`report about the first half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002656.html -.. _`one about the second half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002660.html +.. _`report about the first half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002656.html +.. _`one about the second half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002660.html PyPy release 0.8.0 =================== @@ -187,12 +187,12 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: http://codespeak.net/pypy/extradoc/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.txt .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style -.. _`report about day one`: http://codespeak.net/pipermail/pypy-dev/2005q4/002510.html -.. _`one about day two and three`: http://codespeak.net/pipermail/pypy-dev/2005q4/002512.html -.. _`the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2005q4/002514.html +.. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html +.. 
_`one about day two and three`: https://mail.python.org/pipermail/pypy-dev/2005-October/002512.html +.. _`the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2005-October/002514.html PyPy release 0.7.0 =================== @@ -217,15 +217,13 @@ Its main focus is translation of the whole PyPy interpreter to a low level language and reaching 2.4.1 Python compliance. The goal of the sprint is to release a first self-contained -PyPy-0.7 version. Carl has written a report about `day 1 - 3`_, -there are `some pictures`_ online and a `heidelberg summary report`_ -detailing some of the works that led to the successful release -of `pypy-0.7.0`_! +PyPy-0.7 version. Carl has written a report about `day 1 - 3`_ +and a `heidelberg summary report`_ detailing some of the works +that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.html -.. _`PyPy sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-sprint.html -.. _`day 1 - 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002287.html -.. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt +.. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html PyPy Hildesheim2 finished: first self-contained PyPy run! =========================================================== @@ -233,20 +231,16 @@ Up until 31st August we were in a PyPy sprint at `Trillke-Gut`_. Carl has written a `report about day 1`_, Holger about `day 2 and day 3`_ and Carl again about `day 4 and day 5`_, -On `day 6`_ Holger reports the `breakthrough`_: PyPy runs -on its own! Hurray_!. And Carl finally reports about the winding +On `day 6`_ Holger reports the breakthrough: PyPy runs +on its own! Hurray!. 
And Carl finally reports about the winding down of `day 7`_ which saw us relaxing, discussing and generally -having a good time. You might want to look at the selected -`pictures from the sprint`_. +having a good time. -.. _`report about day 1`: http://codespeak.net/pipermail/pypy-dev/2005q3/002217.html -.. _`day 2 and day 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002220.html -.. _`day 4 and day 5`: http://codespeak.net/pipermail/pypy-dev/2005q3/002234.html -.. _`day 6`: http://codespeak.net/pipermail/pypy-dev/2005q3/002239.html -.. _`day 7`: http://codespeak.net/pipermail/pypy-dev/2005q3/002245.html -.. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg -.. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html -.. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ +.. _`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html +.. _`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html +.. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html +.. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html +.. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html .. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished @@ -264,15 +258,15 @@ the LLVM backends and type inference in general. *(07/13/2005)* -.. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html -.. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html -.. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev +.. _`day 1`: https://mail.python.org/pipermail/pypy-dev/2005-June/002169.html +.. _`day 2`: https://mail.python.org/pipermail/pypy-dev/2005-June/002171.html +.. 
_`day 3`: https://mail.python.org/pipermail/pypy-dev/2005-June/002172.html +.. _`pypy-dev`: https://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-announcement.html -.. _`list of people coming`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-people.html +.. _`sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-announcement.html +.. _`list of people coming`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-people.html Duesseldorf PyPy sprint 2-9 June 2006 ================================================================== @@ -285,8 +279,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.txt +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy at XP 2006 and Agile 2006 diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -79,7 +79,7 @@ .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf -.. 
_`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 +.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf .. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf @@ -356,8 +356,6 @@ .. _`transparent dynamic optimization`: http://www.hpl.hp.com/techreports/1999/HPL-1999-77.pdf .. _Dynamo: http://www.hpl.hp.com/techreports/1999/HPL-1999-78.pdf .. _testdesign: coding-guide.html#test-design -.. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html -.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ .. _IronPython: http://ironpython.codeplex.com/ .. _`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -318,7 +318,7 @@ To read more about the RPython limitations read the `RPython description`_. -.. _`RPython description`: coding-guide.html#restricted-python +.. _`RPython description`: coding-guide.html#rpython-definition --------------------------------------------------------------- Does RPython have anything to do with Zope's Restricted Python? @@ -459,6 +459,19 @@ .. _`getting-started`: getting-started-python.html +------------------------------------------ +Compiling PyPy swaps or runs out of memory +------------------------------------------ + +This is documented (here__ and here__). 
It needs 4 GB of RAM to run +"rpython targetpypystandalone" on top of PyPy, a bit more when running +on CPython. If you have less than 4 GB it will just swap forever (or +fail if you don't have enough swap). On 32-bit, divide the numbers by +two. + +.. __: http://pypy.org/download.html#building-from-source +.. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter + .. _`how do I compile my own interpreters`: ------------------------------------- diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -222,7 +222,7 @@ PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use for other projects. +`_ and use for other projects. The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -264,7 +264,7 @@ interpreter. .. _`py.test testing tool`: http://pytest.org -.. _`py.test usage and invocations`: http://pytest.org/usage.html#usage +.. _`py.test usage and invocations`: http://pytest.org/latest/usage.html#usage Special Introspection Features of the Untranslated Python Interpreter --------------------------------------------------------------------- diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - .. _glossary: ******** diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -92,7 +92,9 @@ `D07.1 Massive Parallelism and Translation Aspects`_ is a report about PyPy's optimization efforts, garbage collectors and massive parallelism (stackless) features. This report refers to the paper `PyPy's approach -to virtual machine construction`_. 
*(2007-02-28)* +to virtual machine construction`_. Extends the content previously +available in the document "Memory management and threading models as +translation aspects -- solutions and challenges". *(2007-02-28)* diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -256,7 +256,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + + diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -177,6 +177,12 @@ .. __: https://bitbucket.org/pypy/extradoc/src/tip/talk/icooolps2009/bolz-tracing-jit-final.pdf -as well as the `blog posts with the JIT tag.`__ +Chapters 5 and 6 of `Antonio Cuni's PhD thesis`__ contain an overview of how +Tracing JITs work in general and more informations about the concrete case of +PyPy's JIT. + +.. __: http://antocuni.eu/download/antocuni-phd-thesis.pdf + +The `blog posts with the JIT tag`__ might also contain additional information. .. __: http://morepypy.blogspot.com/search/label/jit diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -185,6 +185,6 @@ .. _`standard object space`: objspace.html#the-standard-object-space .. 
[D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy - EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf + EU-Report, 2007, https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf .. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -339,44 +339,35 @@ Object types ------------ -The larger part of the `pypy/objspace/std/`_ package defines and implements the -library of Python's standard built-in object types. Each type (int, float, -list, tuple, str, type, etc.) is typically implemented by two modules: +The larger part of the `pypy/objspace/std/`_ package defines and +implements the library of Python's standard built-in object types. Each +type ``xxx`` (int, float, list, tuple, str, type, etc.) is typically +implemented in the module ``xxxobject.py``. -* the *type specification* module, which for a type ``xxx`` is called ``xxxtype.py``; +The ``W_AbstractXxxObject`` class, when present, is the abstract base +class, which mainly defines what appears on the Python-level type +object. There are then actual implementations as subclasses, which are +called ``W_XxxObject`` or some variant for the cases where we have +several different implementations. For example, +`pypy/objspace/std/bytesobject.py`_ defines ``W_AbstractBytesObject``, +which contains everything needed to build the ``str`` app-level type; +and there are subclasses ``W_BytesObject`` (the usual string) and +``W_StringBufferObject`` (a special implementation tweaked for repeated +additions, in `pypy/objspace/std/strbufobject.py`_). For mutable data +types like lists and dictionaries, we have a single class +``W_ListObject`` or ``W_DictMultiObject`` which has an indirection to +the real data and a strategy; the strategy can change as the content of +the object changes. 
-* the *implementation* module, called ``xxxobject.py``. - -The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods -specific to lists, like ``append()``. - -A particular method implemented by all types is the ``__new__()`` special -method, which in Python's new-style-classes world is responsible for creating -an instance of the type. In PyPy, ``__new__()`` locates and imports the module -implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ -defines ``__new__()`` to import the class ``W_TupleObject`` from -`pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a -"real" implementation of tuples: the way the data is stored in the -``W_TupleObject`` class, how the operations work, etc. - -The goal of the above module layout is to cleanly separate the Python -type object, visible to the user, and the actual implementation of its -instances. It is possible to provide *several* implementations of the -instances of the same Python type, by writing several ``W_XxxObject`` -classes. Every place that instantiates a new object of that Python type -can decide which ``W_XxxObject`` class to instantiate. - -From the user's point of view, the multiple internal ``W_XxxObject`` -classes are not visible: they are still all instances of exactly the -same Python type. PyPy knows that (e.g.) 
the application-level type of -its interpreter-level ``W_StringObject`` instances is str because -there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all -other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringtype.py`_. +From the user's point of view, even when there are several +``W_AbstractXxxObject`` subclasses, this is not visible: at the +app-level, they are still all instances of exactly the same Python type. +PyPy knows that (e.g.) the application-level type of its +interpreter-level ``W_BytesObject`` instances is str because there is a +``typedef`` class attribute in ``W_BytesObject`` which points back to +the string type specification from `pypy/objspace/std/bytesobject.py`_; +all other implementations of strings use the same ``typedef`` from +`pypy/objspace/std/bytesobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -387,6 +378,9 @@ Multimethods ------------ +*Note: multimethods are on the way out. Although they look cool, +they failed to provide enough benefits.* + The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. For a description of the multimethod variant that we implemented and which features it supports, @@ -491,7 +485,7 @@ Introduction ------------ -The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `rpython/flowspace/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. 
diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py +.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/extfunc.py diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,30 +42,30 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): http://codespeak.net/pypy/extradoc/sprintinfo/HildesheimReport.html - .. _Gothenburg (May 2003): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2003-sprintreport.txt - .. _LovainLaNeuve (June 2003): http://codespeak.net/pypy/extradoc/sprintinfo/LouvainLaNeuveReport.txt - .. _Berlin (Sept 2003): http://codespeak.net/pypy/extradoc/sprintinfo/BerlinReport.txt - .. _Amsterdam (Dec 2003): http://codespeak.net/pypy/extradoc/sprintinfo/AmsterdamReport.txt - .. _Vilnius (Nov 2004): http://codespeak.net/pypy/extradoc/sprintinfo/vilnius-2004-sprintreport.txt - .. _Leysin (Jan 2005): http://codespeak.net/pypy/extradoc/sprintinfo/LeysinReport.txt - .. 
_PyCon/Washington (March 2005): http://codespeak.net/pypy/extradoc/sprintinfo/pycon_sprint_report.txt - .. _Europython/Gothenburg (June 2005): http://codespeak.net/pypy/extradoc/sprintinfo/ep2005-sprintreport.txt - .. _Hildesheim (July 2005): http://codespeak.net/pypy/extradoc/sprintinfo/hildesheim2005-sprintreport.txt - .. _Heidelberg (Aug 2005): http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.txt - .. _Paris (Oct 2005): http://codespeak.net/pypy/extradoc/sprintinfo/paris/paris-report.txt - .. _Gothenburg (Dec 2005): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt - .. _Mallorca (Jan 2006): http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/mallorca-sprintreport.txt - .. _LouvainLaNeuve (March 2006): http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.txt - .. _Leysin (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2006-sprintreport.txt - .. _Tokyo (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/sprint-report.txt - .. _Düsseldorf (June 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/report1.txt - .. _Europython/Geneva (July 2006): http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt - .. _Düsseldorf (October 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/report.txt - .. _`Leysin (January 2007)`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/report.txt - .. _Hildesheim (Feb 2007): http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/sprint-report.txt - .. _`EU report writing sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/eu-report-sprint-report.txt - .. _`PyCon/Dallas (Feb 2006)`: http://codespeak.net/pypy/extradoc/sprintinfo/pycon06/sprint-report.txt + .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.txt + .. 
_Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt + .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt + .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt + .. _Amsterdam (Dec 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/AmsterdamReport.txt + .. _Vilnius (Nov 2004): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/vilnius-2004-sprintreport.txt + .. _Leysin (Jan 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LeysinReport.txt + .. _PyCon/Washington (March 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon_sprint_report.txt + .. _Europython/Gothenburg (June 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ep2005-sprintreport.txt + .. _Hildesheim (July 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/hildesheim2005-sprintreport.txt + .. _Heidelberg (Aug 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt + .. _Paris (Oct 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris/paris-report.txt + .. _Gothenburg (Dec 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt + .. _Mallorca (Jan 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/mallorca-sprintreport.txt + .. _LouvainLaNeuve (March 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt + .. _Leysin (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2006-sprintreport.txt + .. _Tokyo (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/tokyo/sprint-report.txt + .. _Düsseldorf (June 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/report1.txt + .. 
_Europython/Geneva (July 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt + .. _Düsseldorf (October 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006b/report.txt + .. _`Leysin (January 2007)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2007/report.txt + .. _Hildesheim (Feb 2007): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/sprint-report.txt + .. _`EU report writing sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/eu-report-sprint-report.txt + .. _`PyCon/Dallas (Feb 2006)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon06/sprint-report.txt .. _`Göteborg (November 2007)`: http://morepypy.blogspot.com/2007_11_01_archive.html .. _`Leysin (January 2008)`: http://morepypy.blogspot.com/2008/01/leysin-winter-sport-sprint-started.html .. _`Berlin (May 2008)`: http://morepypy.blogspot.com/2008_05_01_archive.html diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst --- a/pypy/doc/statistic/index.rst +++ b/pypy/doc/statistic/index.rst @@ -1,3 +1,7 @@ +.. warning:: + + This page is no longer updated, of historical interest only. + ======================= PyPy Project Statistics ======================= diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -15,19 +15,19 @@ user, describes work in progress, and finally gives references to more implementation details. -This work was done by Remi Meier and Armin Rigo. Thanks to all donors -for crowd-funding the work so far! Please have a look at the 2nd call -for donation (*not ready yet*) +This work was done mostly by Remi Meier and Armin Rigo. Thanks to all +donors for crowd-funding the work so far! Please have a look at the +`2nd call for donation`_. -.. .. _`2nd call for donation`: http://pypy.org/tmdonate2.html +.. 
_`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction ============ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% of the speed of a -regular PyPy, comparing the JITting version in both cases. It is called +listed below, it should be in theory within 25%-50% slower than a +regular PyPy, comparing the JIT version in both cases. It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). @@ -40,7 +40,7 @@ ``pypy-stm`` project is to improve what is so far the state-of-the-art for using multiple CPUs, which for cases where separate processes don't work is done by writing explicitly multi-threaded programs. Instead, -``pypy-stm`` is flushing forward an approach to *hide* the threads, as +``pypy-stm`` is pushing forward an approach to *hide* the threads, as described below in `atomic sections`_. @@ -55,9 +55,9 @@ interested in trying it out, you can download a Ubuntu 12.04 binary here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, but not stripped of debug symbols). The current version supports four -"segments", which means that it will run up to four threads in parallel -(in other words, you get a GIL effect again, but only if trying to -execute more than 4 threads). +"segments", which means that it will run up to four threads in parallel, +in other words it is running a thread pool up to 4 threads emulating normal +threads. To build a version from sources, you first need to compile a custom version of clang; we recommend downloading `llvm and clang like @@ -80,6 +80,10 @@ * So far, small examples work fine, but there are still a number of bugs. We're busy fixing them. +* Currently limited to 1.5 GB of RAM (this is just a parameter in + `core.h`__). Memory overflows are not detected correctly, so may + cause segmentation faults. 
+ * The JIT warm-up time is abysmal (as opposed to the regular PyPy's, which is "only" bad). Moreover, you should run it with a command like ``pypy-stm --jit trace_limit=60000 args...``; the default value of @@ -95,9 +99,11 @@ programs that modify large lists or dicts, suffer from these missing optimizations. -* The GC has no support for destructors: the ``__del__`` method is - never called (including on file objects, which won't be closed for - you). This is of course temporary. +* The GC has no support for destructors: the ``__del__`` method is never + called (including on file objects, which won't be closed for you). + This is of course temporary. Also, weakrefs might appear to work a + bit strangely for now (staying alive even though ``gc.collect()``, or + even dying but then un-dying for a short time before dying again). * The STM system is based on very efficient read/write barriers, which are mostly done (their placement could be improved a bit in @@ -120,6 +126,7 @@ probably, several days or more. .. _`report bugs`: https://bugs.pypy.org/ +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h @@ -194,9 +201,9 @@ unchanged. This capability can be hidden in a library or in the framework you use; the end user's code does not need to be explicitly aware of using threads. For a simple example of this, see -`lib_pypy/transaction.py`_. The idea is that if you have a program -where the function ``f(key, value)`` runs on every item of some big -dictionary, you can replace the loop with:: +`transaction.py`_ in ``lib_pypy``. The idea is that if you have a +program where the function ``f(key, value)`` runs on every item of some +big dictionary, you can replace the loop with:: for key, value in bigdict.items(): transaction.add(f, key, value) @@ -217,7 +224,7 @@ is likely to be found, and communicates it to the system, using for example the ``transaction.add()`` scheme. -.. 
_`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +.. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py .. _OpenMP: http://en.wikipedia.org/wiki/OpenMP ================== diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy @@ -51,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines)) + content = ".. This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -380,7 +380,7 @@ The RPython Typer ================= -https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ The RTyper is the first place where the choice of backend makes a difference; as outlined above we are assuming that ANSI C is the target. @@ -603,7 +603,7 @@ - using the `Boehm-Demers-Weiser conservative garbage collector`_ - using one of our custom `exact GCs implemented in RPython`_ -.. _`Boehm-Demers-Weiser conservative garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser conservative garbage collector`: http://hboehm.info/gc/ .. 
_`exact GCs implemented in RPython`: garbage_collection.html Almost all application-level Python code allocates objects at a very fast @@ -621,7 +621,7 @@ The C Back-End ============== -https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.3.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -135,5 +135,20 @@ fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when there is missing field +.. branch: issue1514 +Fix issues with reimporting builtin modules + +.. branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. branch: numpy-speed +Separate iterator from its state so jit can optimize better + +.. branch: numpy-searchsorted +Implement searchsorted without sorter kwarg + +.. branch: openbsd-lib-prefix +add 'lib' prefix to link libraries on OpenBSD + .. branch: small-unroll-improvements Improve optimiziation of small allocation-heavy loops in the JIT diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,139 +1,11 @@ ======================= -What's new in PyPy 2.2+ +What's new in PyPy 2.3+ ======================= -.. this is a revision shortly after release-2.2.x -.. startrev: 4cd1bc8b3111 +.. this is a revision shortly after release-2.3.x +.. startrev: ba569fe1efdb -.. branch: release-2.2.x -.. branch: numpy-newbyteorder -Clean up numpy types, add newbyteorder functionality - -.. branch: windows-packaging -Package tk/tcl runtime with win32 - -.. 
branch: armhf-singlefloat -JIT support for singlefloats on ARM using the hardfloat ABI - -.. branch: voidtype_strformat -Better support for record numpy arrays - -.. branch: osx-eci-frameworks-makefile -OSX: Ensure frameworks end up in Makefile when specified in External compilation info - -.. branch: less-stringly-ops -Use subclasses of SpaceOperation instead of SpaceOperator objects. -Random cleanups in flowspace and annotator. - -.. branch: ndarray-buffer -adds support for the buffer= argument to the ndarray ctor - -.. branch: better_ftime_detect2 -On OpenBSD do not pull in libcompat.a as it is about to be removed. -And more generally, if you have gettimeofday(2) you will not need ftime(3). - -.. branch: timeb_h -Remove dependency upon on OpenBSD. This will be disappearing -along with libcompat.a. - -.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 -Fix 3 broken links on PyPy published papers in docs. - -.. branch: jit-ordereddict - -.. branch: refactor-str-types -Remove multimethods on str/unicode/bytearray and make the implementations share code. - -.. branch: remove-del-from-generatoriterator -Speed up generators that don't yield inside try or wait blocks by skipping -unnecessary cleanup. - -.. branch: annotator -Remove FlowObjSpace. -Improve cohesion between rpython.flowspace and rpython.annotator. - -.. branch: detect-immutable-fields -mapdicts keep track of whether or not an attribute is every assigned to -multiple times. If it's only assigned once then an elidable lookup is used when -possible. - -.. branch: precompiled-headers -Create a Makefile using precompiled headers for MSVC platforms. -The downside is a messy nmake-compatible Makefile. Since gcc shows minimal -speedup, it was not implemented. - -.. branch: camelot -With a properly configured 256-color terminal (TERM=...-256color), the -Mandelbrot set shown during translation now uses a range of 50 colours. -Essential! - -.. 
branch: NonConstant -Simplify implementation of NonConstant. - -.. branch: array-propagate-len -Kill some guards and operations in JIT traces by adding integer bounds -propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). - -.. branch: optimize-int-and -Optimize away INT_AND with constant mask of 1s that fully cover the bitrange -of other operand. - -.. branch: bounds-int-add-or -Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the -operands are positive to kill some guards - -.. branch: remove-intlong-smm -kills int/long/smalllong/bool multimethods - -.. branch: numpy-refactor -Cleanup micronumpy module - -.. branch: int_w-refactor -In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. - -.. branch: test-58c3d8552833 -Fix for getarrayitem_gc_pure optimization - -.. branch: simple-range-strategy -Implements SimpleRangeListStrategy for case range(n) where n is a positive number. -Makes some traces nicer by getting rid of multiplication for calculating loop counter -and propagates that n > 0 further to get rid of guards. - -.. branch: popen-pclose -Provide an exit status for popen'ed RFiles via pclose - -.. branch: stdlib-2.7.6 -Update stdlib to v2.7.6 - -.. branch: virtual-raw-store-load -Support for virtualizing raw_store/raw_load operations - -.. branch: refactor-buffer-api -Separate the interp-level buffer API from the buffer type exposed to -app-level. The `Buffer` class is now used by `W_MemoryView` and -`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was -an alias to `Buffer`, which was wrappable itself. - -.. branch: improve-consecutive-dict-lookups -Improve the situation when dict lookups of the same key are performed in a chain - -.. 
branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 -.. branch: test_SetFromErrnoWithFilename_NULL -.. branch: test_SetFromErrnoWithFilename__tweaks - -.. branch: refactor_PyErr_SetFromErrnoWithFilename -Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext - -.. branch: win32-fixes4 -fix more tests for win32 - -.. branch: latest-improve-doc -Fix broken links in documentation - -.. branch: ast-issue1673 -fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field .. branch: small-unroll-improvements Improve optimiziation of small allocation-heavy loops in the JIT diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -86,11 +86,26 @@ option (this is the default at some optimization levels like ``-O1``, but unneeded for high-performance translations like ``-O2``). You may get it at -http://www.hpl.hp.com/personal/Hans_Boehm/gc/gc_source/gc-7.1.tar.gz +http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with -pypy. Unpack this folder in the base directory. Then open a command -prompt:: +pypy. Unpack this folder in the base directory. +The default GC_abort(...) function in misc.c will try to open a MessageBox. 
+You may want to disable this with the following patch:: + + --- a/misc.c Sun Apr 20 14:08:27 2014 +0300 + +++ b/misc.c Sun Apr 20 14:08:37 2014 +0300 + @@ -1058,7 +1058,7 @@ + #ifndef PCR + void GC_abort(const char *msg) + { + -# if defined(MSWIN32) + +# if 0 && defined(MSWIN32) + (void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK); + # else + GC_err_printf("%s\n", msg); + +Then open a command prompt:: cd gc-7.1 nmake -f NT_THREADS_MAKEFILE diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -337,6 +337,9 @@ return 'internal subclass of %s' % (Class.__name__,) wrappable_class_name._annspecialcase_ = 'specialize:memo' +class CannotHaveLock(Exception): + """Raised by space.allocate_lock() if we're translating.""" + # ____________________________________________________________ class ObjSpace(object): @@ -440,10 +443,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + assert reuse try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -458,15 +462,25 @@ raise oefmt(self.w_SystemError, "getbuiltinmodule() called with non-builtin module %s", name) + + # Add the module to sys.modules and initialize the module. The + # order is important to avoid recursions. + from pypy.interpreter.module import Module + if isinstance(w_mod, Module): + if not reuse and w_mod.startup_called: + # create a copy of the module. (see issue1514) eventlet + # patcher relies on this behaviour. 
+ w_mod2 = self.wrap(Module(self, w_name)) + self.setitem(w_modules, w_name, w_mod2) + w_mod.getdict(self) # unlazy w_initialdict + self.call_method(w_mod2.getdict(self), 'update', + w_mod.w_initialdict) + return w_mod2 + self.setitem(w_modules, w_name, w_mod) + w_mod.init(self) else: - # Initialize the module - from pypy.interpreter.module import Module - if isinstance(w_mod, Module): - w_mod.init(self) - - # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) - return w_mod + return w_mod def get_builtinmodule_to_install(self): """NOT_RPYTHON""" @@ -663,6 +677,11 @@ def __allocate_lock(self): from rpython.rlib.rthread import allocate_lock, error + # hack: we can't have prebuilt locks if we're translating. + # In this special situation we should just not lock at all + # (translation is not multithreaded anyway). + if not we_are_translated() and self.config.translating: + raise CannotHaveLock() try: return allocate_lock() except error: diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -14,6 +14,7 @@ # after startup(). 
w_initialdict = None lazy = False + submodule_name = None def __init__(self, space, w_name): """ NOT_RPYTHON """ @@ -31,6 +32,8 @@ space = self.space name = space.unwrap(self.w_name) for sub_name, module_cls in self.submodules.iteritems(): + if module_cls.submodule_name is None: + module_cls.submodule_name = sub_name module_name = space.wrap("%s.%s" % (name, sub_name)) m = module_cls(space, module_name) m.install() @@ -134,6 +137,8 @@ cls.loaders = loaders = {} pkgroot = cls.__module__ appname = cls.get_applevel_name() + if cls.submodule_name is not None: + appname += '.%s' % (cls.submodule_name,) for name, spec in cls.interpleveldefs.items(): loaders[name] = getinterpevalloader(pkgroot, spec) for name, spec in cls.appleveldefs.items(): diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -26,5 +26,7 @@ # did not crash - the same globals pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) - pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + assert lltype.typeOf(res) == rffi.LONG + assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') diff --git a/pypy/module/__pypy__/app_signal.py b/pypy/module/__pypy__/app_signal.py --- a/pypy/module/__pypy__/app_signal.py +++ b/pypy/module/__pypy__/app_signal.py @@ -1,4 +1,9 @@ -import __pypy__.thread +import thread +# ^^ relative import of __pypy__.thread. Note that some tests depend on +# this (test_enable_signals in test_signal.py) to work properly, +# otherwise they get caught in some deadlock waiting for the import +# lock... + class SignalsEnabled(object): '''A context manager to use in non-main threads: @@ -8,7 +13,7 @@ that is within a "with signals_enabled:". 
This other thread should be ready to handle unexpected exceptions that the signal handler might raise --- notably KeyboardInterrupt.''' - __enter__ = __pypy__.thread._signals_enter - __exit__ = __pypy__.thread._signals_exit + __enter__ = thread._signals_enter + __exit__ = thread._signals_exit signals_enabled = SignalsEnabled() diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -8,6 +8,7 @@ def test_signal(self): from __pypy__ import thread + assert type(thread.signals_enabled).__module__ == '__pypy__.thread' with thread.signals_enabled: pass # assert did not crash diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1091,7 +1091,7 @@ def test_read_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1101,7 +1101,7 @@ def test_read_variable_as_unknown_length_array(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1113,7 +1113,7 @@ def test_write_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 
'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -48,11 +48,9 @@ raises(OverflowError, _cffi_backend.newp, SHORT_ARRAY, [-40000]) def test_fast_init_longlong_from_list(self): - if type(2 ** 50) is long: - large_int = 2 ** 30 - else: - large_int = 2 ** 50 import _cffi_backend + import sys + large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) LONGLONG = _cffi_backend.new_primitive_type('long long') P_LONGLONG = _cffi_backend.new_pointer_type(LONGLONG) LONGLONG_ARRAY = _cffi_backend.new_array_type(P_LONGLONG, None) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -137,6 +137,7 @@ self.check_mode_ok(mode) stream = streamio.fdopen_as_stream(fd, mode, buffering, signal_checker(self.space)) + self.check_not_dir(fd) self.fdopenstream(stream, fd, mode) def direct_close(self): diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -4,7 +4,7 @@ from rpython.rlib.streamio import StreamErrors from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import ObjSpace, W_Root +from pypy.interpreter.baseobjspace import ObjSpace, W_Root, CannotHaveLock from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror @@ -33,19 +33,24 @@ def _try_acquire_lock(self): # this function runs with the GIL acquired so there is no race # 
condition in the creation of the lock - if self.slock is None: - self.slock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.slockowner is me: return False # already acquired by the current thread - self.slock.acquire(True) + try: + if self.slock is None: + self.slock = self.space.allocate_lock() + except CannotHaveLock: + pass + else: + self.slock.acquire(True) assert self.slockowner is None self.slockowner = me return True def _release_lock(self): self.slockowner = None - self.slock.release() + if self.slock is not None: + self.slock.release() def lock(self): if not self._try_acquire_lock(): diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py --- a/pypy/module/_multibytecodec/c_codecs.py +++ b/pypy/module/_multibytecodec/c_codecs.py @@ -1,7 +1,6 @@ import py from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir UNICODE_REPLACEMENT_CHARACTER = u'\uFFFD' @@ -15,7 +14,7 @@ return 'EncodeDecodeError(%r, %r, %r)' % (self.start, self.end, self.reason) -srcdir = py.path.local(cdir) +srcdir = py.path.local(__file__).dirpath() codecs = [ # _codecs_cn diff --git a/rpython/translator/c/src/cjkcodecs/README b/pypy/module/_multibytecodec/src/cjkcodecs/README rename from rpython/translator/c/src/cjkcodecs/README rename to pypy/module/_multibytecodec/src/cjkcodecs/README diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_cn.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c rename from rpython/translator/c/src/cjkcodecs/_codecs_cn.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_hk.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c rename from rpython/translator/c/src/cjkcodecs/_codecs_hk.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_hk.c diff --git 
a/rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c rename from rpython/translator/c/src/cjkcodecs/_codecs_iso2022.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_iso2022.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_jp.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c rename from rpython/translator/c/src/cjkcodecs/_codecs_jp.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_jp.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_kr.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c rename from rpython/translator/c/src/cjkcodecs/_codecs_kr.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_kr.c diff --git a/rpython/translator/c/src/cjkcodecs/_codecs_tw.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c rename from rpython/translator/c/src/cjkcodecs/_codecs_tw.c rename to pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c diff --git a/rpython/translator/c/src/cjkcodecs/alg_jisx0201.h b/pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h rename from rpython/translator/c/src/cjkcodecs/alg_jisx0201.h rename to pypy/module/_multibytecodec/src/cjkcodecs/alg_jisx0201.h diff --git a/rpython/translator/c/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h rename from rpython/translator/c/src/cjkcodecs/cjkcodecs.h rename to pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h diff --git a/rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h b/pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h rename from rpython/translator/c/src/cjkcodecs/emu_jisx0213_2000.h rename to pypy/module/_multibytecodec/src/cjkcodecs/emu_jisx0213_2000.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_cn.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h rename from rpython/translator/c/src/cjkcodecs/mappings_cn.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_cn.h diff --git 
a/rpython/translator/c/src/cjkcodecs/mappings_hk.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h rename from rpython/translator/c/src/cjkcodecs/mappings_hk.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_hk.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h rename from rpython/translator/c/src/cjkcodecs/mappings_jisx0213_pair.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jisx0213_pair.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_jp.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h rename from rpython/translator/c/src/cjkcodecs/mappings_jp.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_jp.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_kr.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h rename from rpython/translator/c/src/cjkcodecs/mappings_kr.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_kr.h diff --git a/rpython/translator/c/src/cjkcodecs/mappings_tw.h b/pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h rename from rpython/translator/c/src/cjkcodecs/mappings_tw.h rename to pypy/module/_multibytecodec/src/cjkcodecs/mappings_tw.h diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c rename from rpython/translator/c/src/cjkcodecs/multibytecodec.c rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h rename from rpython/translator/c/src/cjkcodecs/multibytecodec.h rename to pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,5 +1,6 @@ import py + at 
py.test.mark.tryfirst def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: py.test.skip("genreflex is not installed") diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -2,13 +2,13 @@ Implementation of the interpreter-level default import logic. """ -import sys, os, stat +import sys, os, stat, genericpath from pypy.interpreter.module import Module from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, generic_new_descr from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.baseobjspace import W_Root, CannotHaveLock from pypy.interpreter.eval import Code from pypy.interpreter.pycode import PyCode from rpython.rlib import streamio, jit @@ -522,7 +522,8 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) - if os.path.isdir(filepart) and case_ok(filepart): + # os.path.isdir on win32 is not rpython when pywin32 installed + if genericpath.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) if modtype in (PY_SOURCE, PY_COMPILED): @@ -579,7 +580,8 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None @@ -753,26 +755,14 @@ me = self.space.getexecutioncontext() # used as thread ident return self.lockowner is me - def _can_have_lock(self): - # hack: we can't have self.lock != None during translation, - # because prebuilt lock objects are not allowed. 
In this - # special situation we just don't lock at all (translation is - # not multithreaded anyway). - if we_are_translated(): - return True # we need a lock at run-time - elif self.space.config.translating: - assert self.lock is None - return False - else: - return True # in py.py - def acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock if self.lock is None: - if not self._can_have_lock(): + try: + self.lock = self.space.allocate_lock() + except CannotHaveLock: return - self.lock = self.space.allocate_lock() me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is me: pass # already acquired by the current thread @@ -790,7 +780,7 @@ # Too bad. This situation can occur if a fork() occurred # with the import lock held, and we're the child. return - if not self._can_have_lock(): + if self.lock is None: # CannotHaveLock occurred return space = self.space raise OperationError(space.w_RuntimeError, diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,7 +203,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = marshal.loads @@ -223,7 +222,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,7 +578,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -586,20 +585,19 @@ assert not 
hasattr(time, 'foo') def test_reimport_builtin(self): - skip("fix me") import sys, time oldpath = sys.path - time.tzset = "" + time.tzname = "" del sys.modules['time'] import time as time1 assert sys.modules['time'] is time1 - assert time.tzset == "" + assert time.tzname == "" - reload(time1) # don't leave a broken time.tzset behind + reload(time1) # don't leave a broken time.tzname behind import time - assert time.tzset != "" + assert time.tzname != "" def test_reload_infinite(self): import infinite_reload diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,6 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', + 'nditer': 'nditer.nditer', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import math import _numpypy diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -37,7 +37,7 @@ "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] -TWO_ARG_FUNCTIONS_OR_NONE = ['view'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -388,6 +388,8 @@ w_res = w_lhs.descr_mul(interp.space, w_rhs) elif self.name == '-': w_res = w_lhs.descr_sub(interp.space, w_rhs) + elif self.name == '**': + w_res = w_lhs.descr_pow(interp.space, w_rhs) elif self.name == '->': if isinstance(w_rhs, FloatObject): w_rhs = IntObject(int(w_rhs.floatval)) @@ -596,6 +598,8 @@ arg = self.args[1].execute(interp) if self.name == 
'view': w_res = arr.descr_view(interp.space, arg) + elif self.name == 'astype': + w_res = arr.descr_astype(interp.space, arg) else: assert False else: @@ -620,7 +624,7 @@ (':', 'colon'), ('\w+', 'identifier'), ('\]', 'array_right'), - ('(->)|[\+\-\*\/]', 'operator'), + ('(->)|[\+\-\*\/]+', 'operator'), ('=', 'assign'), (',', 'comma'), ('\|', 'pipe'), diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -284,9 +284,11 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return ArrayIter(self, support.product(shape), shape, r[0], r[1]) - return ArrayIter(self, self.get_size(), self.shape, - self.strides, self.backstrides) + i = ArrayIter(self, support.product(shape), shape, r[0], r[1]) + else: + i = ArrayIter(self, self.get_size(), self.shape, + self.strides, self.backstrides) + return i, i.reset() def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop, ufuncs +from pypy.module.micronumpy import descriptor, loop from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter @@ -156,10 +156,10 @@ "string is smaller than requested size")) a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) - ai = a.create_iter() + ai, state = a.create_iter() for val in items: - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) return space.wrap(a) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- 
a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -32,24 +32,23 @@ self.reset() def reset(self): - self.iter = self.base.create_iter() + self.iter, self.state = self.base.create_iter() def descr_len(self, space): - return space.wrap(self.base.get_size()) + return space.wrap(self.iter.size) def descr_next(self, space): - if self.iter.done(): + if self.iter.done(self.state): raise OperationError(space.w_StopIteration, space.w_None) - w_res = self.iter.getitem() - self.iter.next() + w_res = self.iter.getitem(self.state) + self.state = self.iter.next(self.state) return w_res From noreply at buildbot.pypy.org Tue Apr 22 10:40:55 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 22 Apr 2014 10:40:55 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: there's an extra guard (which is optimized away) Message-ID: <20140422084055.1711D1C06C3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70849:af2d6463e471 Date: 2014-04-22 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/af2d6463e471/ Log: there's an extra guard (which is optimized away) diff --git a/rpython/jit/tool/test/test_jitoutput.py b/rpython/jit/tool/test/test_jitoutput.py --- a/rpython/jit/tool/test/test_jitoutput.py +++ b/rpython/jit/tool/test/test_jitoutput.py @@ -38,7 +38,7 @@ assert info.ops.total == 2 assert info.recorded_ops.total == 2 assert info.recorded_ops.calls == 0 - assert info.guards == 1 + assert info.guards == 2 assert info.opt_ops == 13 assert info.opt_guards == 2 assert info.forcings == 0 From noreply at buildbot.pypy.org Tue Apr 22 11:47:16 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 22 Apr 2014 11:47:16 +0200 (CEST) Subject: [pypy-commit] pypy small-unroll-improvements: close to-be-merged branch Message-ID: <20140422094716.4505B1D2380@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: small-unroll-improvements Changeset: r70850:eecccc44bf91 Date: 
2014-04-22 11:40 +0200 http://bitbucket.org/pypy/pypy/changeset/eecccc44bf91/ Log: close to-be-merged branch From noreply at buildbot.pypy.org Tue Apr 22 11:47:17 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 22 Apr 2014 11:47:17 +0200 (CEST) Subject: [pypy-commit] pypy default: merge small-unroll-improvements Message-ID: <20140422094717.F3E921D2380@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r70851:82141f03207d Date: 2014-04-22 11:46 +0200 http://bitbucket.org/pypy/pypy/changeset/82141f03207d/ Log: merge small-unroll-improvements a cleanup and generalization of unroll, in particularly the virtual state handling. reduces code duplication and various hacks. Fixes a few rare miscompiles. This also improves optimization by generalizing a few matching cases in the virtualstate matching. diff too long, truncating to 2000 out of 2662 lines diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -149,3 +149,6 @@ .. branch: openbsd-lib-prefix add 'lib' prefix to link libraries on OpenBSD + +.. branch: small-unroll-improvements +Improve optimiziation of small allocation-heavy loops in the JIT diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,7 @@ .. this is a revision shortly after release-2.3.x .. startrev: ba569fe1efdb + + +.. 
branch: small-unroll-improvements +Improve optimiziation of small allocation-heavy loops in the JIT diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -106,7 +106,7 @@ def compile_loop(metainterp, greenkey, start, inputargs, jumpargs, - resume_at_jump_descr, full_preamble_needed=True, + full_preamble_needed=True, try_disabling_unroll=False): """Try to compile a new procedure by closing the current history back to the first operation. @@ -128,7 +128,6 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] h_ops = history.operations - part.resume_at_jump_descr = resume_at_jump_descr part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ [h_ops[i].clone() for i in range(start, len(h_ops))] + \ [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] @@ -187,7 +186,7 @@ def compile_retrace(metainterp, greenkey, start, inputargs, jumpargs, - resume_at_jump_descr, partial_trace, resumekey): + partial_trace, resumekey): """Try to compile a new procedure by closing the current history back to the first operation. """ @@ -203,7 +202,6 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] - part.resume_at_jump_descr = resume_at_jump_descr h_ops = history.operations part.operations = [partial_trace.operations[-1]] + \ @@ -765,7 +763,7 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey, resume_at_jump_descr=None): +def compile_trace(metainterp, resumekey): """Try to compile a new bridge leading from the beginning of the history to some existing place. 
""" @@ -781,7 +779,6 @@ # clone ops, as optimize_bridge can mutate the ops new_trace.operations = [op.clone() for op in metainterp.history.operations] - new_trace.resume_at_jump_descr = resume_at_jump_descr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -628,7 +628,6 @@ call_pure_results = None logops = None quasi_immutable_deps = None - resume_at_jump_descr = None def _token(*args): raise Exception("TreeLoop.token is killed") diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -31,6 +31,12 @@ def clone(self): return LenBound(self.mode, self.descr, self.bound.clone()) + def generalization_of(self, other): + return (other is not None and + self.mode == other.mode and + self.descr == other.descr and + self.bound.contains_bound(other.bound)) + class OptValue(object): __metaclass__ = extendabletype _attrs_ = ('box', 'known_class', 'last_guard', 'level', 'intbound', 'lenbound') @@ -129,13 +135,21 @@ def force_at_end_of_preamble(self, already_forced, optforce): return self - def get_args_for_fail(self, modifier): + # visitor API + + def visitor_walk_recursive(self, visitor): pass - def make_virtual_info(self, modifier, fieldnums): - #raise NotImplementedError # should not be called on this level - assert fieldnums is None - return modifier.make_not_virtual(self) + @specialize.argtype(1) + def visitor_dispatch_virtual_type(self, visitor): + if self.is_virtual(): + return self._visitor_dispatch_virtual_type(visitor) + else: + return visitor.visit_not_virtual(self) + + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + assert 0, 
"unreachable" def is_constant(self): return self.level == LEVEL_CONSTANT diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -543,6 +543,9 @@ return self.emit_operation(op) + def optimize_GUARD_FUTURE_CONDITION(self, op): + pass # just remove it + def optimize_INT_FLOORDIV(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -61,6 +61,9 @@ op.setdescr(descr.target_tokens[0]) self.emit_operation(op) + def optimize_GUARD_FUTURE_CONDITION(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) OptSimplify.propagate_forward = dispatch_opt diff --git a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,6 +1,6 @@ from __future__ import with_statement from rpython.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot, + LLtypeMixin, BaseTest, Storage, _sortboxes, FakeMetaInterpStaticData) from rpython.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from rpython.jit.metainterp.resoperation import rop, opname, ResOperation @@ -8,6 +8,8 @@ from py.test import raises from rpython.jit.metainterp.optimizeopt.optimizer import Optimization from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.optimizeopt.heap import OptHeap +from rpython.jit.metainterp.optimizeopt.rewrite import OptRewrite 
class BaseTestMultiLabel(BaseTest): @@ -20,7 +22,6 @@ part = TreeLoop('part') part.inputargs = loop.inputargs - part.resume_at_jump_descr = FakeDescrWithSnapshot() token = loop.original_jitcell_token optimized = TreeLoop('optimized') @@ -42,6 +43,7 @@ operations.append(label) part.operations = operations + self.add_guard_future_condition(part) self._do_optimize_loop(part, None) if part.operations[-1].getopnum() == rop.LABEL: last_label = [part.operations.pop()] @@ -502,7 +504,7 @@ self.loop = loop loop.call_pure_results = args_dict() metainterp_sd = FakeMetaInterpStaticData(self.cpu) - optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) + optimize_unroll(metainterp_sd, loop, [OptRewrite(), OptRenameStrlen(), OptHeap(), OptPure()], True) def test_optimizer_renaming_boxes1(self): ops = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -61,24 +61,6 @@ lst6 = virt1._get_field_descr_list() assert lst6 is lst3 -def test_reuse_vinfo(): - class FakeVInfo(object): - def set_content(self, fieldnums): - self.fieldnums = fieldnums - def equals(self, fieldnums): - return self.fieldnums == fieldnums - class FakeVirtualValue(virtualize.AbstractVirtualValue): - def _make_virtual(self, *args): - return FakeVInfo() - v1 = FakeVirtualValue(None, None) - vinfo1 = v1.make_virtual_info(None, [1, 2, 4]) - vinfo2 = v1.make_virtual_info(None, [1, 2, 4]) - assert vinfo1 is vinfo2 - vinfo3 = v1.make_virtual_info(None, [1, 2, 6]) - assert vinfo3 is not vinfo2 - vinfo4 = v1.make_virtual_info(None, [1, 2, 6]) - assert vinfo3 is vinfo4 - def test_descrlist_dict(): from rpython.jit.metainterp.optimizeopt import util as optimizeutil h1 = optimizeutil.descrlist_hash([]) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py 
b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -51,7 +51,8 @@ if expected_preamble: expected_preamble = self.parse(expected_preamble) if expected_short: - expected_short = self.parse(expected_short) + # the short preamble doesn't have fail descrs, they are patched in when it is used + expected_short = self.parse(expected_short, want_fail_descr=False) preamble = self.unroll_and_optimize(loop, call_pure_results) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -355,11 +355,21 @@ class BaseTest(object): - def parse(self, s, boxkinds=None): + def parse(self, s, boxkinds=None, want_fail_descr=True): + if want_fail_descr: + invent_fail_descr = self.invent_fail_descr + else: + invent_fail_descr = lambda *args: None return parse(s, self.cpu, self.namespace, type_system=self.type_system, boxkinds=boxkinds, - invent_fail_descr=self.invent_fail_descr) + invent_fail_descr=invent_fail_descr) + + def add_guard_future_condition(self, res): + # invent a GUARD_FUTURE_CONDITION to not have to change all tests + if res.operations[-1].getopnum() == rop.JUMP: + guard = ResOperation(rop.GUARD_FUTURE_CONDITION, [], None, descr=self.invent_fail_descr(None, -1, [])) + res.operations.insert(-1, guard) def invent_fail_descr(self, model, opnum, fail_args): if fail_args is None: @@ -397,6 +407,7 @@ optimize_trace(metainterp_sd, loop, self.enable_opts) def unroll_and_optimize(self, loop, call_pure_results=None): + self.add_guard_future_condition(loop) operations = loop.operations jumpop = operations[-1] assert jumpop.getopnum() == rop.JUMP @@ -408,7 +419,6 @@ preamble = TreeLoop('preamble') preamble.inputargs = inputargs - preamble.resume_at_jump_descr = 
FakeDescrWithSnapshot() token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ @@ -419,7 +429,6 @@ assert preamble.operations[-1].getopnum() == rop.LABEL inliner = Inliner(inputargs, jump_args) - loop.resume_at_jump_descr = preamble.resume_at_jump_descr loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], @@ -450,18 +459,6 @@ def __eq__(self, other): return isinstance(other, FakeDescr) -class FakeDescrWithSnapshot(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescrWithSnapshot() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescrWithSnapshot) - - def convert_old_style_to_targets(loop, jump): newloop = TreeLoop(loop.name) newloop.inputargs = loop.inputargs diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -1,43 +1,103 @@ from __future__ import with_statement import py -from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \ - VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes + VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes, GenerateGuardState, \ + VirtualStatesCantMatch, VArrayStructStateInfo from rpython.jit.metainterp.optimizeopt.optimizer import OptValue from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ - 
equaloplists, FakeDescrWithSnapshot + equaloplists from rpython.jit.metainterp.optimizeopt.intutils import IntBound +from rpython.jit.metainterp.optimizeopt.virtualize import (VirtualValue, + VArrayValue, VStructValue, VArrayStructValue) from rpython.jit.metainterp.history import TreeLoop, JitCellToken from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import ResOperation, rop -class TestBasic: - someptr1 = LLtypeMixin.myptr - someptr2 = LLtypeMixin.myptr2 +class BaseTestGenerateGuards(BaseTest): + + def _box_or_value(self, box_or_value=None): + if box_or_value is None: + return None, None + elif isinstance(box_or_value, OptValue): + value = box_or_value + box = value.box + else: + box = box_or_value + value = OptValue(box) + return value, box + + def guards(self, info1, info2, box_or_value, expected, inputargs=None): + value, box = self._box_or_value(box_or_value) + if inputargs is None: + inputargs = [box] + info1.position = info2.position = 0 + state = GenerateGuardState(self.cpu) + info1.generate_guards(info2, value, state) + self.compare(state.extra_guards, expected, inputargs) + + def compare(self, guards, expected, inputargs): + loop = self.parse(expected) + boxmap = {} + assert len(loop.inputargs) == len(inputargs) + for a, b in zip(loop.inputargs, inputargs): + boxmap[a] = b + for op in loop.operations: + if op.is_guard(): + op.setdescr(None) + assert equaloplists(guards, loop.operations, False, + boxmap) + + def check_no_guards(self, info1, info2, box_or_value=None, state=None): + value, _ = self._box_or_value(box_or_value) + if info1.position == -1: + info1.position = 0 + if info2.position == -1: + info2.position = 0 + if state is None: + state = GenerateGuardState(self.cpu) + info1.generate_guards(info2, value, state) + assert not state.extra_guards + return state + + def check_invalid(self, info1, info2, box_or_value=None, state=None): + value, _ = 
self._box_or_value(box_or_value) + if info1.position == -1: + info1.position = 0 + if info2.position == -1: + info2.position = 0 + if state is None: + state = GenerateGuardState(self.cpu) + with py.test.raises(VirtualStatesCantMatch): + info1.generate_guards(info2, value, state) + def test_position_generalization(self): def postest(info1, info2): info1.position = 0 - assert info1.generalization_of(info1, {}, {}) + self.check_no_guards(info1, info1) info2.position = 0 - assert info1.generalization_of(info2, {}, {}) + self.check_no_guards(info1, info2) info2.position = 1 - renum = {} - assert info1.generalization_of(info2, renum, {}) - assert renum == {0:1} - assert info1.generalization_of(info2, {0:1}, {}) - assert info1.generalization_of(info2, {1:1}, {}) - bad = {} - assert not info1.generalization_of(info2, {0:0}, bad) - assert info1 in bad and info2 in bad + state = self.check_no_guards(info1, info2) + assert state.renum == {0:1} + + assert self.check_no_guards(info1, info2, state=state) + + # feed fake renums + state.renum = {1: 1} + self.check_no_guards(info1, info2, state=state) + + state.renum = {0: 0} + self.check_invalid(info1, info2, state=state) + assert info1 in state.bad and info2 in state.bad for BoxType in (BoxInt, BoxFloat, BoxPtr): info1 = NotVirtualStateInfo(OptValue(BoxType())) info2 = NotVirtualStateInfo(OptValue(BoxType())) postest(info1, info2) - + info1, info2 = VArrayStateInfo(42), VArrayStateInfo(42) info1.fieldstate = info2.fieldstate = [] postest(info1, info2) @@ -56,7 +116,7 @@ info1.position = 0 info2 = NotVirtualStateInfo(value2) info2.position = 0 - return info1.generalization_of(info2, {}, {}) + return VirtualState([info1]).generalization_of(VirtualState([info2]), cpu=self.cpu) assert isgeneral(OptValue(BoxInt()), OptValue(ConstInt(7))) assert not isgeneral(OptValue(ConstInt(7)), OptValue(BoxInt())) @@ -65,10 +125,11 @@ nonnull = OptValue(BoxPtr()) nonnull.make_nonnull(0) knownclass = OptValue(BoxPtr()) - 
knownclass.make_constant_class(ConstPtr(self.someptr1), 0) + clsbox = self.cpu.ts.cls_of_box(BoxPtr(self.myptr)) + knownclass.make_constant_class(clsbox, 0) const = OptValue(BoxPtr) - const.make_constant_class(ConstPtr(self.someptr1), 0) - const.make_constant(ConstPtr(self.someptr1)) + const.make_constant_class(clsbox, 0) + const.make_constant(ConstPtr(self.myptr)) inorder = [ptr, nonnull, knownclass, const] for i in range(len(inorder)): for j in range(i, len(inorder)): @@ -91,48 +152,51 @@ value1 = OptValue(BoxPtr()) value1.make_nonnull(None) - value2 = OptValue(ConstPtr(LLtypeMixin.nullptr)) + value2 = OptValue(ConstPtr(self.nullptr)) assert not isgeneral(value1, value2) def test_field_matching_generalization(self): const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) const1.position = const2.position = 1 - assert not const1.generalization_of(const2, {}, {}) - assert not const2.generalization_of(const1, {}, {}) + self.check_invalid(const1, const2) + self.check_invalid(const2, const1) def fldtst(info1, info2): info1.position = info2.position = 0 info1.fieldstate = [const1] info2.fieldstate = [const2] - assert not info1.generalization_of(info2, {}, {}) - assert not info2.generalization_of(info1, {}, {}) - assert info1.generalization_of(info1, {}, {}) - assert info2.generalization_of(info2, {}, {}) - fldtst(VArrayStateInfo(42), VArrayStateInfo(42)) - fldtst(VStructStateInfo(42, [7]), VStructStateInfo(42, [7])) - fldtst(VirtualStateInfo(ConstInt(42), [7]), VirtualStateInfo(ConstInt(42), [7])) + self.check_invalid(info1, info2) + self.check_invalid(info2, info1) + self.check_no_guards(info1, info1) + self.check_no_guards(info2, info2) + fakedescr = object() + fielddescr = object() + fldtst(VArrayStateInfo(fakedescr), VArrayStateInfo(fakedescr)) + fldtst(VStructStateInfo(fakedescr, [fielddescr]), VStructStateInfo(fakedescr, [fielddescr])) + fldtst(VirtualStateInfo(ConstInt(42), [fielddescr]), 
VirtualStateInfo(ConstInt(42), [fielddescr])) + fldtst(VArrayStructStateInfo(fakedescr, [[fielddescr]]), VArrayStructStateInfo(fakedescr, [[fielddescr]])) def test_known_class_generalization(self): knownclass1 = OptValue(BoxPtr()) - knownclass1.make_constant_class(ConstPtr(self.someptr1), 0) + knownclass1.make_constant_class(ConstPtr(self.myptr), 0) info1 = NotVirtualStateInfo(knownclass1) info1.position = 0 knownclass2 = OptValue(BoxPtr()) - knownclass2.make_constant_class(ConstPtr(self.someptr1), 0) + knownclass2.make_constant_class(ConstPtr(self.myptr), 0) info2 = NotVirtualStateInfo(knownclass2) info2.position = 0 - assert info1.generalization_of(info2, {}, {}) - assert info2.generalization_of(info1, {}, {}) + self.check_no_guards(info1, info2) + self.check_no_guards(info2, info1) knownclass3 = OptValue(BoxPtr()) - knownclass3.make_constant_class(ConstPtr(self.someptr2), 0) + knownclass3.make_constant_class(ConstPtr(self.myptr2), 0) info3 = NotVirtualStateInfo(knownclass3) info3.position = 0 - assert not info1.generalization_of(info3, {}, {}) - assert not info2.generalization_of(info3, {}, {}) - assert not info3.generalization_of(info2, {}, {}) - assert not info3.generalization_of(info1, {}, {}) + self.check_invalid(info1, info3) + self.check_invalid(info2, info3) + self.check_invalid(info3, info2) + self.check_invalid(info3, info1) def test_circular_generalization(self): @@ -140,29 +204,157 @@ VirtualStateInfo(ConstInt(42), [7])): info.position = 0 info.fieldstate = [info] - assert info.generalization_of(info, {}, {}) + self.check_no_guards(info, info) -class BaseTestGenerateGuards(BaseTest): - def guards(self, info1, info2, box, expected): - info1.position = info2.position = 0 - guards = [] - info1.generate_guards(info2, box, self.cpu, guards, {}) - self.compare(guards, expected, [box]) + def test_generate_guards_nonvirtual_all_combinations(self): + # set up infos + unknown_val = OptValue(self.nodebox) + unknownnull_val = OptValue(BoxPtr(self.nullptr)) + 
unknown_info = NotVirtualStateInfo(unknown_val) - def compare(self, guards, expected, inputargs): - loop = self.parse(expected) - boxmap = {} - assert len(loop.inputargs) == len(inputargs) - for a, b in zip(loop.inputargs, inputargs): - boxmap[a] = b - for op in loop.operations: - if op.is_guard(): - op.setdescr(None) - assert equaloplists(guards, loop.operations, False, - boxmap) + nonnull_val = OptValue(self.nodebox) + nonnull_val.make_nonnull(None) + nonnull_info = NotVirtualStateInfo(nonnull_val) + + knownclass_val = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + knownclass_val.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(knownclass_val) + knownclass2_val = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + knownclass2_val.make_constant_class(classbox, -1) + knownclass2_info = NotVirtualStateInfo(knownclass2_val) + + constant_val = OptValue(BoxInt()) + constant_val.make_constant(ConstInt(1)) + constant_info = NotVirtualStateInfo(constant_val) + constclass_val = OptValue(self.nodebox) + constclass_val.make_constant(self.nodebox.constbox()) + constclass_info = NotVirtualStateInfo(constclass_val) + constclass2_val = OptValue(self.nodebox) + constclass2_val.make_constant(self.nodebox2.constbox()) + constclass2_info = NotVirtualStateInfo(constclass2_val) + constantnull_val = OptValue(ConstPtr(self.nullptr)) + constantnull_info = NotVirtualStateInfo(constantnull_val) + + # unknown unknown + self.check_no_guards(unknown_info, unknown_info, unknown_val) + self.check_no_guards(unknown_info, unknown_info) + + # unknown nonnull + self.check_no_guards(unknown_info, nonnull_info, nonnull_val) + self.check_no_guards(unknown_info, nonnull_info) + + # unknown knownclass + self.check_no_guards(unknown_info, knownclass_info, knownclass_val) + self.check_no_guards(unknown_info, knownclass_info) + + # unknown constant + self.check_no_guards(unknown_info, constant_info, constant_val) + 
self.check_no_guards(unknown_info, constant_info) + + + # nonnull unknown + expected = """ + [p0] + guard_nonnull(p0) [] + """ + self.guards(nonnull_info, unknown_info, unknown_val, expected) + self.check_invalid(nonnull_info, unknown_info, unknownnull_val) + self.check_invalid(nonnull_info, unknown_info) + self.check_invalid(nonnull_info, unknown_info) + + # nonnull nonnull + self.check_no_guards(nonnull_info, nonnull_info, nonnull_val) + self.check_no_guards(nonnull_info, nonnull_info, nonnull_val) + + # nonnull knownclass + self.check_no_guards(nonnull_info, knownclass_info, knownclass_val) + self.check_no_guards(nonnull_info, knownclass_info) + + # nonnull constant + self.check_no_guards(nonnull_info, constant_info, constant_val) + self.check_invalid(nonnull_info, constantnull_info, constantnull_val) + self.check_no_guards(nonnull_info, constant_info) + self.check_invalid(nonnull_info, constantnull_info) + + + # knownclass unknown + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(knownclass_info, unknown_info, unknown_val, expected) + self.check_invalid(knownclass_info, unknown_info, unknownnull_val) + self.check_invalid(knownclass_info, unknown_info, knownclass2_val) + self.check_invalid(knownclass_info, unknown_info) + self.check_invalid(knownclass_info, unknown_info) + self.check_invalid(knownclass_info, unknown_info) + + # knownclass nonnull + expected = """ + [p0] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(knownclass_info, nonnull_info, knownclass_val, expected) + self.check_invalid(knownclass_info, nonnull_info, knownclass2_val) + self.check_invalid(knownclass_info, nonnull_info) + self.check_invalid(knownclass_info, nonnull_info) + + # knownclass knownclass + self.check_no_guards(knownclass_info, knownclass_info, knownclass_val) + self.check_invalid(knownclass_info, knownclass2_info, knownclass2_val) + self.check_no_guards(knownclass_info, knownclass_info) + 
self.check_invalid(knownclass_info, knownclass2_info) + + # knownclass constant + self.check_invalid(knownclass_info, constantnull_info, constantnull_val) + self.check_invalid(knownclass_info, constclass2_info, constclass2_val) + self.check_invalid(knownclass_info, constantnull_info) + self.check_invalid(knownclass_info, constclass2_info) + + + # constant unknown + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.guards(constant_info, unknown_info, constant_val, expected) + self.check_invalid(constant_info, unknown_info, unknownnull_val) + self.check_invalid(constant_info, unknown_info) + self.check_invalid(constant_info, unknown_info) + + # constant nonnull + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.guards(constant_info, nonnull_info, constant_val, expected) + self.check_invalid(constant_info, nonnull_info, constclass2_val) + self.check_invalid(constant_info, nonnull_info) + self.check_invalid(constant_info, nonnull_info) + + # constant knownclass + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.guards(constant_info, knownclass_info, constant_val, expected) + self.check_invalid(constant_info, knownclass_info, unknownnull_val) + self.check_invalid(constant_info, knownclass_info) + self.check_invalid(constant_info, knownclass_info) + + # constant constant + self.check_no_guards(constant_info, constant_info, constant_val) + self.check_invalid(constant_info, constantnull_info, constantnull_val) + self.check_no_guards(constant_info, constant_info) + self.check_invalid(constant_info, constantnull_info) + + def test_intbounds(self): - value1 = OptValue(BoxInt()) + value1 = OptValue(BoxInt(15)) value1.intbound.make_ge(IntBound(0, 10)) value1.intbound.make_le(IntBound(20, 30)) info1 = NotVirtualStateInfo(value1) @@ -174,10 +366,19 @@ i2 = int_le(i0, 30) guard_true(i2) [] """ - self.guards(info1, info2, BoxInt(15), expected) - py.test.raises(InvalidLoop, self.guards, - info1, info2, BoxInt(50), expected) + self.guards(info1, info2, 
value1, expected) + self.check_invalid(info1, info2, BoxInt(50)) + def test_intbounds_constant(self): + value1 = OptValue(BoxInt(15)) + value1.intbound.make_ge(IntBound(0, 10)) + value1.intbound.make_le(IntBound(20, 30)) + info1 = NotVirtualStateInfo(value1) + info2 = NotVirtualStateInfo(OptValue(ConstInt(10000))) + self.check_invalid(info1, info2) + info1 = NotVirtualStateInfo(value1) + info2 = NotVirtualStateInfo(OptValue(ConstInt(11))) + self.check_no_guards(info1, info2) def test_known_class(self): value1 = OptValue(self.nodebox) @@ -191,8 +392,7 @@ guard_class(p0, ConstClass(node_vtable)) [] """ self.guards(info1, info2, self.nodebox, expected) - py.test.raises(InvalidLoop, self.guards, - info1, info2, BoxPtr(), expected) + self.check_invalid(info1, info2, BoxPtr()) def test_known_class_value(self): value1 = OptValue(self.nodebox) @@ -219,7 +419,7 @@ self.compare(guards, expected, [box]) def test_equal_inputargs(self): - value = OptValue(self.nodebox) + value = OptValue(self.nodebox) classbox = self.cpu.ts.cls_of_box(self.nodebox) value.make_constant_class(classbox, -1) knownclass_info = NotVirtualStateInfo(value) @@ -242,22 +442,130 @@ expected = """ [p0] - guard_nonnull(p0) [] + guard_nonnull(p0) [] guard_class(p0, ConstClass(node_vtable)) [] """ - guards = [] - vstate1.generate_guards(vstate2, [self.nodebox, self.nodebox], self.cpu, guards) - self.compare(guards, expected, [self.nodebox]) + state = vstate1.generate_guards(vstate2, [value, value], self.cpu) + self.compare(state.extra_guards, expected, [self.nodebox]) - with py.test.raises(InvalidLoop): - guards = [] - vstate1.generate_guards(vstate3, [self.nodebox, self.nodebox], - self.cpu, guards) - with py.test.raises(InvalidLoop): - guards = [] - vstate2.generate_guards(vstate3, [self.nodebox, self.nodebox], - self.cpu, guards) - + with py.test.raises(VirtualStatesCantMatch): + vstate1.generate_guards(vstate3, [value, value], + self.cpu) + with py.test.raises(VirtualStatesCantMatch): + 
vstate2.generate_guards(vstate3, [value, value], + self.cpu) + + + def test_generate_guards_on_virtual_fields_matches_array(self): + innervalue1 = OptValue(self.nodebox) + constclassbox = self.cpu.ts.cls_of_box(self.nodebox) + innervalue1.make_constant_class(constclassbox, -1) + innerinfo1 = NotVirtualStateInfo(innervalue1) + innerinfo1.position = 1 + innerinfo2 = NotVirtualStateInfo(OptValue(self.nodebox)) + innerinfo2.position = 1 + + descr = object() + + info1 = VArrayStateInfo(descr) + info1.fieldstate = [innerinfo1] + + info2 = VArrayStateInfo(descr) + info2.fieldstate = [innerinfo2] + + value1 = VArrayValue(descr, None, 1, self.nodebox) + value1._items[0] = OptValue(self.nodebox) + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, value1, expected, [self.nodebox]) + + def test_generate_guards_on_virtual_fields_matches_instance(self): + innervalue1 = OptValue(self.nodebox) + constclassbox = self.cpu.ts.cls_of_box(self.nodebox) + innervalue1.make_constant_class(constclassbox, -1) + innerinfo1 = NotVirtualStateInfo(innervalue1) + innerinfo1.position = 1 + innerinfo2 = NotVirtualStateInfo(OptValue(self.nodebox)) + innerinfo2.position = 1 + + info1 = VirtualStateInfo(ConstInt(42), [1]) + info1.fieldstate = [innerinfo1] + + info2 = VirtualStateInfo(ConstInt(42), [1]) + info2.fieldstate = [innerinfo2] + + value1 = VirtualValue(self.cpu, constclassbox, self.nodebox) + value1._fields = {1: OptValue(self.nodebox)} + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, value1, expected, [self.nodebox]) + + def test_generate_guards_on_virtual_fields_matches_struct(self): + innervalue1 = OptValue(self.nodebox) + constclassbox = self.cpu.ts.cls_of_box(self.nodebox) + innervalue1.make_constant_class(constclassbox, -1) + innerinfo1 = NotVirtualStateInfo(innervalue1) + innerinfo1.position = 1 + innerinfo2 = 
NotVirtualStateInfo(OptValue(self.nodebox)) + innerinfo2.position = 1 + + structdescr = object() + + info1 = VStructStateInfo(structdescr, [1]) + info1.fieldstate = [innerinfo1] + + info2 = VStructStateInfo(structdescr, [1]) + info2.fieldstate = [innerinfo2] + + value1 = VStructValue(self.cpu, structdescr, self.nodebox) + value1._fields = {1: OptValue(self.nodebox)} + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, value1, expected, [self.nodebox]) + + def test_generate_guards_on_virtual_fields_matches_arraystruct(self): + innervalue1 = OptValue(self.nodebox) + constclassbox = self.cpu.ts.cls_of_box(self.nodebox) + innervalue1.make_constant_class(constclassbox, -1) + innerinfo1 = NotVirtualStateInfo(innervalue1) + innerinfo1.position = 1 + innerinfo2 = NotVirtualStateInfo(OptValue(self.nodebox)) + innerinfo2.position = 1 + + arraydescr = object() + fielddescr = object() + + info1 = VArrayStructStateInfo(arraydescr, [[fielddescr]]) + info1.fieldstate = [innerinfo1] + + info2 = VArrayStructStateInfo(arraydescr, [[fielddescr]]) + info2.fieldstate = [innerinfo2] + + value1 = VArrayStructValue(arraydescr, 1, self.nodebox) + value1._items[0][fielddescr] = OptValue(self.nodebox) + + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(info1, info2, value1, expected, [self.nodebox]) + + # _________________________________________________________________________ + # the below tests don't really have anything to do with guard generation + def test_virtuals_with_equal_fields(self): info1 = VirtualStateInfo(ConstInt(42), [1, 2]) value = OptValue(self.nodebox) @@ -471,7 +779,6 @@ if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection # - bridge.resume_at_jump_descr = FakeDescrWithSnapshot() optimize_trace(metainterp_sd, bridge, self.enable_opts) @@ -480,6 +787,7 @@ loops = (loops, ) loops = 
[self.parse(loop) for loop in loops] bridge = self.parse(bridge) + self.add_guard_future_condition(bridge) for loop in loops: loop.preamble = self.unroll_and_optimize(loop) preamble = loops[0].preamble @@ -615,26 +923,26 @@ def test_constant(self): loops = """ - [p0] - p1 = same_as(ConstPtr(myptr)) - jump(p1) + [i0] + i1 = same_as(1) + jump(i1) """, """ - [p0] - p1 = same_as(ConstPtr(myptr2)) - jump(p1) + [i0] + i1 = same_as(2) + jump(i1) """, """ - [p0] - jump(p0) + [i0] + jump(i0) """ expected = """ - [p0] + [i0] jump() """ self.optimize_bridge(loops, loops[0], expected, 'Loop0') self.optimize_bridge(loops, loops[1], expected, 'Loop1') expected = """ - [p0] - jump(p0) + [i0] + jump(i0) """ self.optimize_bridge(loops, loops[2], expected, 'Loop2') @@ -658,7 +966,7 @@ """ self.optimize_bridge(loop, bridge, expected, p0=self.myptr) - def test_virtual(self): + def test_simple_virtual(self): loops = """ [p0, p1] p2 = new_with_vtable(ConstClass(node_vtable)) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -6,7 +6,8 @@ from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer, Optimization -from rpython.jit.metainterp.optimizeopt.virtualstate import VirtualStateAdder, ShortBoxes, BadVirtualState +from rpython.jit.metainterp.optimizeopt.virtualstate import (VirtualStateConstructor, + ShortBoxes, BadVirtualState, VirtualStatesCantMatch) from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.resume import Snapshot from rpython.rlib.debug import debug_print, debug_start, debug_stop @@ -53,6 +54,10 @@ self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) self.boxes_created_this_iteration = None + def 
get_virtual_state(self, args): + modifier = VirtualStateConstructor(self.optimizer) + return modifier.get_virtual_state(args) + def fix_snapshot(self, jump_args, snapshot): if snapshot is None: return None @@ -77,6 +82,12 @@ else: start_label = None + patchguardop = None + if len(loop.operations) > 1: + patchguardop = loop.operations[-2] + if patchguardop.getopnum() != rop.GUARD_FUTURE_CONDITION: + patchguardop = None + jumpop = loop.operations[-1] if jumpop.getopnum() == rop.JUMP or jumpop.getopnum() == rop.LABEL: loop.operations = loop.operations[:-1] @@ -94,7 +105,7 @@ stop_label = ResOperation(rop.LABEL, jumpop.getarglist(), None, TargetToken(cell_token)) if jumpop.getopnum() == rop.JUMP: - if self.jump_to_already_compiled_trace(jumpop): + if self.jump_to_already_compiled_trace(jumpop, patchguardop): # Found a compiled trace to jump to if self.short: # Construct our short preamble @@ -108,7 +119,7 @@ descr=start_label.getdescr()) if self.short: # Construct our short preamble - self.close_loop(start_label, jumpop) + self.close_loop(start_label, jumpop, patchguardop) else: self.optimizer.send_extra_operation(jumpop) return @@ -147,28 +158,14 @@ start_target = start_label.getdescr() assert isinstance(stop_target, TargetToken) assert isinstance(start_target, TargetToken) - if stop_target.targeting_jitcell_token is not start_target.targeting_jitcell_token: - return False + return stop_target.targeting_jitcell_token is start_target.targeting_jitcell_token - return True - - #args = stop_label.getarglist() - #modifier = VirtualStateAdder(self.optimizer) - #virtual_state = modifier.get_virtual_state(args) - #if self.initial_virtual_state.generalization_of(virtual_state): - # return True def export_state(self, targetop): original_jump_args = targetop.getarglist() jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] - assert self.optimizer.loop.resume_at_jump_descr - resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr.clone_if_mutable() - 
assert isinstance(resume_at_jump_descr, ResumeGuardDescr) - resume_at_jump_descr.rd_snapshot = self.fix_snapshot(jump_args, resume_at_jump_descr.rd_snapshot) - - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(jump_args) + virtual_state = self.get_virtual_state(jump_args) values = [self.getvalue(arg) for arg in jump_args] inputargs = virtual_state.make_inputargs(values, self.optimizer) @@ -195,7 +192,6 @@ targetop.initarglist(inputargs) target_token.virtual_state = virtual_state target_token.short_preamble = [ResOperation(rop.LABEL, short_inputargs, None)] - target_token.resume_at_jump_descr = resume_at_jump_descr exported_values = {} for box in inputargs: @@ -222,15 +218,13 @@ if not exported_state: # No state exported, construct one without virtuals self.short = None - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(self.inputargs) + virtual_state = self.get_virtual_state(self.inputargs) self.initial_virtual_state = virtual_state return self.short = target_token.short_preamble[:] self.short_seen = {} self.short_boxes = exported_state.short_boxes - self.short_resume_at_jump_descr = target_token.resume_at_jump_descr self.initial_virtual_state = target_token.virtual_state seen = {} @@ -286,19 +280,13 @@ self.boxes_created_this_iteration = {} i = 0 while i < len(newoperations): - op = newoperations[i] - self.boxes_created_this_iteration[op.result] = None - args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() - for a in args: - self.import_box(a, inputargs, short_jumpargs, []) + self._import_op(newoperations[i], inputargs, short_jumpargs, []) i += 1 newoperations = self.optimizer.get_newoperations() self.short.append(ResOperation(rop.JUMP, short_jumpargs, None, descr=start_label.getdescr())) self.finalize_short_preamble(start_label) - def close_loop(self, start_label, jumpop): + def close_loop(self, start_label, jumpop, patchguardop): virtual_state = 
self.initial_virtual_state short_inputargs = self.short[0].getarglist() inputargs = self.inputargs @@ -330,19 +318,8 @@ args[short_inputargs[i]] = jmp_to_short_args[i] self.short_inliner = Inliner(short_inputargs, jmp_to_short_args) - i = 1 - while i < len(self.short): - # Note that self.short might be extended during this loop - op = self.short[i] - newop = self.short_inliner.inline_op(op) - self.optimizer.send_extra_operation(newop) - if op.result in self.short_boxes.assumed_classes: - classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) - assumed_classbox = self.short_boxes.assumed_classes[op.result] - if not classbox or not classbox.same_constant(assumed_classbox): - raise InvalidLoop('Class of opaque pointer needed in short ' + - 'preamble unknown at end of loop') - i += 1 + self._inline_short_preamble(self.short, self.short_inliner, + patchguardop, self.short_boxes.assumed_classes) # Import boxes produced in the preamble but used in the loop newoperations = self.optimizer.get_newoperations() @@ -357,19 +334,7 @@ self.import_box(a, inputargs, short_jumpargs, jumpargs) j += 1 else: - op = newoperations[i] - - self.boxes_created_this_iteration[op.result] = None - args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() - - #if self.optimizer.loop.logops: - # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) - for a in args: - #if self.optimizer.loop.logops: - # debug_print('A: ' + self.optimizer.loop.logops.repr_of_arg(a)) - self.import_box(a, inputargs, short_jumpargs, jumpargs) + self._import_op(newoperations[i], inputargs, short_jumpargs, jumpargs) i += 1 newoperations = self.optimizer.get_newoperations() @@ -379,12 +344,12 @@ # Verify that the virtual state at the end of the loop is one # that is compatible with the virtual state at the start of the loop - modifier = VirtualStateAdder(self.optimizer) - final_virtual_state = modifier.get_virtual_state(original_jumpargs) + final_virtual_state = 
self.get_virtual_state(original_jumpargs) #debug_start('jit-log-virtualstate') #virtual_state.debug_print('Closed loop with ') bad = {} - if not virtual_state.generalization_of(final_virtual_state, bad): + if not virtual_state.generalization_of(final_virtual_state, bad, + cpu=self.optimizer.cpu): # We ended up with a virtual state that is not compatible # and we are thus unable to jump to the start of the loop #final_virtual_state.debug_print("Bad virtual state at end of loop, ", @@ -417,8 +382,7 @@ if op.is_guard(): op = op.clone() op.setfailargs(None) - descr = target_token.resume_at_jump_descr.clone_if_mutable() - op.setdescr(descr) + op.setdescr(None) # will be set to a proper descr when the preamble is used short[i] = op # Clone ops and boxes to get private versions and @@ -440,8 +404,6 @@ if op.result and op.result in self.short_boxes.assumed_classes: target_token.assumed_classes[newop.result] = self.short_boxes.assumed_classes[op.result] short[i] = newop - target_token.resume_at_jump_descr = target_token.resume_at_jump_descr.clone_if_mutable() - inliner.inline_descr_inplace(target_token.resume_at_jump_descr) # Forget the values to allow them to be freed for box in short[0].getarglist(): @@ -485,8 +447,7 @@ if not isinstance(a, Const) and a not in self.short_seen: self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): - descr = self.short_resume_at_jump_descr.clone_if_mutable() - op.setdescr(descr) + op.setdescr(None) # will be set to a proper descr when the preamble is used if guards_needed and self.short_boxes.has_producer(op.result): value_guards = self.getvalue(op.result).make_guards(op.result) @@ -528,7 +489,17 @@ box = self.optimizer.values[box].force_box(self.optimizer) jumpargs.append(box) - def jump_to_already_compiled_trace(self, jumpop): + + def _import_op(self, op, inputargs, short_jumpargs, jumpargs): + self.boxes_created_this_iteration[op.result] = None + args = op.getarglist() + if op.is_guard(): + args = args 
+ op.getfailargs() + + for a in args: + self.import_box(a, inputargs, short_jumpargs, jumpargs) + + def jump_to_already_compiled_trace(self, jumpop, patchguardop): assert jumpop.getopnum() == rop.JUMP cell_token = jumpop.getdescr() @@ -543,72 +514,84 @@ return True args = jumpop.getarglist() - modifier = VirtualStateAdder(self.optimizer) - virtual_state = modifier.get_virtual_state(args) + virtual_state = self.get_virtual_state(args) + values = [self.getvalue(arg) + for arg in jumpop.getarglist()] debug_start('jit-log-virtualstate') - virtual_state.debug_print("Looking for ") + virtual_state.debug_print("Looking for ", metainterp_sd=self.optimizer.metainterp_sd) for target in cell_token.target_tokens: if not target.virtual_state: continue - ok = False extra_guards = [] - bad = {} - debugmsg = 'Did not match ' - if target.virtual_state.generalization_of(virtual_state, bad): - ok = True - debugmsg = 'Matched ' - else: - try: - cpu = self.optimizer.cpu - target.virtual_state.generate_guards(virtual_state, - args, cpu, - extra_guards) + try: + cpu = self.optimizer.cpu + state = target.virtual_state.generate_guards(virtual_state, + values, + cpu) - ok = True + extra_guards = state.extra_guards + if extra_guards: debugmsg = 'Guarded to match ' - except InvalidLoop: - pass - target.virtual_state.debug_print(debugmsg, bad) + else: + debugmsg = 'Matched ' + except VirtualStatesCantMatch, e: + debugmsg = 'Did not match:\n%s\n' % (e.msg, ) + target.virtual_state.debug_print(debugmsg, e.state.bad, metainterp_sd=self.optimizer.metainterp_sd) + continue - if ok: - debug_stop('jit-log-virtualstate') + assert patchguardop is not None or (extra_guards == [] and len(target.short_preamble) == 1) - values = [self.getvalue(arg) - for arg in jumpop.getarglist()] - args = target.virtual_state.make_inputargs(values, self.optimizer, - keyboxes=True) - short_inputargs = target.short_preamble[0].getarglist() - inliner = Inliner(short_inputargs, args) + 
target.virtual_state.debug_print(debugmsg, {}) - for guard in extra_guards: - if guard.is_guard(): - descr = target.resume_at_jump_descr.clone_if_mutable() - inliner.inline_descr_inplace(descr) - guard.setdescr(descr) - self.optimizer.send_extra_operation(guard) + debug_stop('jit-log-virtualstate') - try: - for shop in target.short_preamble[1:]: - newop = inliner.inline_op(shop) - self.optimizer.send_extra_operation(newop) - if shop.result in target.assumed_classes: - classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) - if not classbox or not classbox.same_constant(target.assumed_classes[shop.result]): - raise InvalidLoop('The class of an opaque pointer at the end ' + - 'of the bridge does not mach the class ' + - 'it has at the start of the target loop') - except InvalidLoop: - #debug_print("Inlining failed unexpectedly", - # "jumping to preamble instead") - assert cell_token.target_tokens[0].virtual_state is None - jumpop.setdescr(cell_token.target_tokens[0]) - self.optimizer.send_extra_operation(jumpop) - return True + args = target.virtual_state.make_inputargs(values, self.optimizer, + keyboxes=True) + short_inputargs = target.short_preamble[0].getarglist() + inliner = Inliner(short_inputargs, args) + + for guard in extra_guards: + if guard.is_guard(): + descr = patchguardop.getdescr().clone_if_mutable() + guard.setdescr(descr) + self.optimizer.send_extra_operation(guard) + + try: + # NB: the short_preamble ends with a jump + self._inline_short_preamble(target.short_preamble, inliner, patchguardop, target.assumed_classes) + except InvalidLoop: + #debug_print("Inlining failed unexpectedly", + # "jumping to preamble instead") + assert cell_token.target_tokens[0].virtual_state is None + jumpop.setdescr(cell_token.target_tokens[0]) + self.optimizer.send_extra_operation(jumpop) + return True debug_stop('jit-log-virtualstate') return False + def _inline_short_preamble(self, short_preamble, inliner, patchguardop, assumed_classes): + i = 1 + 
# XXX this is intentiontal :-(. short_preamble can change during the + # loop in some cases + while i < len(short_preamble): + shop = short_preamble[i] + newop = inliner.inline_op(shop) + if newop.is_guard(): + if not patchguardop: + raise InvalidLoop("would like to have short preamble, but it has a guard and there's no guard_future_condition") + descr = patchguardop.getdescr().clone_if_mutable() + newop.setdescr(descr) + self.optimizer.send_extra_operation(newop) + if shop.result in assumed_classes: + classbox = self.getvalue(newop.result).get_constant_class(self.optimizer.cpu) + if not classbox or not classbox.same_constant(assumed_classes[shop.result]): + raise InvalidLoop('The class of an opaque pointer before the jump ' + + 'does not mach the class ' + + 'it has at the start of the target loop') + i += 1 + class ValueImporter(object): def __init__(self, unroll, value, op): diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.optimizeopt.rawbuffer import RawBuffer, InvalidRawOperation from rpython.jit.metainterp.resoperation import rop, ResOperation -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, specialize class AbstractVirtualValue(optimizer.OptValue): @@ -45,27 +45,17 @@ return value return OptValue(self.force_box(optforce)) - def get_args_for_fail(self, modifier): + def visitor_walk_recursive(self, visitor): # checks for recursion: it is False unless # we have already seen the very same keybox - if self.box is None and not modifier.already_seen_virtual(self.keybox): - self._get_args_for_fail(modifier) + if self.box is None and not visitor.already_seen_virtual(self.keybox): + self._visitor_walk_recursive(visitor) - def _get_args_for_fail(self, modifier): + def 
_visitor_walk_recursive(self, visitor): raise NotImplementedError("abstract base") - def make_virtual_info(self, modifier, fieldnums): - if fieldnums is None: - return self._make_virtual(modifier) - vinfo = self._cached_vinfo - if vinfo is not None and vinfo.equals(fieldnums): - return vinfo - vinfo = self._make_virtual(modifier) - vinfo.set_content(fieldnums) - self._cached_vinfo = vinfo - return vinfo - - def _make_virtual(self, modifier): + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): raise NotImplementedError("abstract base") def _really_force(self, optforce): @@ -202,13 +192,13 @@ self._cached_sorted_fields = lst return lst - def _get_args_for_fail(self, modifier): + def _visitor_walk_recursive(self, visitor): lst = self._get_field_descr_list() fieldboxes = [self._fields[ofs].get_key_box() for ofs in lst] - modifier.register_virtual_fields(self.keybox, fieldboxes) + visitor.register_virtual_fields(self.keybox, fieldboxes) for ofs in lst: fieldvalue = self._fields[ofs] - fieldvalue.get_args_for_fail(modifier) + fieldvalue.visitor_walk_recursive(visitor) class VirtualValue(AbstractVirtualStructValue): level = optimizer.LEVEL_KNOWNCLASS @@ -218,9 +208,10 @@ assert isinstance(known_class, Const) self.known_class = known_class - def _make_virtual(self, modifier): + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): fielddescrs = self._get_field_descr_list() - return modifier.make_virtual(self.known_class, fielddescrs) + return visitor.visit_virtual(self.known_class, fielddescrs) def _get_descr(self): return vtable2descr(self.cpu, self.known_class.getint()) @@ -238,9 +229,10 @@ AbstractVirtualStructValue.__init__(self, cpu, keybox, source_op) self.structdescr = structdescr - def _make_virtual(self, modifier): + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): fielddescrs = self._get_field_descr_list() - return modifier.make_vstruct(self.structdescr, fielddescrs) + return 
visitor.visit_vstruct(self.structdescr, fielddescrs) def _get_descr(self): return self.structdescr @@ -260,15 +252,15 @@ def set_item_value(self, i, newval): raise NotImplementedError - def _get_args_for_fail(self, modifier): + def _visitor_walk_recursive(self, visitor): itemboxes = [] for i in range(self.getlength()): itemvalue = self.get_item_value(i) itemboxes.append(itemvalue.get_key_box()) - modifier.register_virtual_fields(self.keybox, itemboxes) + visitor.register_virtual_fields(self.keybox, itemboxes) for i in range(self.getlength()): itemvalue = self.get_item_value(i) - itemvalue.get_args_for_fail(modifier) + itemvalue.visitor_walk_recursive(visitor) class VArrayValue(AbstractVArrayValue): @@ -326,8 +318,9 @@ descr=self.arraydescr) optforce.emit_operation(op) - def _make_virtual(self, modifier): - return modifier.make_varray(self.arraydescr) + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + return visitor.visit_varray(self.arraydescr) class VArrayStructValue(AbstractVirtualValue): @@ -373,16 +366,16 @@ descrs.append(item_descrs) return descrs - def _get_args_for_fail(self, modifier): + def _visitor_walk_recursive(self, visitor): itemdescrs = self._get_list_of_descrs() itemboxes = [] for i in range(len(self._items)): for descr in itemdescrs[i]: itemboxes.append(self._items[i][descr].get_key_box()) - modifier.register_virtual_fields(self.keybox, itemboxes) + visitor.register_virtual_fields(self.keybox, itemboxes) for i in range(len(self._items)): for descr in itemdescrs[i]: - self._items[i][descr].get_args_for_fail(modifier) + self._items[i][descr].visitor_walk_recursive(visitor) def force_at_end_of_preamble(self, already_forced, optforce): if self in already_forced: @@ -393,8 +386,9 @@ self._items[index][descr] = self._items[index][descr].force_at_end_of_preamble(already_forced, optforce) return self - def _make_virtual(self, modifier): - return modifier.make_varraystruct(self.arraydescr, self._get_list_of_descrs()) + 
@specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + return visitor.visit_varraystruct(self.arraydescr, self._get_list_of_descrs()) class VRawBufferValue(AbstractVArrayValue): @@ -442,11 +436,12 @@ descr=descr) optforce.emit_operation(op) - def _make_virtual(self, modifier): + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): # I *think* we need to make a copy of offsets and descrs because we # want a snapshot of the virtual state right now: if we grow more # elements later, we don't want them to go in this virtual state - return modifier.make_vrawbuffer(self.size, + return visitor.visit_vrawbuffer(self.size, self.buffer.offsets[:], self.buffer.descrs[:]) @@ -474,13 +469,14 @@ def getitem_raw(self, offset, length, descr): return self.rawbuffer_value.getitem_raw(self.offset+offset, length, descr) - def _get_args_for_fail(self, modifier): + def _visitor_walk_recursive(self, visitor): box = self.rawbuffer_value.get_key_box() - modifier.register_virtual_fields(self.keybox, [box]) - self.rawbuffer_value.get_args_for_fail(modifier) + visitor.register_virtual_fields(self.keybox, [box]) + self.rawbuffer_value.visitor_walk_recursive(visitor) - def _make_virtual(self, modifier): - return modifier.make_vrawslice(self.offset) + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + return visitor.visit_vrawslice(self.offset) class OptVirtualize(optimizer.Optimization): diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -1,46 +1,74 @@ -from rpython.jit.metainterp import resume -from rpython.jit.metainterp.history import BoxInt, ConstInt, BoxPtr, Const -from rpython.jit.metainterp.optimize import InvalidLoop +from rpython.jit.metainterp.walkvirtual import VirtualVisitor +from rpython.jit.metainterp.history import (BoxInt, 
ConstInt, BoxPtr, Const, + ConstPtr, ConstFloat) from rpython.jit.metainterp.optimizeopt import virtualize from rpython.jit.metainterp.optimizeopt.intutils import IntUnbounded from rpython.jit.metainterp.optimizeopt.optimizer import (LEVEL_CONSTANT, - LEVEL_KNOWNCLASS, LEVEL_NONNULL, LEVEL_UNKNOWN) + LEVEL_KNOWNCLASS, LEVEL_NONNULL, LEVEL_UNKNOWN, OptValue) from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.debug import debug_start, debug_stop, debug_print -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, import_from_mixin class BadVirtualState(Exception): pass +class VirtualStatesCantMatch(Exception): + def __init__(self, msg='?', state=None): + self.msg = msg + self.state = state -class AbstractVirtualStateInfo(resume.AbstractVirtualInfo): +class GenerateGuardState(object): + def __init__(self, cpu=None, guards=None, renum=None, bad=None): + self.cpu = cpu + if guards is None: + guards = [] + self.extra_guards = guards + if renum is None: + renum = {} + self.renum = renum + if bad is None: + bad = {} + self.bad = bad + +class AbstractVirtualStateInfo(object): position = -1 - def generalization_of(self, other, renum, bad): + def generate_guards(self, other, value, state): + """ generate guards (output in the list extra_guards) that make runtime + values of the shape other match the shape of self. if that's not + possible, VirtualStatesCantMatch is thrown and bad gets keys set which + parts of the state are the problem. + + the function can peek into value (and particularly also the boxes in + the value) as a guiding heuristic whether making such guards makes + sense. 
if None is passed in for value, no guard is ever generated, and + this function degenerates to a generalization check.""" + assert value is None or isinstance(value, OptValue) assert self.position != -1 - if self.position in renum: - result = renum[self.position] == other.position + if self.position in state.renum: + if state.renum[self.position] != other.position: + state.bad[self] = state.bad[other] = None + raise VirtualStatesCantMatch( + 'The numbering of the virtual states does not ' + + 'match. This means that two virtual fields ' + + 'have been set to the same Box in one of the ' + + 'virtual states but not in the other.', + state) else: - renum[self.position] = other.position - result = self.generalization_of_renumbering_done(other, renum, bad) - if not result: - bad[self] = bad[other] = None - return result + state.renum[self.position] = other.position + try: + self._generate_guards(other, value, state) + except VirtualStatesCantMatch, e: + state.bad[self] = state.bad[other] = None + if e.state is None: + e.state = state + raise e - def generate_guards(self, other, box, cpu, extra_guards, renum): - if self.generalization_of(other, renum, {}): - return - if renum[self.position] != other.position: - raise InvalidLoop('The numbering of the virtual states does not ' + - 'match. 
This means that two virtual fields ' + - 'have been set to the same Box in one of the ' + - 'virtual states but not in the other.') - self._generate_guards(other, box, cpu, extra_guards) - - def _generate_guards(self, other, box, cpu, extra_guards): - raise InvalidLoop('Generating guards for making the VirtualStates ' + - 'at hand match have not been implemented') + def _generate_guards(self, other, value, state): + raise VirtualStatesCantMatch( + 'Generating guards for making the VirtualStates ' + + 'at hand match have not been implemented') def enum_forced_boxes(self, boxes, value, optimizer): raise NotImplementedError @@ -55,7 +83,7 @@ def _enum(self, virtual_state): raise NotImplementedError - def debug_print(self, indent, seen, bad): + def debug_print(self, indent, seen, bad, metainterp_sd): mark = '' if self in bad: mark = '*' @@ -63,7 +91,7 @@ if self not in seen: seen[self] = True for s in self.fieldstate: - s.debug_print(indent + " ", seen, bad) + s.debug_print(indent + " ", seen, bad, metainterp_sd) else: debug_print(indent + " ...") @@ -75,26 +103,31 @@ def __init__(self, fielddescrs): self.fielddescrs = fielddescrs - def generalization_of_renumbering_done(self, other, renum, bad): - if not self._generalization_of(other): - return False + def _generate_guards(self, other, value, state): + if not self._generalization_of_structpart(other): + raise VirtualStatesCantMatch("different kinds of structs") assert isinstance(other, AbstractVirtualStructStateInfo) assert len(self.fielddescrs) == len(self.fieldstate) assert len(other.fielddescrs) == len(other.fieldstate) + if value is not None: + assert isinstance(value, virtualize.AbstractVirtualStructValue) + assert value.is_virtual() + if len(self.fielddescrs) != len(other.fielddescrs): - return False + raise VirtualStatesCantMatch("field descrs don't match") for i in range(len(self.fielddescrs)): if other.fielddescrs[i] is not self.fielddescrs[i]: - return False - if not 
self.fieldstate[i].generalization_of(other.fieldstate[i], - renum, bad): - return False + raise VirtualStatesCantMatch("field descrs don't match") + if value is not None: + v = value._fields[self.fielddescrs[i]] # must be there + else: + v = None + self.fieldstate[i].generate_guards(other.fieldstate[i], v, state) - return True - def _generalization_of(self, other): + def _generalization_of_structpart(self, other): raise NotImplementedError def enum_forced_boxes(self, boxes, value, optimizer): @@ -121,10 +154,11 @@ AbstractVirtualStructStateInfo.__init__(self, fielddescrs) self.known_class = known_class - def _generalization_of(self, other): + def _generalization_of_structpart(self, other): return (isinstance(other, VirtualStateInfo) and self.known_class.same_constant(other.known_class)) + def debug_header(self, indent): debug_print(indent + 'VirtualStateInfo(%d):' % self.position) @@ -134,7 +168,7 @@ AbstractVirtualStructStateInfo.__init__(self, fielddescrs) self.typedescr = typedescr - def _generalization_of(self, other): + def _generalization_of_structpart(self, other): return (isinstance(other, VStructStateInfo) and self.typedescr is other.typedescr) @@ -147,20 +181,20 @@ def __init__(self, arraydescr): self.arraydescr = arraydescr - def _generalization_of(self, other): - return (isinstance(other, VArrayStateInfo) and - self.arraydescr is other.arraydescr) - - def generalization_of_renumbering_done(self, other, renum, bad): - if not self._generalization_of(other): - return False + def _generate_guards(self, other, value, state): + if not isinstance(other, VArrayStateInfo): + raise VirtualStatesCantMatch("other is not an array") + if self.arraydescr is not other.arraydescr: + raise VirtualStatesCantMatch("other is a different kind of array") if len(self.fieldstate) != len(other.fieldstate): - return False + raise VirtualStatesCantMatch("other has a different length") + v = None for i in range(len(self.fieldstate)): - if not 
self.fieldstate[i].generalization_of(other.fieldstate[i], - renum, bad): - return False - return True + if value is not None: + assert isinstance(value, virtualize.VArrayValue) + v = value._items[i] + self.fieldstate[i].generate_guards(other.fieldstate[i], + v, state) def enum_forced_boxes(self, boxes, value, optimizer): if not isinstance(value, virtualize.VArrayValue): @@ -188,30 +222,31 @@ self.arraydescr = arraydescr self.fielddescrs = fielddescrs - def generalization_of_renumbering_done(self, other, renum, bad): - if not self._generalization_of(other): - return False + def _generate_guards(self, other, value, state): + if not isinstance(other, VArrayStructStateInfo): + raise VirtualStatesCantMatch("other is not an VArrayStructStateInfo") + if self.arraydescr is not other.arraydescr: + raise VirtualStatesCantMatch("other is a different kind of array") - assert isinstance(other, VArrayStructStateInfo) if len(self.fielddescrs) != len(other.fielddescrs): - return False + raise VirtualStatesCantMatch("other has a different length") p = 0 + v = None for i in range(len(self.fielddescrs)): if len(self.fielddescrs[i]) != len(other.fielddescrs[i]): - return False + raise VirtualStatesCantMatch("other has a different length") for j in range(len(self.fielddescrs[i])): - if self.fielddescrs[i][j] is not other.fielddescrs[i][j]: - return False - if not self.fieldstate[p].generalization_of(other.fieldstate[p], - renum, bad): - return False + descr = self.fielddescrs[i][j] + if descr is not other.fielddescrs[i][j]: + raise VirtualStatesCantMatch("other is a different kind of array") + if value is not None: + assert isinstance(value, virtualize.VArrayStructValue) + v = value._items[i][descr] + self.fieldstate[p].generate_guards(other.fieldstate[p], + v, + state) p += 1 - return True - - def _generalization_of(self, other): - return (isinstance(other, VArrayStructStateInfo) and - self.arraydescr is other.arraydescr) def _enum(self, virtual_state): for s in self.fieldstate: @@ 
-256,97 +291,109 @@ self.position_in_notvirtuals = -1 self.lenbound = value.lenbound - def generalization_of_renumbering_done(self, other, renum, bad): + + def _generate_guards(self, other, value, state): + if value is None or self.is_opaque: + box = None # generating guards for opaque pointers isn't safe + else: + box = value.box # XXX This will always retrace instead of forcing anything which # might be what we want sometimes? if not isinstance(other, NotVirtualStateInfo): - return False - if other.level < self.level: - return False - if self.level == LEVEL_CONSTANT: - if not self.constbox.same_constant(other.constbox): - return False + raise VirtualStatesCantMatch( + 'The VirtualStates does not match as a ' + + 'virtual appears where a pointer is needed ' + + 'and it is too late to force it.') + + + extra_guards = state.extra_guards + cpu = state.cpu + if self.lenbound and not self.lenbound.generalization_of(other.lenbound): + raise VirtualStatesCantMatch("length bound does not match") + + if self.level == LEVEL_UNKNOWN: + # confusingly enough, this is done also for pointers + # which have the full range as the "bound", so it always works + return self._generate_guards_intbounds(other, box, extra_guards) + + # the following conditions often peek into the runtime value that the + # box had when tracing. This value is only used as an educated guess. + # It is used here to choose between either emitting a guard and jumping + # to an existing compiled loop or retracing the loop. Both alternatives + # will always generate correct behaviour, but performance will differ. 
+ elif self.level == LEVEL_NONNULL: + if other.level == LEVEL_UNKNOWN: + if box is not None and box.nonnull(): + op = ResOperation(rop.GUARD_NONNULL, [box], None) + extra_guards.append(op) + return + else: + raise VirtualStatesCantMatch("other not known to be nonnull") + elif other.level == LEVEL_NONNULL: + return + elif other.level == LEVEL_KNOWNCLASS: + return # implies nonnull + else: + assert other.level == LEVEL_CONSTANT + assert other.constbox + if not other.constbox.nonnull(): + raise VirtualStatesCantMatch("constant is null") + return + elif self.level == LEVEL_KNOWNCLASS: - if not self.known_class.same_constant(other.known_class): - return False - elif self.level == LEVEL_NONNULL: - if other.constbox and not other.constbox.nonnull(): - return False + if other.level == LEVEL_UNKNOWN: + if (box and box.nonnull() and + self.known_class.same_constant(cpu.ts.cls_of_box(box))): + op = ResOperation(rop.GUARD_NONNULL, [box], None) + extra_guards.append(op) + op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) + extra_guards.append(op) + return + else: + raise VirtualStatesCantMatch("other's class is unknown") + elif other.level == LEVEL_NONNULL: + if box and self.known_class.same_constant(cpu.ts.cls_of_box(box)): + op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) + extra_guards.append(op) + return + else: + raise VirtualStatesCantMatch("other's class is unknown") + elif other.level == LEVEL_KNOWNCLASS: + if self.known_class.same_constant(other.known_class): + return + raise VirtualStatesCantMatch("classes don't match") + else: + assert other.level == LEVEL_CONSTANT + if (other.constbox.nonnull() and + self.known_class.same_constant(cpu.ts.cls_of_box(other.constbox))): + return + else: + raise VirtualStatesCantMatch("classes don't match") - if not self.intbound.contains_bound(other.intbound): - return False - if self.lenbound and other.lenbound: - if self.lenbound.mode != other.lenbound.mode or \ - self.lenbound.descr != 
other.lenbound.descr or \ - not self.lenbound.bound.contains_bound(other.lenbound.bound): - return False - elif self.lenbound: - return False - return True + else: + assert self.level == LEVEL_CONSTANT + if other.level == LEVEL_CONSTANT: + if self.constbox.same_constant(other.constbox): + return + raise VirtualStatesCantMatch("different constants") + if box is not None and self.constbox.same_constant(box.constbox()): + op = ResOperation(rop.GUARD_VALUE, [box, self.constbox], None) + extra_guards.append(op) + return + else: + raise VirtualStatesCantMatch("other not constant") + assert 0, "unreachable" - def _generate_guards(self, other, box, cpu, extra_guards): - if not isinstance(other, NotVirtualStateInfo): - raise InvalidLoop('The VirtualStates does not match as a ' + - 'virtual appears where a pointer is needed ' + - 'and it is too late to force it.') - - if self.lenbound or other.lenbound: - raise InvalidLoop('The array length bounds does not match.') - - if self.is_opaque: - raise InvalidLoop('Generating guards for opaque pointers is not safe') - - if self.level == LEVEL_KNOWNCLASS and \ - box.nonnull() and \ - self.known_class.same_constant(cpu.ts.cls_of_box(box)): - # Note: This is only a hint on what the class of box was - # during the trace. There are actually no guarentees that this - # box realy comes from a trace. The hint is used here to choose - # between either eimtting a guard_class and jumping to an - # excisting compiled loop or retracing the loop. Both - # alternatives will always generate correct behaviour, but - # performace will differ. 
- op = ResOperation(rop.GUARD_NONNULL, [box], None) - extra_guards.append(op) - op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) - extra_guards.append(op) + def _generate_guards_intbounds(self, other, box, extra_guards): + if self.intbound.contains_bound(other.intbound): return - - if (self.level == LEVEL_NONNULL and - other.level == LEVEL_UNKNOWN and - isinstance(box, BoxPtr) and - box.nonnull()): - op = ResOperation(rop.GUARD_NONNULL, [box], None) - extra_guards.append(op) + if (box is not None and isinstance(box, BoxInt) and + self.intbound.contains(box.getint())): + # this may generate a few more guards than needed, but they are + # optimized away when emitting them + self.intbound.make_guards(box, extra_guards) return - - if (self.level == LEVEL_UNKNOWN and - other.level == LEVEL_UNKNOWN and - isinstance(box, BoxInt) and - self.intbound.contains(box.getint())): - if self.intbound.has_lower: - bound = self.intbound.lower - if not (other.intbound.has_lower and - other.intbound.lower >= bound): - res = BoxInt() - op = ResOperation(rop.INT_GE, [box, ConstInt(bound)], res) - extra_guards.append(op) - op = ResOperation(rop.GUARD_TRUE, [res], None) - extra_guards.append(op) - if self.intbound.has_upper: - bound = self.intbound.upper - if not (other.intbound.has_upper and - other.intbound.upper <= bound): - res = BoxInt() - op = ResOperation(rop.INT_LE, [box, ConstInt(bound)], res) - extra_guards.append(op) - op = ResOperation(rop.GUARD_TRUE, [res], None) - extra_guards.append(op) - return - - # Remaining cases are probably not interesting - raise InvalidLoop('Generating guards for making the VirtualStates ' + - 'at hand match have not been implemented') + raise VirtualStatesCantMatch("intbounds don't match") def enum_forced_boxes(self, boxes, value, optimizer): if self.level == LEVEL_CONSTANT: @@ -363,25 +410,37 @@ def _enum(self, virtual_state): if self.level == LEVEL_CONSTANT: return - self.position_in_notvirtuals = len(virtual_state.notvirtuals) - 
virtual_state.notvirtuals.append(self) + self.position_in_notvirtuals = virtual_state.numnotvirtuals + virtual_state.numnotvirtuals += 1 - def debug_print(self, indent, seen, bad): + def debug_print(self, indent, seen, bad, metainterp_sd=None): mark = '' if self in bad: mark = '*' - if we_are_translated(): - l = {LEVEL_UNKNOWN: 'Unknown', - LEVEL_NONNULL: 'NonNull', - LEVEL_KNOWNCLASS: 'KnownClass', - LEVEL_CONSTANT: 'Constant', - }[self.level] + if self.level == LEVEL_UNKNOWN: From noreply at buildbot.pypy.org Tue Apr 22 11:56:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 22 Apr 2014 11:56:18 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Next target test Message-ID: <20140422095618.C4A6F1D24FB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1175:da174c64bb13 Date: 2014-04-22 11:56 +0200 http://bitbucket.org/pypy/stmgc/changeset/da174c64bb13/ Log: Next target test diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -80,6 +80,7 @@ assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER assert 0.099 <= tl.longest_marker_time <= 0.9 assert ffi.string(tl.longest_marker_self) == '29 %r' % (p,) + assert tl.longest_marker_other[0] == '\x00' def test_macros(self): self.start_transaction() @@ -175,3 +176,28 @@ raw = lib._stm_expand_marker() assert ffi.string(raw) == '27 %r' % (p,) assert seen == [29, 27] + + def test_double_abort_markers_cb(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + p = stm_allocate_old(16) + # + self.start_transaction() + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'A') + # + self.switch(1) + self.start_transaction() + 
self.push_root(ffi.cast("object_t *", 21)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + py.test.raises(Conflict, stm_set_char, p, 'B') + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE + assert ffi.string(tl.longest_marker_self) == '21' + assert ffi.string(tl.longest_marker_other) == '19' From noreply at buildbot.pypy.org Tue Apr 22 12:32:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 22 Apr 2014 12:32:07 +0200 (CEST) Subject: [pypy-commit] pypy default: typo Message-ID: <20140422103207.E7E381D2BE7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70852:d9b1c16461ba Date: 2014-04-22 12:31 +0200 http://bitbucket.org/pypy/pypy/changeset/d9b1c16461ba/ Log: typo diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -151,4 +151,4 @@ add 'lib' prefix to link libraries on OpenBSD .. branch: small-unroll-improvements -Improve optimiziation of small allocation-heavy loops in the JIT +Improve optimization of small allocation-heavy loops in the JIT From noreply at buildbot.pypy.org Tue Apr 22 12:38:38 2014 From: noreply at buildbot.pypy.org (Matti Picus) Date: Tue, 22 Apr 2014 12:38:38 +0200 (CEST) Subject: [pypy-commit] pypy default: make status variable global to threads Message-ID: <20140422103838.907921D2BE7@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70853:15901b2a8362 Date: 2014-04-22 12:56 +0300 http://bitbucket.org/pypy/pypy/changeset/15901b2a8362/ Log: make status variable global to threads diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -165,11 +165,11 @@ sock.listen(1) s2 = RSocket(AF_INET, SOCK_STREAM) s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test - connected = False + connected = [False] 
#thread-mutable list def connecting(): try: s2.connect(addr) - connected = True + connected[0] = True finally: lock.release() lock = thread.allocate_lock() @@ -180,7 +180,7 @@ s1 = RSocket(fd=fd1) print 'connection accepted' lock.acquire() - assert connected + assert connected[0] print 'connecting side knows that the connection was accepted too' assert addr.eq(s2.getpeername()) #assert addr2.eq(s2.getsockname()) From noreply at buildbot.pypy.org Tue Apr 22 13:15:04 2014 From: noreply at buildbot.pypy.org (Patrick Rein) Date: Tue, 22 Apr 2014 13:15:04 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: Added a basic readme Message-ID: <20140422111504.40E481C06C3@cobra.cs.uni-duesseldorf.de> Author: Patrick Rein Branch: Changeset: r794:4e58aee0b37c Date: 2014-04-22 13:14 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/4e58aee0b37c/ Log: Added a basic readme diff --git a/README.md b/README.md new file mode 100644 --- /dev/null +++ b/README.md @@ -0,0 +1,69 @@ +Spy +========= + +A Squeak VM written in RPython, called "SPy VM". + +Setup +---- +### Required Projects +You need three repositories: +* This one +* pypy/pypy +* pypy/rsdl + +### Required packages +You need the following packages on your OS. Install with your favorite package +manager: +* pypy (For faster translation of the SPY VM) +* libsdl-dev + +### Adjusting the PYTHONPATH +In order to allow the RPython toolchain to find the rsdl module you have to add +the rsdl folder to the PYTHONPATH. Note that you have to add the rsdl subfolder +of the rsdl repository to the PYTHONPATH. + +``` +export PYTHONPATH=${PYTHONPATH}:[path to rsdl repository]/rsdl +``` + +### Setting the SDL Driver +For testing the basic functionality of the VM it is currently best to disable +the UI. You can do so by setting the SDL_VIDEODRIVER environment variable to +dummy. 
+``` +export SDL_VIDEODRIVER=dummy +``` + +### Building +To build the VM enter the following: + +``` +[path to pypy repository]/rpython/bin/rpython [path to lang-smalltalk +repository]/targetimageloadingsmalltalk.py +``` + +To build the VM with enabled just-in-time compiler: +``` +[path to pypy repository]/rpython/bin/rpython -O jit [path to lang-smalltalk +repository]/targetimageloadingsmalltalk.py +``` + +### Starting an image +The build process will produce an executable e.g. called +targetimageloadingsmalltalk-c. Start it with the following: +``` +./targetimageloadingsmalltalk-c images/Squeak4.5-*.image +``` + +Setup for stm-enabled SPY +--- +There are two branches integrating the RPython STM into SPY: stm-c4, +storage-stm-c4. You have to change two things of the setup to build those +branches. + +1. Change your local pypy repository to the stm-c4 branch. +2. Build using the following command: +``` +[path to pypy repository]/rpython/bin/rpython --gc=stmgc [path to lang-smalltalk +repository]/targetimageloadingsmalltalk.py +``` diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -223,8 +223,9 @@ # driver.config.translation.gcrootfinder = "stm" from rpython.rlib import rgc if hasattr(rgc, "stm_is_enabled"): - driver.config.translation.stm = True - driver.config.translation.thread = True + pass + #driver.config.translation.stm = True + #driver.config.translation.thread = True return entry_point, None From noreply at buildbot.pypy.org Tue Apr 22 14:58:02 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 22 Apr 2014 14:58:02 +0200 (CEST) Subject: [pypy-commit] benchmarks default: add initial version of threaded bottle benchmark running on pypy/cpython/jython Message-ID: <20140422125802.02E7C1C06C3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r250:c5f5c7137ed0 Date: 2014-04-22 14:57 +0200 
http://bitbucket.org/pypy/benchmarks/changeset/c5f5c7137ed0/ Log: add initial version of threaded bottle benchmark running on pypy/cpython/jython diff too long, truncating to 2000 out of 3662 lines diff --git a/multithread/bottle/app.py b/multithread/bottle/app.py new file mode 100755 --- /dev/null +++ b/multithread/bottle/app.py @@ -0,0 +1,65 @@ +from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool +from SocketServer import ThreadingMixIn +from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler + +import threading, socket, time + +from wsgiref.simple_server import WSGIRequestHandler, WSGIServer + +class ThreadedHTTPServer(HTTPServer): + """Handle requests in a separate thread.""" + application = None + allow_reuse_address = True + + def server_bind(self): + """Override server_bind to store the server name.""" + HTTPServer.server_bind(self) + self.setup_environ() + + def setup_environ(self): + # Set up base environment + env = self.base_environ = {} + env['SERVER_NAME'] = self.server_name + env['GATEWAY_INTERFACE'] = 'CGI/1.1' + env['SERVER_PORT'] = str(self.server_port) + env['REMOTE_HOST']='' + env['CONTENT_LENGTH']='' + env['SCRIPT_NAME'] = '' + + def get_app(self): + return self.application + + def set_app(self,application): + self.application = application + + def process_request(self, request, client_address): + def worker(request, client_address): + try: + self.finish_request(request, client_address) + except: + self.handle_error(request, client_address) + finally: + self.close_request(request) + Future(worker, request, client_address) + + + +from bottle import route, run, ServerAdapter + +class ThreadedServer(ServerAdapter): + def run(self, app): # pragma: no cover + srv = ThreadedHTTPServer((self.host, self.port), WSGIRequestHandler) + srv.set_app(app) + srv.serve_forever() + + + at route('/') +def index(): + time.sleep(0.5) + return "hi from " + threading.currentThread().getName() + + +if __name__ == "__main__": + 
set_thread_pool(ThreadPool(8)) + run(server=ThreadedServer, # debug=True, + host='localhost', port=8080) diff --git a/multithread/bottle/bottle.py b/multithread/bottle/bottle.py new file mode 100644 --- /dev/null +++ b/multithread/bottle/bottle.py @@ -0,0 +1,3587 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Bottle is a fast and simple micro-framework for small web applications. It +offers request dispatching (Routes) with url parameter support, templates, +a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and +template engines - all in a single file and with no dependencies other than the +Python Standard Library. + +Homepage and documentation: http://bottlepy.org/ + +Copyright (c) 2014, Marcel Hellkamp. +License: MIT (see LICENSE for details) +""" + +from __future__ import with_statement + +__author__ = 'Marcel Hellkamp' +__version__ = '0.13-dev' +__license__ = 'MIT' + +# The gevent server adapter needs to patch some modules before they are imported +# This is why we parse the commandline parameters here but handle them later +if __name__ == '__main__': + from optparse import OptionParser + _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app") + _opt = _cmd_parser.add_option + _opt("--version", action="store_true", help="show version number.") + _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.") + _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.") + _opt("-p", "--plugin", action="append", help="install additional plugin/s.") + _opt("--debug", action="store_true", help="start server in debug mode.") + _opt("--reload", action="store_true", help="auto-reload on file changes.") + _cmd_options, _cmd_args = _cmd_parser.parse_args() + if _cmd_options.server and _cmd_options.server.startswith('gevent'): + import gevent.monkey; gevent.monkey.patch_all() + +import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\ + os, re, subprocess, sys, tempfile, 
threading, time, warnings + +from datetime import date as datedate, datetime, timedelta +from tempfile import TemporaryFile +from traceback import format_exc, print_exc +from inspect import getargspec +from unicodedata import normalize + + +try: from simplejson import dumps as json_dumps, loads as json_lds +except ImportError: # pragma: no cover + try: from json import dumps as json_dumps, loads as json_lds + except ImportError: + try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds + except ImportError: + def json_dumps(data): + raise ImportError("JSON support requires Python 2.6 or simplejson.") + json_lds = json_dumps + + + +# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities. +# It ain't pretty but it works... Sorry for the mess. + +py = sys.version_info +py3k = py >= (3, 0, 0) +py25 = py < (2, 6, 0) +py31 = (3, 1, 0) <= py < (3, 2, 0) + +# Workaround for the missing "as" keyword in py3k. +def _e(): return sys.exc_info()[1] + +# Workaround for the "print is a keyword/function" Python 2/3 dilemma +# and a fallback for mod_wsgi (resticts stdout/err attribute access) +try: + _stdout, _stderr = sys.stdout.write, sys.stderr.write +except IOError: + _stdout = lambda x: sys.stdout.write(x) + _stderr = lambda x: sys.stderr.write(x) + +# Lots of stdlib and builtin differences. 
+if py3k: + import http.client as httplib + import _thread as thread + from urllib.parse import urljoin, SplitResult as UrlSplitResult + from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote + urlunquote = functools.partial(urlunquote, encoding='latin1') + from http.cookies import SimpleCookie + from collections import MutableMapping as DictMixin + import pickle + from io import BytesIO + from configparser import ConfigParser + basestring = str + unicode = str + json_loads = lambda s: json_lds(touni(s)) + callable = lambda x: hasattr(x, '__call__') + imap = map + def _raise(*a): raise a[0](a[1]).with_traceback(a[2]) +else: # 2.x + import httplib + import thread + from urlparse import urljoin, SplitResult as UrlSplitResult + from urllib import urlencode, quote as urlquote, unquote as urlunquote + from Cookie import SimpleCookie + from itertools import imap + import cPickle as pickle + from StringIO import StringIO as BytesIO + from ConfigParser import SafeConfigParser as ConfigParser + if py25: + msg = "Python 2.5 support may be dropped in future versions of Bottle." + warnings.warn(msg, DeprecationWarning) + from UserDict import DictMixin + def next(it): return it.next() + bytes = str + else: # 2.6, 2.7 + from collections import MutableMapping as DictMixin + unicode = unicode + json_loads = json_lds + eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '', 'exec')) + + +# Some helpers for string/byte handling +def tob(s, enc='utf8'): + return s.encode(enc) if isinstance(s, unicode) else bytes(s) + + +def touni(s, enc='utf8', err='strict'): + if isinstance(s, bytes): + return s.decode(enc, err) + else: + return unicode(s or ("" if s is None else s)) + +tonat = touni if py3k else tob + +# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense). +# 3.1 needs a workaround. +if py31: + from io import TextIOWrapper + + class NCTextIOWrapper(TextIOWrapper): + def close(self): pass # Keep wrapped buffer open. 
+ + +# A bug in functools causes it to break if the wrapper is an instance method +def update_wrapper(wrapper, wrapped, *a, **ka): + try: + functools.update_wrapper(wrapper, wrapped, *a, **ka) + except AttributeError: + pass + + +# These helpers are used at module level and need to be defined first. +# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense. + +def depr(message, strict=False): + warnings.warn(message, DeprecationWarning, stacklevel=3) + +def makelist(data): # This is just to handy + if isinstance(data, (tuple, list, set, dict)): + return list(data) + elif data: + return [data] + else: + return [] + + +class DictProperty(object): + """ Property that maps to a key in a local dict-like attribute. """ + def __init__(self, attr, key=None, read_only=False): + self.attr, self.key, self.read_only = attr, key, read_only + + def __call__(self, func): + functools.update_wrapper(self, func, updated=[]) + self.getter, self.key = func, self.key or func.__name__ + return self + + def __get__(self, obj, cls): + if obj is None: return self + key, storage = self.key, getattr(obj, self.attr) + if key not in storage: storage[key] = self.getter(obj) + return storage[key] + + def __set__(self, obj, value): + if self.read_only: raise AttributeError("Read-Only property.") + getattr(obj, self.attr)[self.key] = value + + def __delete__(self, obj): + if self.read_only: raise AttributeError("Read-Only property.") + del getattr(obj, self.attr)[self.key] + + +class cached_property(object): + """ A property that is only computed once per instance and then replaces + itself with an ordinary attribute. Deleting the attribute resets the + property. 
""" + + def __init__(self, func): + self.__doc__ = getattr(func, '__doc__') + self.func = func + + def __get__(self, obj, cls): + if obj is None: return self + value = obj.__dict__[self.func.__name__] = self.func(obj) + return value + + +class lazy_attribute(object): + """ A property that caches itself to the class object. """ + def __init__(self, func): + functools.update_wrapper(self, func, updated=[]) + self.getter = func + + def __get__(self, obj, cls): + value = self.getter(cls) + setattr(cls, self.__name__, value) + return value + + + + + + +############################################################################### +# Exceptions and Events ######################################################## +############################################################################### + + +class BottleException(Exception): + """ A base class for exceptions used by bottle. """ + pass + + + + + + +############################################################################### +# Routing ###################################################################### +############################################################################### + + +class RouteError(BottleException): + """ This is a base class for all routing related exceptions """ + + +class RouteReset(BottleException): + """ If raised by a plugin or request handler, the route is reset and all + plugins are re-applied. """ + +class RouterUnknownModeError(RouteError): pass + + +class RouteSyntaxError(RouteError): + """ The route parser found something not supported by this router. """ + + +class RouteBuildError(RouteError): + """ The route could not be built. """ + + +def _re_flatten(p): + """ Turn all capturing groups in a regular expression pattern into + non-capturing groups. 
""" + if '(' not in p: + return p + return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', + lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p) + + +class Router(object): + """ A Router is an ordered collection of route->target pairs. It is used to + efficiently match WSGI requests against a number of routes and return + the first target that satisfies the request. The target may be anything, + usually a string, ID or callable object. A route consists of a path-rule + and a HTTP method. + + The path-rule is either a static path (e.g. `/contact`) or a dynamic + path that contains wildcards (e.g. `/wiki/`). The wildcard syntax + and details on the matching order are described in docs:`routing`. + """ + + default_pattern = '[^/]+' + default_filter = 're' + + #: The current CPython regexp implementation does not allow more + #: than 99 matching groups per regular expression. + _MAX_GROUPS_PER_PATTERN = 99 + + def __init__(self, strict=False): + self.rules = [] # All rules in order + self._groups = {} # index of regexes to find them in dyna_routes + self.builder = {} # Data structure for the url builder + self.static = {} # Search structure for static routes + self.dyna_routes = {} + self.dyna_regexes = {} # Search structure for dynamic routes + #: If true, static routes are no longer checked first. + self.strict_order = strict + self.filters = { + 're': lambda conf: + (_re_flatten(conf or self.default_pattern), None, None), + 'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))), + 'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))), + 'path': lambda conf: (r'.+?', None, None)} + + def add_filter(self, name, func): + """ Add a filter. The provided function is called with the configuration + string as parameter and must return a (regexp, to_python, to_url) tuple. + The first element is a string, the last two are callables or None. 
""" + self.filters[name] = func + + rule_syntax = re.compile('(\\\\*)' + '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)' + '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)' + '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))') + + def _itertokens(self, rule): + offset, prefix = 0, '' + for match in self.rule_syntax.finditer(rule): + prefix += rule[offset:match.start()] + g = match.groups() + if len(g[0])%2: # Escaped wildcard + prefix += match.group(0)[len(g[0]):] + offset = match.end() + continue + if prefix: + yield prefix, None, None + name, filtr, conf = g[4:7] if g[2] is None else g[1:4] + yield name, filtr or 'default', conf or None + offset, prefix = match.end(), '' + if offset <= len(rule) or prefix: + yield prefix+rule[offset:], None, None + + def add(self, rule, method, target, name=None): + """ Add a new rule or replace the target for an existing rule. """ + anons = 0 # Number of anonymous wildcards found + keys = [] # Names of keys + pattern = '' # Regular expression pattern with named groups + filters = [] # Lists of wildcard input filters + builder = [] # Data structure for the URL builder + is_static = True + + for key, mode, conf in self._itertokens(rule): + if mode: + is_static = False + if mode == 'default': mode = self.default_filter + mask, in_filter, out_filter = self.filters[mode](conf) + if not key: + pattern += '(?:%s)' % mask + key = 'anon%d' % anons + anons += 1 + else: + pattern += '(?P<%s>%s)' % (key, mask) + keys.append(key) + if in_filter: filters.append((key, in_filter)) + builder.append((key, out_filter or str)) + elif key: + pattern += re.escape(key) + builder.append((None, key)) + + self.builder[rule] = builder + if name: self.builder[name] = builder + + if is_static and not self.strict_order: + self.static.setdefault(method, {}) + self.static[method][self.build(rule)] = (target, None) + return + + try: + re_pattern = re.compile('^(%s)$' % pattern) + re_match = re_pattern.match + except re.error: + raise RouteSyntaxError("Could not add Route: 
%s (%s)" % (rule, _e())) + + if filters: + def getargs(path): + url_args = re_match(path).groupdict() + for name, wildcard_filter in filters: + try: + url_args[name] = wildcard_filter(url_args[name]) + except ValueError: + raise HTTPError(400, 'Path has wrong format.') + return url_args + elif re_pattern.groupindex: + def getargs(path): + return re_match(path).groupdict() + else: + getargs = None + + flatpat = _re_flatten(pattern) + whole_rule = (rule, flatpat, target, getargs) + + if (flatpat, method) in self._groups: + if DEBUG: + msg = 'Route <%s %s> overwrites a previously defined route' + warnings.warn(msg % (method, rule), RuntimeWarning) + self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule + else: + self.dyna_routes.setdefault(method, []).append(whole_rule) + self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1 + + self._compile(method) + + def _compile(self, method): + all_rules = self.dyna_routes[method] + comborules = self.dyna_regexes[method] = [] + maxgroups = self._MAX_GROUPS_PER_PATTERN + for x in range(0, len(all_rules), maxgroups): + some = all_rules[x:x+maxgroups] + combined = (flatpat for (_, flatpat, _, _) in some) + combined = '|'.join('(^%s$)' % flatpat for flatpat in combined) + combined = re.compile(combined).match + rules = [(target, getargs) for (_, _, target, getargs) in some] + comborules.append((combined, rules)) + + def build(self, _name, *anons, **query): + """ Build an URL by filling the wildcards in a rule. """ + builder = self.builder.get(_name) + if not builder: raise RouteBuildError("No route with that name.", _name) + try: + for i, value in enumerate(anons): query['anon%d'%i] = value + url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder]) + return url if not query else url+'?'+urlencode(query) + except KeyError: + raise RouteBuildError('Missing URL argument: %r' % _e().args[0]) + + def match(self, environ): + """ Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). 
""" + verb = environ['REQUEST_METHOD'].upper() + path = environ['PATH_INFO'] or '/' + + if verb == 'HEAD': + methods = ['PROXY', verb, 'GET', 'ANY'] + else: + methods = ['PROXY', verb, 'ANY'] + + for method in methods: + if method in self.static and path in self.static[method]: + target, getargs = self.static[method][path] + return target, getargs(path) if getargs else {} + elif method in self.dyna_regexes: + for combined, rules in self.dyna_regexes[method]: + match = combined(path) + if match: + target, getargs = rules[match.lastindex - 1] + return target, getargs(path) if getargs else {} + + # No matching route found. Collect alternative methods for 405 response + allowed = set([]) + nocheck = set(methods) + for method in set(self.static) - nocheck: + if path in self.static[method]: + allowed.add(verb) + for method in set(self.dyna_regexes) - allowed - nocheck: + for combined, rules in self.dyna_regexes[method]: + match = combined(path) + if match: + allowed.add(method) + if allowed: + allow_header = ",".join(sorted(allowed)) + raise HTTPError(405, "Method not allowed.", Allow=allow_header) + + # No matching route and no alternative method found. We give up + raise HTTPError(404, "Not found: " + repr(path)) + + + + + + +class Route(object): + """ This class wraps a route callback along with route specific metadata and + configuration and applies Plugins on demand. It is also responsible for + turing an URL path rule into a regular expression usable by the Router. + """ + + def __init__(self, app, rule, method, callback, name=None, + plugins=None, skiplist=None, **config): + #: The application this route is installed to. + self.app = app + #: The path-rule string (e.g. ``/wiki/:page``). + self.rule = rule + #: The HTTP method as a string (e.g. ``GET``). + self.method = method + #: The original callback with no plugins applied. Useful for introspection. + self.callback = callback + #: The name of the route (if specified) or ``None``. 
+ self.name = name or None + #: A list of route-specific plugins (see :meth:`Bottle.route`). + self.plugins = plugins or [] + #: A list of plugins to not apply to this route (see :meth:`Bottle.route`). + self.skiplist = skiplist or [] + #: Additional keyword arguments passed to the :meth:`Bottle.route` + #: decorator are stored in this dictionary. Used for route-specific + #: plugin configuration and meta-data. + self.config = ConfigDict().load_dict(config) + + @cached_property + def call(self): + """ The route callback with all plugins applied. This property is + created on demand and then cached to speed up subsequent requests.""" + return self._make_callback() + + def reset(self): + """ Forget any cached values. The next time :attr:`call` is accessed, + all plugins are re-applied. """ + self.__dict__.pop('call', None) + + def prepare(self): + """ Do all on-demand work immediately (useful for debugging).""" + self.call + + def all_plugins(self): + """ Yield all Plugins affecting this route. """ + unique = set() + for p in reversed(self.app.plugins + self.plugins): + if True in self.skiplist: break + name = getattr(p, 'name', False) + if name and (name in self.skiplist or name in unique): continue + if p in self.skiplist or type(p) in self.skiplist: continue + if name: unique.add(name) + yield p + + def _make_callback(self): + callback = self.callback + for plugin in self.all_plugins(): + try: + if hasattr(plugin, 'apply'): + callback = plugin.apply(callback, self) + else: + callback = plugin(callback) + except RouteReset: # Try again with changed configuration. + return self._make_callback() + if not callback is self.callback: + update_wrapper(callback, self.callback) + return callback + + def get_undecorated_callback(self): + """ Return the callback. If the callback is a decorated function, try to + recover the original function. 
""" + func = self.callback + func = getattr(func, '__func__' if py3k else 'im_func', func) + closure_attr = '__closure__' if py3k else 'func_closure' + while hasattr(func, closure_attr) and getattr(func, closure_attr): + func = getattr(func, closure_attr)[0].cell_contents + return func + + def get_callback_args(self): + """ Return a list of argument names the callback (most likely) accepts + as keyword arguments. If the callback is a decorated function, try + to recover the original function before inspection. """ + return getargspec(self.get_undecorated_callback())[0] + + def get_config(self, key, default=None): + """ Lookup a config field and return its value, first checking the + route.config, then route.app.config.""" + for conf in (self.config, self.app.conifg): + if key in conf: return conf[key] + return default + + def __repr__(self): + cb = self.get_undecorated_callback() + return '<%s %r %r>' % (self.method, self.rule, cb) + + + + + + +############################################################################### +# Application Object ########################################################### +############################################################################### + + +class Bottle(object): + """ Each Bottle object represents a single, distinct web application and + consists of routes, callbacks, plugins, resources and configuration. + Instances are callable WSGI applications. + + :param catchall: If true (default), handle all exceptions. Turn off to + let debugging middleware handle exceptions. + """ + + def __init__(self, catchall=True, autojson=True): + + #: A :class:`ConfigDict` for app specific configuration. 
+ self.config = ConfigDict() + self.config._on_change = functools.partial(self.trigger_hook, 'config') + self.config.meta_set('autojson', 'validate', bool) + self.config.meta_set('catchall', 'validate', bool) + self.config['catchall'] = catchall + self.config['autojson'] = autojson + + #: A :class:`ResourceManager` for application files + self.resources = ResourceManager() + + self.routes = [] # List of installed :class:`Route` instances. + self.router = Router() # Maps requests to :class:`Route` instances. + self.error_handler = {} + + # Core plugins + self.plugins = [] # List of installed plugins. + if self.config['autojson']: + self.install(JSONPlugin()) + self.install(TemplatePlugin()) + + #: If true, most exceptions are caught and returned as :exc:`HTTPError` + catchall = DictProperty('config', 'catchall') + + __hook_names = 'before_request', 'after_request', 'app_reset', 'config' + __hook_reversed = 'after_request' + + @cached_property + def _hooks(self): + return dict((name, []) for name in self.__hook_names) + + def add_hook(self, name, func): + """ Attach a callback to a hook. Three hooks are currently implemented: + + before_request + Executed once before each request. The request context is + available, but no routing has happened yet. + after_request + Executed once after each request regardless of its outcome. + app_reset + Called whenever :meth:`Bottle.reset` is called. + """ + if name in self.__hook_reversed: + self._hooks[name].insert(0, func) + else: + self._hooks[name].append(func) + + def remove_hook(self, name, func): + """ Remove a callback from a hook. """ + if name in self._hooks and func in self._hooks[name]: + self._hooks[name].remove(func) + return True + + def trigger_hook(self, __name, *args, **kwargs): + """ Trigger a hook and return a list of results. """ + return [hook(*args, **kwargs) for hook in self._hooks[__name][:]] + + def hook(self, name): + """ Return a decorator that attaches a callback to a hook. 
See + :meth:`add_hook` for details.""" + def decorator(func): + self.add_hook(name, func) + return func + return decorator + + def mount(self, prefix, app, **options): + """ Mount an application (:class:`Bottle` or plain WSGI) to a specific + URL prefix. Example:: + + root_app.mount('/admin/', admin_app) + + :param prefix: path prefix or `mount-point`. If it ends in a slash, + that slash is mandatory. + :param app: an instance of :class:`Bottle` or a WSGI application. + + All other parameters are passed to the underlying :meth:`route` call. + """ + + segments = [p for p in prefix.split('/') if p] + if not segments: raise ValueError('Empty path prefix.') + path_depth = len(segments) + + def mountpoint_wrapper(): + try: + request.path_shift(path_depth) + rs = HTTPResponse([]) + def start_response(status, headerlist, exc_info=None): + if exc_info: + _raise(*exc_info) + rs.status = status + for name, value in headerlist: rs.add_header(name, value) + return rs.body.append + body = app(request.environ, start_response) + if body and rs.body: body = itertools.chain(rs.body, body) + rs.body = body or rs.body + return rs + finally: + request.path_shift(-path_depth) + + options.setdefault('skip', True) + options.setdefault('method', 'PROXY') + options.setdefault('mountpoint', {'prefix': prefix, 'target': app}) + options['callback'] = mountpoint_wrapper + + self.route('/%s/<:re:.*>' % '/'.join(segments), **options) + if not prefix.endswith('/'): + self.route('/' + '/'.join(segments), **options) + + def merge(self, routes): + """ Merge the routes of another :class:`Bottle` application or a list of + :class:`Route` objects into this application. The routes keep their + 'owner', meaning that the :data:`Route.app` attribute is not + changed. 
""" + if isinstance(routes, Bottle): + routes = routes.routes + for route in routes: + self.add_route(route) + + def install(self, plugin): + """ Add a plugin to the list of plugins and prepare it for being + applied to all routes of this application. A plugin may be a simple + decorator or an object that implements the :class:`Plugin` API. + """ + if hasattr(plugin, 'setup'): plugin.setup(self) + if not callable(plugin) and not hasattr(plugin, 'apply'): + raise TypeError("Plugins must be callable or implement .apply()") + self.plugins.append(plugin) + self.reset() + return plugin + + def uninstall(self, plugin): + """ Uninstall plugins. Pass an instance to remove a specific plugin, a type + object to remove all plugins that match that type, a string to remove + all plugins with a matching ``name`` attribute or ``True`` to remove all + plugins. Return the list of removed plugins. """ + removed, remove = [], plugin + for i, plugin in list(enumerate(self.plugins))[::-1]: + if remove is True or remove is plugin or remove is type(plugin) \ + or getattr(plugin, 'name', True) == remove: + removed.append(plugin) + del self.plugins[i] + if hasattr(plugin, 'close'): plugin.close() + if removed: self.reset() + return removed + + def reset(self, route=None): + """ Reset all routes (force plugins to be re-applied) and clear all + caches. If an ID or route object is given, only that specific route + is affected. """ + if route is None: routes = self.routes + elif isinstance(route, Route): routes = [route] + else: routes = [self.routes[route]] + for route in routes: route.reset() + if DEBUG: + for route in routes: route.prepare() + self.trigger_hook('app_reset') + + def close(self): + """ Close the application and all installed plugins. """ + for plugin in self.plugins: + if hasattr(plugin, 'close'): plugin.close() + + def run(self, **kwargs): + """ Calls :func:`run` with the same parameters. 
""" + run(self, **kwargs) + + def match(self, environ): + """ Search for a matching route and return a (:class:`Route` , urlargs) + tuple. The second value is a dictionary with parameters extracted + from the URL. Raise :exc:`HTTPError` (404/405) on a non-match.""" + return self.router.match(environ) + + def get_url(self, routename, **kargs): + """ Return a string that matches a named route """ + scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/' + location = self.router.build(routename, **kargs).lstrip('/') + return urljoin(urljoin('/', scriptname), location) + + def add_route(self, route): + """ Add a route object, but do not change the :data:`Route.app` + attribute.""" + self.routes.append(route) + self.router.add(route.rule, route.method, route, name=route.name) + if DEBUG: route.prepare() + + def route(self, path=None, method='GET', callback=None, name=None, + apply=None, skip=None, **config): + """ A decorator to bind a function to a request URL. Example:: + + @app.route('/hello/:name') + def hello(name): + return 'Hello %s' % name + + The ``:name`` part is a wildcard. See :class:`Router` for syntax + details. + + :param path: Request path or a list of paths to listen to. If no + path is specified, it is automatically generated from the + signature of the function. + :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of + methods to listen to. (default: `GET`) + :param callback: An optional shortcut to avoid the decorator + syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` + :param name: The name for this route. (default: None) + :param apply: A decorator or plugin or a list of plugins. These are + applied to the route callback in addition to installed plugins. + :param skip: A list of plugins, plugin classes or names. Matching + plugins are not installed to this route. ``True`` skips all. 
+ + Any additional keyword arguments are stored as route-specific + configuration and passed to plugins (see :meth:`Plugin.apply`). + """ + if callable(path): path, callback = None, path + plugins = makelist(apply) + skiplist = makelist(skip) + def decorator(callback): + if isinstance(callback, basestring): callback = load(callback) + for rule in makelist(path) or yieldroutes(callback): + for verb in makelist(method): + verb = verb.upper() + route = Route(self, rule, verb, callback, name=name, + plugins=plugins, skiplist=skiplist, **config) + self.add_route(route) + return callback + return decorator(callback) if callback else decorator + + def get(self, path=None, method='GET', **options): + """ Equals :meth:`route`. """ + return self.route(path, method, **options) + + def post(self, path=None, method='POST', **options): + """ Equals :meth:`route` with a ``POST`` method parameter. """ + return self.route(path, method, **options) + + def put(self, path=None, method='PUT', **options): + """ Equals :meth:`route` with a ``PUT`` method parameter. """ + return self.route(path, method, **options) + + def delete(self, path=None, method='DELETE', **options): + """ Equals :meth:`route` with a ``DELETE`` method parameter. """ + return self.route(path, method, **options) + + def patch(self, path=None, method='PATCH', **options): + """ Equals :meth:`route` with a ``PATCH`` method parameter. """ + return self.route(path, method, **options) + + def error(self, code=500): + """ Decorator: Register an output handler for a HTTP error code""" + def wrapper(handler): + self.error_handler[int(code)] = handler + return handler + return wrapper + + def default_error_handler(self, res): + return tob(template(ERROR_PAGE_TEMPLATE, e=res)) + + def _handle(self, environ): + path = environ['bottle.raw_path'] = environ['PATH_INFO'] + if py3k: + try: + environ['PATH_INFO'] = path.encode('latin1').decode('utf8') + except UnicodeError: + return HTTPError(400, 'Invalid path string. 
Expected UTF-8') + + try: + environ['bottle.app'] = self + request.bind(environ) + response.bind() + try: + self.trigger_hook('before_request') + route, args = self.router.match(environ) + environ['route.handle'] = route + environ['bottle.route'] = route + environ['route.url_args'] = args + return route.call(**args) + finally: + self.trigger_hook('after_request') + except HTTPResponse: + return _e() + except RouteReset: + route.reset() + return self._handle(environ) + except (KeyboardInterrupt, SystemExit, MemoryError): + raise + except Exception: + if not self.catchall: raise + stacktrace = format_exc() + environ['wsgi.errors'].write(stacktrace) + return HTTPError(500, "Internal Server Error", _e(), stacktrace) + + def _cast(self, out, peek=None): + """ Try to convert the parameter into something WSGI compatible and set + correct HTTP headers when possible. + Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, + iterable of strings and iterable of unicodes + """ + + # Empty output is done here + if not out: + if 'Content-Length' not in response: + response['Content-Length'] = 0 + return [] + # Join lists of byte or unicode strings. Mixed lists are NOT supported + if isinstance(out, (tuple, list))\ + and isinstance(out[0], (bytes, unicode)): + out = out[0][0:0].join(out) # b'abc'[0:0] -> b'' + # Encode unicode strings + if isinstance(out, unicode): + out = out.encode(response.charset) + # Byte Strings are just returned + if isinstance(out, bytes): + if 'Content-Length' not in response: + response['Content-Length'] = len(out) + return [out] + # HTTPError or HTTPException (recursive, because they may wrap anything) + # TODO: Handle these explicitly in handle() or make them iterable. 
+ if isinstance(out, HTTPError): + out.apply(response) + out = self.error_handler.get(out.status_code, self.default_error_handler)(out) + return self._cast(out) + if isinstance(out, HTTPResponse): + out.apply(response) + return self._cast(out.body) + + # File-like objects. + if hasattr(out, 'read'): + if 'wsgi.file_wrapper' in request.environ: + return request.environ['wsgi.file_wrapper'](out) + elif hasattr(out, 'close') or not hasattr(out, '__iter__'): + return WSGIFileWrapper(out) + + # Handle Iterables. We peek into them to detect their inner type. + try: + iout = iter(out) + first = next(iout) + while not first: + first = next(iout) + except StopIteration: + return self._cast('') + except HTTPResponse: + first = _e() + except (KeyboardInterrupt, SystemExit, MemoryError): + raise + except: + if not self.catchall: raise + first = HTTPError(500, 'Unhandled exception', _e(), format_exc()) + + # These are the inner types allowed in iterator or generator objects. + if isinstance(first, HTTPResponse): + return self._cast(first) + elif isinstance(first, bytes): + new_iter = itertools.chain([first], iout) + elif isinstance(first, unicode): + encoder = lambda x: x.encode(response.charset) + new_iter = imap(encoder, itertools.chain([first], iout)) + else: + msg = 'Unsupported response type: %s' % type(first) + return self._cast(HTTPError(500, msg)) + if hasattr(out, 'close'): + new_iter = _closeiter(new_iter, out.close) + return new_iter + + def wsgi(self, environ, start_response): + """ The bottle WSGI-interface. """ + try: + out = self._cast(self._handle(environ)) + # rfc2616 section 4.3 + if response._status_code in (100, 101, 204, 304)\ + or environ['REQUEST_METHOD'] == 'HEAD': + if hasattr(out, 'close'): out.close() + out = [] + start_response(response._status_line, response.headerlist) + return out + except (KeyboardInterrupt, SystemExit, MemoryError): + raise + except: + if not self.catchall: raise + err = '

      Critical error while processing request: %s

      ' \ + % html_escape(environ.get('PATH_INFO', '/')) + if DEBUG: + err += '

      Error:

      \n
      \n%s\n
      \n' \ + '

      Traceback:

      \n
      \n%s\n
      \n' \ + % (html_escape(repr(_e())), html_escape(format_exc())) + environ['wsgi.errors'].write(err) + headers = [('Content-Type', 'text/html; charset=UTF-8')] + start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info()) + return [tob(err)] + + def __call__(self, environ, start_response): + """ Each instance of :class:'Bottle' is a WSGI application. """ + return self.wsgi(environ, start_response) + + def __enter__(self): + """ Use this application as default for all module-level shortcuts. """ + default_app.push(self) + return self + + def __exit__(self, exc_type, exc_value, traceback): + default_app.pop() + + + + + +############################################################################### +# HTTP and WSGI Tools ########################################################## +############################################################################### + +class BaseRequest(object): + """ A wrapper for WSGI environment dictionaries that adds a lot of + convenient access methods and properties. Most of them are read-only. + + Adding new attributes to a request actually adds them to the environ + dictionary (as 'bottle.request.ext.'). This is the recommended + way to store and access request-specific data. + """ + + __slots__ = ('environ', ) + + #: Maximum size of memory buffer for :attr:`body` in bytes. + MEMFILE_MAX = 102400 + + def __init__(self, environ=None): + """ Wrap a WSGI environ dictionary. """ + #: The wrapped WSGI environ dictionary. This is the only real attribute. + #: All other attributes actually are read-only properties. + self.environ = {} if environ is None else environ + self.environ['bottle.request'] = self + + @DictProperty('environ', 'bottle.app', read_only=True) + def app(self): + """ Bottle application handling this request. 
""" + raise RuntimeError('This request is not connected to an application.') + + @DictProperty('environ', 'bottle.route', read_only=True) + def route(self): + """ The bottle :class:`Route` object that matches this request. """ + raise RuntimeError('This request is not connected to a route.') + + @DictProperty('environ', 'route.url_args', read_only=True) + def url_args(self): + """ The arguments extracted from the URL. """ + raise RuntimeError('This request is not connected to a route.') + + @property + def path(self): + """ The value of ``PATH_INFO`` with exactly one prefixed slash (to fix + broken clients and avoid the "empty path" edge case). """ + return '/' + self.environ.get('PATH_INFO','').lstrip('/') + + @property + def method(self): + """ The ``REQUEST_METHOD`` value as an uppercase string. """ + return self.environ.get('REQUEST_METHOD', 'GET').upper() + + @DictProperty('environ', 'bottle.request.headers', read_only=True) + def headers(self): + """ A :class:`WSGIHeaderDict` that provides case-insensitive access to + HTTP request headers. """ + return WSGIHeaderDict(self.environ) + + def get_header(self, name, default=None): + """ Return the value of a request header, or a given default value. """ + return self.headers.get(name, default) + + @DictProperty('environ', 'bottle.request.cookies', read_only=True) + def cookies(self): + """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT + decoded. Use :meth:`get_cookie` if you expect signed cookies. """ + cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values() + return FormsDict((c.key, c.value) for c in cookies) + + def get_cookie(self, key, default=None, secret=None): + """ Return the content of a cookie. To read a `Signed Cookie`, the + `secret` must match the one used to create the cookie (see + :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing + cookie or wrong signature), return a default value. 
""" + value = self.cookies.get(key) + if secret and value: + dec = cookie_decode(value, secret) # (key, value) tuple or None + return dec[1] if dec and dec[0] == key else default + return value or default + + @DictProperty('environ', 'bottle.request.query', read_only=True) + def query(self): + """ The :attr:`query_string` parsed into a :class:`FormsDict`. These + values are sometimes called "URL arguments" or "GET parameters", but + not to be confused with "URL wildcards" as they are provided by the + :class:`Router`. """ + get = self.environ['bottle.get'] = FormsDict() + pairs = _parse_qsl(self.environ.get('QUERY_STRING', '')) + for key, value in pairs: + get[key] = value + return get + + @DictProperty('environ', 'bottle.request.forms', read_only=True) + def forms(self): + """ Form values parsed from an `url-encoded` or `multipart/form-data` + encoded POST or PUT request body. The result is returned as a + :class:`FormsDict`. All keys and values are strings. File uploads + are stored separately in :attr:`files`. """ + forms = FormsDict() + for name, item in self.POST.allitems(): + if not isinstance(item, FileUpload): + forms[name] = item + return forms + + @DictProperty('environ', 'bottle.request.params', read_only=True) + def params(self): + """ A :class:`FormsDict` with the combined values of :attr:`query` and + :attr:`forms`. File uploads are stored in :attr:`files`. """ + params = FormsDict() + for key, value in self.query.allitems(): + params[key] = value + for key, value in self.forms.allitems(): + params[key] = value + return params + + @DictProperty('environ', 'bottle.request.files', read_only=True) + def files(self): + """ File uploads parsed from `multipart/form-data` encoded POST or PUT + request body. The values are instances of :class:`FileUpload`. 
+ + """ + files = FormsDict() + for name, item in self.POST.allitems(): + if isinstance(item, FileUpload): + files[name] = item + return files + + @DictProperty('environ', 'bottle.request.json', read_only=True) + def json(self): + """ If the ``Content-Type`` header is ``application/json``, this + property holds the parsed content of the request body. Only requests + smaller than :attr:`MEMFILE_MAX` are processed to avoid memory + exhaustion. """ + if 'application/json' in self.environ.get('CONTENT_TYPE', ''): + return json_loads(self._get_body_string()) + return None + + def _iter_body(self, read, bufsize): + maxread = max(0, self.content_length) + while maxread: + part = read(min(maxread, bufsize)) + if not part: break + yield part + maxread -= len(part) + + @staticmethod + def _iter_chunked(read, bufsize): + err = HTTPError(400, 'Error while parsing chunked transfer body.') + rn, sem, bs = tob('\r\n'), tob(';'), tob('') + while True: + header = read(1) + while header[-2:] != rn: + c = read(1) + header += c + if not c: raise err + if len(header) > bufsize: raise err + size, _, _ = header.partition(sem) + try: + maxread = int(tonat(size.strip()), 16) + except ValueError: + raise err + if maxread == 0: break + buff = bs + while maxread > 0: + if not buff: + buff = read(min(maxread, bufsize)) + part, buff = buff[:maxread], buff[maxread:] + if not part: raise err + yield part + maxread -= len(part) + if read(2) != rn: + raise err + + @DictProperty('environ', 'bottle.request.body', read_only=True) + def _body(self): + body_iter = self._iter_chunked if self.chunked else self._iter_body + read_func = self.environ['wsgi.input'].read + body, body_size, is_temp_file = BytesIO(), 0, False + for part in body_iter(read_func, self.MEMFILE_MAX): + body.write(part) + body_size += len(part) + if not is_temp_file and body_size > self.MEMFILE_MAX: + body, tmp = TemporaryFile(mode='w+b'), body + body.write(tmp.getvalue()) + del tmp + is_temp_file = True + self.environ['wsgi.input'] = 
body + body.seek(0) + return body + + def _get_body_string(self): + """ read body until content-length or MEMFILE_MAX into a string. Raise + HTTPError(413) on requests that are to large. """ + clen = self.content_length + if clen > self.MEMFILE_MAX: + raise HTTPError(413, 'Request to large') + if clen < 0: clen = self.MEMFILE_MAX + 1 + data = self.body.read(clen) + if len(data) > self.MEMFILE_MAX: # Fail fast + raise HTTPError(413, 'Request to large') + return data + + @property + def body(self): + """ The HTTP request body as a seek-able file-like object. Depending on + :attr:`MEMFILE_MAX`, this is either a temporary file or a + :class:`io.BytesIO` instance. Accessing this property for the first + time reads and replaces the ``wsgi.input`` environ variable. + Subsequent accesses just do a `seek(0)` on the file object. """ + self._body.seek(0) + return self._body + + @property + def chunked(self): + """ True if Chunked transfer encoding was. """ + return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower() + + #: An alias for :attr:`query`. + GET = query + + @DictProperty('environ', 'bottle.request.post', read_only=True) + def POST(self): + """ The values of :attr:`forms` and :attr:`files` combined into a single + :class:`FormsDict`. Values are either strings (form values) or + instances of :class:`cgi.FieldStorage` (file uploads). 
+ """ + post = FormsDict() + # We default to application/x-www-form-urlencoded for everything that + # is not multipart and take the fast path (also: 3.1 workaround) + if not self.content_type.startswith('multipart/'): + pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1')) + for key, value in pairs: + post[key] = value + return post + + safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi + for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): + if key in self.environ: safe_env[key] = self.environ[key] + args = dict(fp=self.body, environ=safe_env, keep_blank_values=True) + if py31: + args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8', + newline='\n') + elif py3k: + args['encoding'] = 'utf8' + data = cgi.FieldStorage(**args) + data = data.list or [] + for item in data: + if item.filename: + post[item.name] = FileUpload(item.file, item.name, + item.filename, item.headers) + else: + post[item.name] = item.value + return post + + @property + def url(self): + """ The full request URI including hostname and scheme. If your app + lives behind a reverse proxy or load balancer and you get confusing + results, make sure that the ``X-Forwarded-Host`` header is set + correctly. """ + return self.urlparts.geturl() + + @DictProperty('environ', 'bottle.request.urlparts', read_only=True) + def urlparts(self): + """ The :attr:`url` string as an :class:`urlparse.SplitResult` tuple. + The tuple contains (scheme, host, path, query_string and fragment), + but the fragment is always empty because it is not visible to the + server. """ + env = self.environ + http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http') + host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST') + if not host: + # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients. 
+ host = env.get('SERVER_NAME', '127.0.0.1') + port = env.get('SERVER_PORT') + if port and port != ('80' if http == 'http' else '443'): + host += ':' + port + path = urlquote(self.fullpath) + return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '') + + @property + def fullpath(self): + """ Request path including :attr:`script_name` (if present). """ + return urljoin(self.script_name, self.path.lstrip('/')) + + @property + def query_string(self): + """ The raw :attr:`query` part of the URL (everything in between ``?`` + and ``#``) as a string. """ + return self.environ.get('QUERY_STRING', '') + + @property + def script_name(self): + """ The initial portion of the URL's `path` that was removed by a higher + level (server or routing middleware) before the application was + called. This script path is returned with leading and tailing + slashes. """ + script_name = self.environ.get('SCRIPT_NAME', '').strip('/') + return '/' + script_name + '/' if script_name else '/' + + def path_shift(self, shift=1): + """ Shift path segments from :attr:`path` to :attr:`script_name` and + vice versa. + + :param shift: The number of path segments to shift. May be negative + to change the shift direction. (default: 1) + """ + script = self.environ.get('SCRIPT_NAME','/') + self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift) + + @property + def content_length(self): + """ The request body length as an integer. The client is responsible to + set this header. Otherwise, the real length of the body is unknown + and -1 is returned. In this case, :attr:`body` will be empty. """ + return int(self.environ.get('CONTENT_LENGTH') or -1) + + @property + def content_type(self): + """ The Content-Type header as a lowercase-string (default: empty). """ + return self.environ.get('CONTENT_TYPE', '').lower() + + @property + def is_xhr(self): + """ True if the request was triggered by a XMLHttpRequest. 
This only + works with JavaScript libraries that support the `X-Requested-With` + header (most of the popular libraries do). """ + requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','') + return requested_with.lower() == 'xmlhttprequest' + + @property + def is_ajax(self): + """ Alias for :attr:`is_xhr`. "Ajax" is not the right term. """ + return self.is_xhr + + @property + def auth(self): + """ HTTP authentication data as a (user, password) tuple. This + implementation currently supports basic (not digest) authentication + only. If the authentication happened at a higher level (e.g. in the + front web-server or a middleware), the password field is None, but + the user field is looked up from the ``REMOTE_USER`` environ + variable. On any errors, None is returned. """ + basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION','')) + if basic: return basic + ruser = self.environ.get('REMOTE_USER') + if ruser: return (ruser, None) + return None + + @property + def remote_route(self): + """ A list of all IPs that were involved in this request, starting with + the client IP and followed by zero or more proxies. This does only + work if all proxies support the ```X-Forwarded-For`` header. Note + that this information can be forged by malicious clients. """ + proxy = self.environ.get('HTTP_X_FORWARDED_FOR') + if proxy: return [ip.strip() for ip in proxy.split(',')] + remote = self.environ.get('REMOTE_ADDR') + return [remote] if remote else [] + + @property + def remote_addr(self): + """ The client IP as a string. Note that this information can be forged + by malicious clients. """ + route = self.remote_route + return route[0] if route else None + + def copy(self): + """ Return a new :class:`Request` with a shallow :attr:`environ` copy. 
""" + return Request(self.environ.copy()) + + def get(self, value, default=None): return self.environ.get(value, default) + def __getitem__(self, key): return self.environ[key] + def __delitem__(self, key): self[key] = ""; del(self.environ[key]) + def __iter__(self): return iter(self.environ) + def __len__(self): return len(self.environ) + def keys(self): return self.environ.keys() + def __setitem__(self, key, value): + """ Change an environ value and clear all caches that depend on it. """ + + if self.environ.get('bottle.request.readonly'): + raise KeyError('The environ dictionary is read-only.') + + self.environ[key] = value + todelete = () + + if key == 'wsgi.input': + todelete = ('body', 'forms', 'files', 'params', 'post', 'json') + elif key == 'QUERY_STRING': + todelete = ('query', 'params') + elif key.startswith('HTTP_'): + todelete = ('headers', 'cookies') + + for key in todelete: + self.environ.pop('bottle.request.'+key, None) + + def __repr__(self): + return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url) + + def __getattr__(self, name): + """ Search in self.environ for additional user defined attributes. """ + try: + var = self.environ['bottle.request.ext.%s'%name] + return var.__get__(self) if hasattr(var, '__get__') else var + except KeyError: + raise AttributeError('Attribute %r not defined.' % name) + + def __setattr__(self, name, value): + if name == 'environ': return object.__setattr__(self, name, value) + self.environ['bottle.request.ext.%s'%name] = value + + + + +def _hkey(s): + return s.title().replace('_','-') + + +class HeaderProperty(object): + def __init__(self, name, reader=None, writer=str, default=''): + self.name, self.default = name, default + self.reader, self.writer = reader, writer + self.__doc__ = 'Current value of the %r header.' 
% name.title() + + def __get__(self, obj, _): + if obj is None: return self + value = obj.headers.get(self.name, self.default) + return self.reader(value) if self.reader else value + + def __set__(self, obj, value): + obj.headers[self.name] = self.writer(value) + + def __delete__(self, obj): + del obj.headers[self.name] + + +class BaseResponse(object): + """ Storage class for a response body as well as headers and cookies. + + This class does support dict-like case-insensitive item-access to + headers, but is NOT a dict. Most notably, iterating over a response + yields parts of the body and not the headers. + + :param body: The response body as one of the supported types. + :param status: Either an HTTP status code (e.g. 200) or a status line + including the reason phrase (e.g. '200 OK'). + :param headers: A dictionary or a list of name-value pairs. + + Additional keyword arguments are added to the list of headers. + Underscores in the header name are replaced with dashes. + """ + + default_status = 200 + default_content_type = 'text/html; charset=UTF-8' + + # Header blacklist for specific response codes + # (rfc2616 section 10.2.3 and 10.3.5) + bad_headers = { + 204: set(('Content-Type',)), + 304: set(('Allow', 'Content-Encoding', 'Content-Language', + 'Content-Length', 'Content-Range', 'Content-Type', + 'Content-Md5', 'Last-Modified'))} + + def __init__(self, body='', status=None, headers=None, **more_headers): + self._cookies = None + self._headers = {} + self.body = body + self.status = status or self.default_status + if headers: + if isinstance(headers, dict): + headers = headers.items() + for name, value in headers: + self.add_header(name, value) + if more_headers: + for name, value in more_headers.items(): + self.add_header(name, value) + + def copy(self, cls=None): + """ Returns a copy of self. 
""" + cls = cls or BaseResponse + assert issubclass(cls, BaseResponse) + copy = cls() + copy.status = self.status + copy._headers = dict((k, v[:]) for (k, v) in self._headers.items()) + if self._cookies: + copy._cookies = SimpleCookie() + copy._cookies.load(self._cookies.output()) + return copy + + def __iter__(self): + return iter(self.body) + + def close(self): + if hasattr(self.body, 'close'): + self.body.close() + + @property + def status_line(self): + """ The HTTP status line as a string (e.g. ``404 Not Found``).""" + return self._status_line + + @property + def status_code(self): + """ The HTTP status code as an integer (e.g. 404).""" + return self._status_code + + def _set_status(self, status): + if isinstance(status, int): + code, status = status, _HTTP_STATUS_LINES.get(status) + elif ' ' in status: + status = status.strip() + code = int(status.split()[0]) + else: + raise ValueError('String status line without a reason phrase.') + if not 100 <= code <= 999: raise ValueError('Status code out of range.') + self._status_code = code + self._status_line = str(status or ('%d Unknown' % code)) + + def _get_status(self): + return self._status_line + + status = property(_get_status, _set_status, None, + ''' A writeable property to change the HTTP response status. It accepts + either a numeric code (100-999) or a string with a custom reason + phrase (e.g. "404 Brain not found"). Both :data:`status_line` and + :data:`status_code` are updated accordingly. The return value is + always a status string. ''') + del _get_status, _set_status + + @property + def headers(self): + """ An instance of :class:`HeaderDict`, a case-insensitive dict-like + view on the response headers. 
""" + hdict = HeaderDict() + hdict.dict = self._headers + return hdict + + def __contains__(self, name): return _hkey(name) in self._headers + def __delitem__(self, name): del self._headers[_hkey(name)] + def __getitem__(self, name): return self._headers[_hkey(name)][-1] + def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)] + + def get_header(self, name, default=None): + """ Return the value of a previously defined header. If there is no + header with that name, return a default value. """ + return self._headers.get(_hkey(name), [default])[-1] + + def set_header(self, name, value): + """ Create a new response header, replacing any previously defined + headers with the same name. """ + self._headers[_hkey(name)] = [str(value)] + + def add_header(self, name, value): + """ Add an additional response header, not removing duplicates. """ + self._headers.setdefault(_hkey(name), []).append(str(value)) + + def iter_headers(self): + """ Yield (header, value) tuples, skipping headers that are not + allowed with the current response status code. """ + return self.headerlist + + @property + def headerlist(self): + """ WSGI conform list of (header, value) tuples. 
""" + out = [] + headers = list(self._headers.items()) + if 'Content-Type' not in self._headers: + headers.append(('Content-Type', [self.default_content_type])) + if self._status_code in self.bad_headers: + bad_headers = self.bad_headers[self._status_code] + headers = [h for h in headers if h[0] not in bad_headers] + out += [(name, val) for name, vals in headers for val in vals] + if self._cookies: + for c in self._cookies.values(): + out.append(('Set-Cookie', c.OutputString())) + return out + + content_type = HeaderProperty('Content-Type') + content_length = HeaderProperty('Content-Length', reader=int) + expires = HeaderProperty('Expires', + reader=lambda x: datetime.utcfromtimestamp(parse_date(x)), + writer=lambda x: http_date(x)) + + @property + def charset(self, default='UTF-8'): + """ Return the charset specified in the content-type header (default: utf8). """ + if 'charset=' in self.content_type: + return self.content_type.split('charset=')[-1].split(';')[0].strip() + return default + + def set_cookie(self, name, value, secret=None, **options): + """ Create a new cookie or replace an old one. If the `secret` parameter is + set, create a `Signed Cookie` (described below). + + :param name: the name of the cookie. + :param value: the value of the cookie. + :param secret: a signature key required for signed cookies. + + Additionally, this method accepts all RFC 2109 attributes that are + supported by :class:`cookie.Morsel`, including: + + :param max_age: maximum age in seconds. (default: None) + :param expires: a datetime object or UNIX timestamp. (default: None) + :param domain: the domain that is allowed to read the cookie. + (default: current domain) + :param path: limits the cookie to a given path (default: current path) + :param secure: limit the cookie to HTTPS connections (default: off). + :param httponly: prevents client-side javascript to read this cookie + (default: off, requires Python 2.6 or newer). 
+ + If neither `expires` nor `max_age` is set (default), the cookie will + expire at the end of the browser session (as soon as the browser + window is closed). + + Signed cookies may store any pickle-able object and are + cryptographically signed to prevent manipulation. Keep in mind that + cookies are limited to 4kb in most browsers. + + Warning: Signed cookies are not encrypted (the client can still see + the content) and not copy-protected (the client can restore an old + cookie). The main intention is to make pickling and unpickling + save, not to store secret information at client side. + """ + if not self._cookies: + self._cookies = SimpleCookie() + + if secret: + value = touni(cookie_encode((name, value), secret)) + elif not isinstance(value, basestring): + raise TypeError('Secret key missing for non-string Cookie.') + + if len(value) > 4096: raise ValueError('Cookie value to long.') + self._cookies[name] = value + + for key, value in options.items(): + if key == 'max_age': + if isinstance(value, timedelta): + value = value.seconds + value.days * 24 * 3600 + if key == 'expires': + if isinstance(value, (datedate, datetime)): + value = value.timetuple() + elif isinstance(value, (int, float)): + value = time.gmtime(value) + value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) + self._cookies[name][key.replace('_', '-')] = value + + def delete_cookie(self, key, **kwargs): + """ Delete a cookie. Be sure to use the same `domain` and `path` + settings as used to create the cookie. 
""" + kwargs['max_age'] = -1 + kwargs['expires'] = 0 + self.set_cookie(key, '', **kwargs) + + def __repr__(self): + out = '' + for name, value in self.headerlist: + out += '%s: %s\n' % (name.title(), value.strip()) + return out + + +def _local_property(): + ls = threading.local() + def fget(_): + try: return ls.var + except AttributeError: + raise RuntimeError("Request context not initialized.") + def fset(_, value): ls.var = value + def fdel(_): del ls.var + return property(fget, fset, fdel, 'Thread-local property') + + +class LocalRequest(BaseRequest): + """ A thread-local subclass of :class:`BaseRequest` with a different + set of attributes for each thread. There is usually only one global + instance of this class (:data:`request`). If accessed during a + request/response cycle, this instance always refers to the *current* + request (even on a multithreaded server). """ + bind = BaseRequest.__init__ + environ = _local_property() + + +class LocalResponse(BaseResponse): + """ A thread-local subclass of :class:`BaseResponse` with a different + set of attributes for each thread. There is usually only one global + instance of this class (:data:`response`). Its attributes are used + to build the HTTP response at the end of the request/response cycle. 
+ """ + bind = BaseResponse.__init__ + _status_line = _local_property() + _status_code = _local_property() + _cookies = _local_property() + _headers = _local_property() + body = _local_property() + + +Request = BaseRequest +Response = BaseResponse + + +class HTTPResponse(Response, BottleException): + def __init__(self, body='', status=None, headers=None, **more_headers): + super(HTTPResponse, self).__init__(body, status, headers, **more_headers) + + def apply(self, other): + other._status_code = self._status_code + other._status_line = self._status_line + other._headers = self._headers + other._cookies = self._cookies + other.body = self.body + + +class HTTPError(HTTPResponse): + default_status = 500 + def __init__(self, status=None, body=None, exception=None, traceback=None, + **options): + self.exception = exception + self.traceback = traceback + super(HTTPError, self).__init__(body, status, **options) + + + + + +############################################################################### +# Plugins ###################################################################### +############################################################################### + +class PluginError(BottleException): pass + + +class JSONPlugin(object): + name = 'json' + api = 2 + + def __init__(self, json_dumps=json_dumps): + self.json_dumps = json_dumps + + def apply(self, callback, _): + dumps = self.json_dumps + if not dumps: return callback + def wrapper(*a, **ka): + try: + rv = callback(*a, **ka) + except HTTPError: + rv = _e() + + if isinstance(rv, dict): + #Attempt to serialize, raises exception on failure + json_response = dumps(rv) + #Set content type only if serialization successful + response.content_type = 'application/json' + return json_response + elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict): + rv.body = dumps(rv.body) + rv.content_type = 'application/json' + return rv + + return wrapper + + +class TemplatePlugin(object): + """ This plugin applies the 
:func:`view` decorator to all routes with a + `template` config parameter. If the parameter is a tuple, the second + element must be a dict with additional options (e.g. `template_engine`) + or default variables for the template. """ + name = 'template' + api = 2 + + def apply(self, callback, route): + conf = route.config.get('template') + if isinstance(conf, (tuple, list)) and len(conf) == 2: + return view(conf[0], **conf[1])(callback) + elif isinstance(conf, str): + return view(conf)(callback) + else: + return callback + + +#: Not a plugin, but part of the plugin API. TODO: Find a better place. +class _ImportRedirect(object): + def __init__(self, name, impmask): + """ Create a virtual package that redirects imports (see PEP 302). """ + self.name = name + self.impmask = impmask + self.module = sys.modules.setdefault(name, imp.new_module(name)) + self.module.__dict__.update({'__file__': __file__, '__path__': [], + '__all__': [], '__loader__': self}) + sys.meta_path.append(self) + + def find_module(self, fullname, path=None): + if '.' not in fullname: return + packname = fullname.rsplit('.', 1)[0] + if packname != self.name: return + return self + + def load_module(self, fullname): + if fullname in sys.modules: return sys.modules[fullname] + modname = fullname.rsplit('.', 1)[1] + realname = self.impmask % modname + __import__(realname) + module = sys.modules[fullname] = sys.modules[realname] + setattr(self.module, modname, module) + module.__loader__ = self + return module + + + + + + +############################################################################### +# Common Utilities ############################################################# +############################################################################### + + +class MultiDict(DictMixin): + """ This dict stores multiple values per key, but behaves exactly like a + normal dict in that it returns only the newest value for any given key. 
+ There are special methods available to access the full list of values. + """ + + def __init__(self, *a, **k): + self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items()) + + def __len__(self): return len(self.dict) + def __iter__(self): return iter(self.dict) + def __contains__(self, key): return key in self.dict + def __delitem__(self, key): del self.dict[key] + def __getitem__(self, key): return self.dict[key][-1] + def __setitem__(self, key, value): self.append(key, value) + def keys(self): return self.dict.keys() + + if py3k: + def values(self): return (v[-1] for v in self.dict.values()) + def items(self): return ((k, v[-1]) for k, v in self.dict.items()) + def allitems(self): + return ((k, v) for k, vl in self.dict.items() for v in vl) + iterkeys = keys + itervalues = values + iteritems = items + iterallitems = allitems + + else: + def values(self): return [v[-1] for v in self.dict.values()] + def items(self): return [(k, v[-1]) for k, v in self.dict.items()] + def iterkeys(self): return self.dict.iterkeys() + def itervalues(self): return (v[-1] for v in self.dict.itervalues()) + def iteritems(self): + return ((k, v[-1]) for k, v in self.dict.iteritems()) + def iterallitems(self): + return ((k, v) for k, vl in self.dict.iteritems() for v in vl) + def allitems(self): + return [(k, v) for k, vl in self.dict.iteritems() for v in vl] + + def get(self, key, default=None, index=-1, type=None): + """ Return the most recent value for a key. + + :param default: The default value to be returned if the key is not + present or the type conversion fails. + :param index: An index for the list of available values. + :param type: If defined, this callable is used to cast the value + into a specific type. Exception are suppressed and result in + the default value to be returned. 
+ """ + try: + val = self.dict[key][index] + return type(val) if type else val + except Exception: + pass + return default + + def append(self, key, value): + """ Add a new value to the list of values for this key. """ + self.dict.setdefault(key, []).append(value) + + def replace(self, key, value): + """ Replace the list of values with a single value. """ + self.dict[key] = [value] + + def getall(self, key): + """ Return a (possibly empty) list of values for a key. """ + return self.dict.get(key) or [] + + #: Aliases for WTForms to mimic other multi-dict APIs (Django) + getone = get + getlist = getall + + +class FormsDict(MultiDict): + """ This :class:`MultiDict` subclass is used to store request form data. + Additionally to the normal dict-like item access methods (which return + unmodified data as native strings), this container also supports + attribute-like access to its values. Attributes are automatically de- + or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing + attributes default to an empty string. """ + + #: Encoding used for attribute values. + input_encoding = 'utf8' + #: If true (default), unicode strings are first encoded with `latin1` + #: and then decoded to match :attr:`input_encoding`. + recode_unicode = True + + def _fix(self, s, encoding=None): + if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI + return s.encode('latin1').decode(encoding or self.input_encoding) + elif isinstance(s, bytes): # Python 2 WSGI + return s.decode(encoding or self.input_encoding) + else: + return s + + def decode(self, encoding=None): + """ Returns a copy with all keys and values de- or recoded to match + :attr:`input_encoding`. Some libraries (e.g. WTForms) want a + unicode dictionary. 
""" + copy = FormsDict() + enc = copy.input_encoding = encoding or self.input_encoding + copy.recode_unicode = False + for key, value in self.allitems(): + copy.append(self._fix(key, enc), self._fix(value, enc)) + return copy + + def getunicode(self, name, default=None, encoding=None): + """ Return the value as a unicode string, or the default. """ + try: + return self._fix(self[name], encoding) + except (UnicodeError, KeyError): + return default + + def __getattr__(self, name, default=unicode()): + # Without this guard, pickle generates a cryptic TypeError: + if name.startswith('__') and name.endswith('__'): + return super(FormsDict, self).__getattr__(name) + return self.getunicode(name, default=default) + + +class HeaderDict(MultiDict): + """ A case-insensitive version of :class:`MultiDict` that defaults to + replace the old value instead of appending it. """ + + def __init__(self, *a, **ka): From noreply at buildbot.pypy.org Tue Apr 22 15:29:16 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 22 Apr 2014 15:29:16 +0200 (CEST) Subject: [pypy-commit] benchmarks default: use openload to measure throughput Message-ID: <20140422132916.234141D2380@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r251:d3e52a4e640c Date: 2014-04-22 15:30 +0200 http://bitbucket.org/pypy/benchmarks/changeset/d3e52a4e640c/ Log: use openload to measure throughput diff --git a/multithread/bottle/app.py b/multithread/bottle/app.py --- a/multithread/bottle/app.py +++ b/multithread/bottle/app.py @@ -1,10 +1,9 @@ from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool -from SocketServer import ThreadingMixIn -from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler +from BaseHTTPServer import HTTPServer -import threading, socket, time +import threading, time -from wsgiref.simple_server import WSGIRequestHandler, WSGIServer +from wsgiref.simple_server import WSGIRequestHandler class ThreadedHTTPServer(HTTPServer): """Handle requests in a 
separate thread.""" @@ -44,22 +43,60 @@ -from bottle import route, run, ServerAdapter +import bottle +import subprocess, sys, os -class ThreadedServer(ServerAdapter): +class ThreadedServer(bottle.ServerAdapter): def run(self, app): # pragma: no cover srv = ThreadedHTTPServer((self.host, self.port), WSGIRequestHandler) srv.set_app(app) srv.serve_forever() - at route('/') + at bottle.route('/') def index(): time.sleep(0.5) return "hi from " + threading.currentThread().getName() +def run(threads=4, runtime=10, clients=8): + threads = int(threads) + runtime = int(runtime) + clients = int(clients) + PORT = 21634 + + set_thread_pool(ThreadPool(threads)) + + def bottle_server(): + bottle.run(server=ThreadedServer, + host='localhost', port=PORT) + + bs = threading.Thread(target=bottle_server) + bs.setDaemon(True) + bs.start() + + print "wait for startup" + time.sleep(5) + print "hopefully ready now" + + try: + print "execute openload:" + p = subprocess.Popen(['openload', + '-l', str(runtime), + '-o', 'CSV', + 'localhost:%s' % PORT, str(clients)], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + except OSError as e: + sys.stderr.write("Error trying to execute 'openload'\n%s" % e) + os.exit(1) + + returncode = p.wait() + out, err = p.communicate() + if returncode != 0: + sys.stderr.write("'openload' returned an error\n%s" % e) + os.exit(1) + print out, err + if __name__ == "__main__": - set_thread_pool(ThreadPool(8)) - run(server=ThreadedServer, # debug=True, - host='localhost', port=8080) + run() From noreply at buildbot.pypy.org Tue Apr 22 18:07:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 22 Apr 2014 18:07:10 +0200 (CEST) Subject: [pypy-commit] stmgc default: Check for bugs Message-ID: <20140422160710.33F221D24FB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1176:4408f7de0c1b Date: 2014-04-22 18:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/4408f7de0c1b/ Log: Check for bugs diff --git a/c7/stm/largemalloc.c 
b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -353,6 +353,9 @@ mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); +#ifndef NDEBUG + memset((char *)&mscan->d, 0xda, request_size); +#endif lm_unlock(); From noreply at buildbot.pypy.org Tue Apr 22 19:27:00 2014 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 22 Apr 2014 19:27:00 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: proper cleanup by closing files when done Message-ID: <20140422172700.8273C1C070B@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70854:76f6e721510b Date: 2014-04-22 10:24 -0700 http://bitbucket.org/pypy/pypy/changeset/76f6e721510b/ Log: proper cleanup by closing files when done diff --git a/pypy/module/cppyy/test/test_cint.py b/pypy/module/cppyy/test/test_cint.py --- a/pypy/module/cppyy/test/test_cint.py +++ b/pypy/module/cppyy/test/test_cint.py @@ -343,6 +343,8 @@ i += 1 assert i == self.N + f.Close() + def test07_write_builtin(self): """Test writing of builtins""" @@ -409,6 +411,7 @@ assert mytree.my_int == i+1 assert mytree.my_int2 == i+1 + f.Close() class AppTestCINTREGRESSION: spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) From noreply at buildbot.pypy.org Tue Apr 22 19:27:01 2014 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 22 Apr 2014 19:27:01 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: better handling of tests with different backends Message-ID: <20140422172701.AECA01C070B@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70855:a8c7b05f2335 Date: 2014-04-22 10:24 -0700 http://bitbucket.org/pypy/pypy/changeset/a8c7b05f2335/ Log: better handling of tests with different backends diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -3,18 
+3,25 @@ @py.test.mark.tryfirst def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: - if not item.location[0] in ['test_helper.py', 'test_cppyy.py'] or \ - (item.location[0] == 'test_cppyy.py' and not 'TestCPPYYImplementation' in item.location[2]): - py.test.skip("genreflex is not installed") + import pypy.module.cppyy.capi.loadable_capi as lcapi + if 'dummy' in lcapi.reflection_library: + # run only tests that are covered by the dummy backend and tests + # that do not rely on reflex + if not item.location[0] in ['test_helper.py', 'test_cppyy.py'] or \ + (item.location[0] == 'test_cppyy.py' and not 'TestCPPYYImplementation' in item.location[2]): + py.test.skip("genreflex is not installed") +def pytest_configure(config): + if py.path.local.sysfind('genreflex') is None: import pypy.module.cppyy.capi.loadable_capi as lcapi try: import ctypes ctypes.CDLL(lcapi.reflection_library) except Exception, e: + # build dummy backend (which has reflex info and calls hard-wired) import os from rpython.translator.tool.cbuild import ExternalCompilationInfo - from rpython.translator import platform + from rpython.translator.platform import platform from rpython.rtyper.lltypesystem import rffi @@ -28,12 +35,9 @@ use_cpp_linker=True, ) - soname = platform.platform.compile( + soname = platform.compile( [], eci, - outputfilename='libcppyy_backend', + outputfilename='libcppyy_dummy_backend', standalone=False) lcapi.reflection_library = str(soname) - - lcapi.isdummy = True - From noreply at buildbot.pypy.org Tue Apr 22 19:27:02 2014 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 22 Apr 2014 19:27:02 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: valgrind-directed cleanup Message-ID: <20140422172702.E3EE51C070B@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70856:a0a53cefce3b Date: 2014-04-22 10:25 -0700 http://bitbucket.org/pypy/pypy/changeset/a0a53cefce3b/ Log: valgrind-directed cleanup diff --git 
a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.LONG +_C_OPAQUE_PTR = rffi.VOIDP _C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -180,7 +180,7 @@ ### TTree -------------------------------------------------------------------- _ttree_Branch = rffi.llexternal( "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], rffi.LONG, + [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, releasegil=False, compilation_info=eci) @@ -299,6 +299,8 @@ # some instance klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) w_obj = klass.construct() + # 0x10000 = kDeleteObject; reset because we own the object + space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) space.call_method(w_branch, "SetObject", w_obj) space.call_method(w_branch, "GetEntry", space.wrap(entry)) space.setattr(w_self, args_w[0], w_obj) diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -139,7 +139,7 @@ from pypy.module.cppyy import interp_cppyy newthis = capi.c_constructor(space, cppmethod, cpptype, num_args, args) assert lltype.typeOf(newthis) == capi.C_OBJECT - return space.wrap(newthis) + return space.wrap(rffi.cast(rffi.LONG, newthis)) # really want ptrdiff_t here class InstancePtrExecutor(FunctionExecutor): diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,11 +7,11 @@ 
extern "C" { #endif // ifdef __cplusplus - typedef long cppyy_scope_t; + typedef void* cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef long cppyy_object_t; - typedef long cppyy_method_t; - typedef long cppyy_index_t; + typedef void* cppyy_object_t; + typedef void* cppyy_method_t; + typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); /* name to opaque C++ scope representation -------------------------------- */ diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -132,7 +132,7 @@ # class allows simple aliasing of methods) capi.pythonize(space, cppclass.name, w_pycppclass) state = space.fromcache(State) - state.cppclass_registry[cppclass.handle] = w_pycppclass + state.cppclass_registry[rffi.cast(rffi.LONG, cppclass.handle)] = w_pycppclass class W_CPPLibrary(W_Root): @@ -1154,7 +1154,7 @@ def get_pythonized_cppclass(space, handle): state = space.fromcache(State) try: - w_pycppclass = state.cppclass_registry[handle] + w_pycppclass = state.cppclass_registry[rffi.cast(rffi.LONG, handle)] except KeyError: final_name = capi.c_scoped_final_name(space, handle) # the callback will cache the class by calling register_class diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -46,9 +46,6 @@ extern "C" void G__LockCriticalSection(); extern "C" void G__UnlockCriticalSection(); -#define G__SETMEMFUNCENV (long)0x7fff0035 -#define G__NOP (long)0x7fff00ff - namespace { class Cppyy_OpenedTClass : public TDictionary { @@ -147,11 +144,10 @@ assert(g_classrefs.size() == (ClassRefs_t::size_type)GLOBAL_HANDLE); g_classref_indices[""] = (ClassRefs_t::size_type)GLOBAL_HANDLE; g_classrefs.push_back(TClassRef("")); - g_classref_indices["std"] = g_classrefs.size(); - g_classrefs.push_back(TClassRef("")); // 
CINT ignores std - g_classref_indices["::std"] = g_classrefs.size(); - g_classrefs.push_back(TClassRef("")); // id. - + // CINT ignores std/::std, so point them to the global namespace + g_classref_indices["std"] = (ClassRefs_t::size_type)GLOBAL_HANDLE; + g_classref_indices["::std"] = (ClassRefs_t::size_type)GLOBAL_HANDLE; + // an offset for the interpreted methods g_interpreted.push_back(G__MethodInfo()); @@ -203,6 +199,7 @@ TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) return (TFunction*)cr->GetListOfMethods()->At(idx); + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return (TFunction*)idx; } @@ -247,6 +244,7 @@ /* not supported as CINT does not store classes hierarchically */ return 0; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return gClassTable->Classes(); } @@ -258,6 +256,7 @@ assert(!"scope name lookup not supported on inner scopes"); return 0; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); std::string name = gClassTable->At(iscope); if (name.find("::") == std::string::npos) return cppstring_to_cstring(name); @@ -411,7 +410,6 @@ G__settemplevel(1); long index = (long)&method; - G__CurrentCall(G__SETMEMFUNCENV, 0, &index); // TODO: access to store_struct_offset won't work on Windows long store_struct_offset = G__store_struct_offset; @@ -425,7 +423,6 @@ if (G__get_return(0) > G__RETURN_NORMAL) G__security_recover(0); // 0 ensures silence - G__CurrentCall(G__NOP, 0, 0); G__settemplevel(-1); G__UnlockCriticalSection(); @@ -514,7 +511,7 @@ R__LOCKGUARD2(gCINTMutex); G__value result = cppyy_call_T(method, self, nargs, args); G__pop_tempobject_nodel(); - return G__int(result); + return (cppyy_object_t)G__int(result); } cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /*handle*/, cppyy_index_t /*idx*/) { @@ -552,7 +549,7 @@ TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetClassInfo()) return cr->Property() & G__BIT_ISNAMESPACE; - if (strcmp(cr.GetClassName(), "") == 0) + if (handle == 
(cppyy_scope_t)GLOBAL_HANDLE) return true; return false; } @@ -655,7 +652,7 @@ TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetListOfMethods()) return cr->GetListOfMethods()->GetSize(); - else if (strcmp(cr.GetClassName(), "") == 0) { + else if (handle == (cppyy_scope_t)GLOBAL_HANDLE) { if (g_globalfuncs.empty()) { TCollection* funcs = gROOT->GetListOfGlobalFunctions(kTRUE); g_globalfuncs.reserve(funcs->GetSize()); @@ -678,6 +675,7 @@ TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) return (cppyy_index_t)imeth; + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return (cppyy_index_t)&g_globalfuncs[imeth]; } @@ -698,14 +696,12 @@ } ++imeth; } - } - - if (result.empty()) { + } else if (handle == (cppyy_scope_t)GLOBAL_HANDLE) { TCollection* funcs = gROOT->GetListOfGlobalFunctions(kTRUE); TFunction* func = 0; TIter ifunc(funcs); while ((func = (TFunction*)ifunc.Next())) { - if (strcmp(func->GetName(), name) == 0) { + if (strcmp(name, func->GetName()) == 0) { g_globalfuncs.push_back(*func); result.push_back((cppyy_index_t)func); } @@ -715,7 +711,7 @@ if (result.empty()) return (cppyy_index_t*)0; - cppyy_index_t* llresult = (cppyy_index_t*)malloc(sizeof(cppyy_index_t)*result.size()+1); + cppyy_index_t* llresult = (cppyy_index_t*)malloc(sizeof(cppyy_index_t)*(result.size()+1)); for (int i = 0; i < (int)result.size(); ++i) llresult[i] = result[i]; llresult[result.size()] = -1; return llresult; @@ -844,7 +840,7 @@ TClassRef& lccr = type_from_handle(lc); TClassRef& rccr = type_from_handle(rc); - if (!lccr.GetClass() || !rccr.GetClass() || scope != GLOBAL_HANDLE) + if (!lccr.GetClass() || !rccr.GetClass() || scope != (cppyy_scope_t)GLOBAL_HANDLE) return (cppyy_index_t)-1; // (void*)-1 is in kernel space, so invalid as a method handle std::string lcname = lccr->GetName(); @@ -893,7 +889,7 @@ TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetListOfDataMembers()) return cr->GetListOfDataMembers()->GetSize(); - else if 
(strcmp(cr.GetClassName(), "") == 0) { + else if (handle == (cppyy_scope_t)GLOBAL_HANDLE) { TCollection* vars = gROOT->GetListOfGlobals(kTRUE); if (g_globalvars.size() != (GlobalVars_t::size_type)vars->GetSize()) { g_globalvars.clear(); @@ -919,6 +915,7 @@ TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return cppstring_to_cstring(m->GetName()); } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; return cppstring_to_cstring(gbl.GetName()); } @@ -939,6 +936,7 @@ } return cppstring_to_cstring(fullType); } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; return cppstring_to_cstring(gbl.GetFullTypeName()); } @@ -950,6 +948,7 @@ TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return (size_t)m->GetOffsetCint(); } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; return (size_t)gbl.GetAddress(); } @@ -980,13 +979,15 @@ } ++idm; } + } else if (handle == (cppyy_type_t)GLOBAL_HANDLE) { + TGlobal* gbl = (TGlobal*)gROOT->GetListOfGlobals(kTRUE)->FindObject(name); + if (!gbl) + return -1; + int idx = g_globalvars.size(); + g_globalvars.push_back(*gbl); + return idx; } - TGlobal* gbl = (TGlobal*)gROOT->GetListOfGlobals(kTRUE)->FindObject(name); - if (!gbl) - return -1; - int idx = g_globalvars.size(); - g_globalvars.push_back(*gbl); - return idx; + return -1; } @@ -998,6 +999,7 @@ TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return m->Property() & G__BIT_ISPUBLIC; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return 1; // global data is always public } @@ -1008,6 +1010,7 @@ TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return m->Property() & G__BIT_ISSTATIC; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return 1; // global data is always static } diff --git a/pypy/module/cppyy/src/dummy_backend.cxx 
b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -36,11 +36,11 @@ struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- - static int s_scope_id = 0; - s_handles["example01"] = ++s_scope_id; + static long s_scope_id = 0; + s_handles["example01"] = (cppyy_scope_t)++s_scope_id; const char* methods[] = {"staticAddToDouble"}; Cppyy_PseudoInfo info(1, methods); - s_scopes[s_scope_id] = info; + s_scopes[(cppyy_scope_t)s_scope_id] = info; // -- class example01 } } _init; diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -375,7 +375,7 @@ } if (result.empty()) return (cppyy_index_t*)0; - cppyy_index_t* llresult = (cppyy_index_t*)malloc(sizeof(cppyy_index_t)*result.size()+1); + cppyy_index_t* llresult = (cppyy_index_t*)malloc(sizeof(cppyy_index_t)*(result.size()+1)); for (int i = 0; i < (int)result.size(); ++i) llresult[i] = result[i]; llresult[result.size()] = -1; return llresult; @@ -481,7 +481,7 @@ return (cppyy_method_t)m.Stubfunction(); } -cppyy_method_t cppyy_get_global_operator(cppyy_scope_t scope, cppyy_scope_t lc, cppyy_scope_t rc, const char* op) { +cppyy_index_t cppyy_get_global_operator(cppyy_scope_t scope, cppyy_scope_t lc, cppyy_scope_t rc, const char* op) { Reflex::Type lct = type_from_handle(lc); Reflex::Type rct = type_from_handle(rc); Reflex::Scope nss = scope_from_handle(scope); From noreply at buildbot.pypy.org Tue Apr 22 19:46:08 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 22 Apr 2014 19:46:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove class which is unused Message-ID: <20140422174608.DA7291D2380@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70857:3a9f87134e64 Date: 2014-04-22 10:45 -0700 
http://bitbucket.org/pypy/pypy/changeset/3a9f87134e64/ Log: Remove class which is unused diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -739,11 +739,6 @@ def __repr__(self): return repr(self.key) -class _r_dictkey_with_hash(_r_dictkey): - def __init__(self, dic, key, hash): - self.dic = dic - self.key = key - self.hash = hash # ____________________________________________________________ From noreply at buildbot.pypy.org Tue Apr 22 20:34:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:00 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: kill rarely used bufferstr_new_w func Message-ID: <20140422183400.208CC1D2658@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70858:d6324ad0ee46 Date: 2014-03-19 02:50 -0400 http://bitbucket.org/pypy/pypy/changeset/d6324ad0ee46/ Log: kill rarely used bufferstr_new_w func diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1333,13 +1333,6 @@ self.wrap('read-write buffer expected')) return buffer - def bufferstr_new_w(self, w_obj): - # Implement the "new buffer interface" (new in Python 2.7) - # returning an unwrapped string. It doesn't accept unicode - # strings - buffer = self.buffer_w(w_obj) - return buffer.as_str() - def bufferstr_w(self, w_obj): # Directly returns an interp-level str. 
Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -53,7 +53,7 @@ return space.bufferstr_w(self) def _op_val(self, space, w_other): - return space.bufferstr_new_w(w_other) + return space.buffer_w(w_other).as_str() def _chr(self, char): assert len(char) == 1 @@ -432,12 +432,12 @@ def makebytearraydata_w(space, w_source): # String-like argument try: - string = space.bufferstr_new_w(w_source) + buf = space.buffer_w(w_source) except OperationError as e: if not e.match(space, space.w_TypeError): raise else: - return [c for c in string] + return [c for c in buf.as_str()] # sequence of bytes w_iter = space.iter(w_source) @@ -1055,7 +1055,7 @@ if data and i != 0: newdata.extend(data) - newdata.extend([c for c in space.bufferstr_new_w(w_s)]) + newdata.extend([c for c in space.buffer_w(w_s).as_str()]) return W_BytearrayObject(newdata) _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) From noreply at buildbot.pypy.org Tue Apr 22 20:34:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:01 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: reorganize the objspace buffer api functions Message-ID: <20140422183401.4D9261D2658@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70859:6df3577085b2 Date: 2014-03-19 06:22 -0400 http://bitbucket.org/pypy/pypy/changeset/6df3577085b2/ Log: reorganize the objspace buffer api functions diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -194,13 +194,37 @@ def immutable_unique_id(self, space): return None - def buffer_w(self, space): + def buffer_w(self, space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not 
None: w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): - return w_result.buffer_w(space) - self._typed_unwrap_error(space, "buffer") + return w_result.buffer_w(space, flags) + raise oefmt(space.w_TypeError, "'%T' does not have the buffer interface", self) + + def readbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.readbuf_w(space) + self._typed_unwrap_error(space, "readable buffer") + + def writebuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.writebuf_w(space) + self._typed_unwrap_error(space, "writeable buffer") + + def charbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.charbuf_w(space) + self._typed_unwrap_error(space, "character buffer") def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -1321,17 +1345,21 @@ self.wrap('cannot convert negative integer ' 'to unsigned int')) - def buffer_w(self, w_obj): - return w_obj.buffer_w(self) + def buffer_w(self, w_obj, flags): + # New buffer interface, returns a buffer based on flags + return w_obj.buffer_w(self, flags) - def rwbuffer_w(self, w_obj): - # returns a RWBuffer instance - from pypy.interpreter.buffer import RWBuffer - buffer = self.buffer_w(w_obj) - if not isinstance(buffer, RWBuffer): - raise OperationError(self.w_TypeError, - self.wrap('read-write buffer expected')) - return buffer + def readbuf_w(self, w_obj): + # Old buffer interface, returns a readonly buffer + return w_obj.readbuf_w(self) + + def writebuf_w(self, w_obj): + # Old buffer interface, 
returns a writeable buffer + return w_obj.writebuf_w(self) + + def charbuf_w(self, w_obj): + # Old buffer interface, returns a character buffer + return w_obj.charbuf_w(self) def bufferstr_w(self, w_obj): # Directly returns an interp-level str. Note that if w_obj is a @@ -1347,8 +1375,7 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - buffer = self.buffer_w(w_obj) - return buffer.as_str() + return self.readbuf_w(w_obj).as_str() def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): From noreply at buildbot.pypy.org Tue Apr 22 20:34:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:02 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: enable tests we hope to pass Message-ID: <20140422183402.6BBFD1D2658@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70860:5dc317f9fdd7 Date: 2014-03-19 05:30 -0400 http://bitbucket.org/pypy/pypy/changeset/5dc317f9fdd7/ Log: enable tests we hope to pass diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,18 +166,11 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - if test_support.check_impl_detail(cpython=True): - # what is supported and what is not supported by memoryview is - # very inconsisten on CPython. In PyPy, memoryview supports - # the buffer interface, and thus the following comparison - # succeeds. 
See also the comment in - # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer - # - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef") + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/pypy/objspace/std/test/test_memoryview.py b/pypy/objspace/std/test/test_memoryview.py --- a/pypy/objspace/std/test/test_memoryview.py +++ b/pypy/objspace/std/test/test_memoryview.py @@ -234,3 +234,7 @@ assert memoryview("abc") == "abc" assert memoryview("abc") == bytearray("abc") assert memoryview("abc") != 3 + assert not memoryview("abc") == u"abc" + assert memoryview("abc") != u"abc" + assert not u"abc" == memoryview("abc") + assert u"abc" != memoryview("abc") From noreply at buildbot.pypy.org Tue Apr 22 20:34:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:03 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: get tests passing Message-ID: <20140422183403.AB2DD1D2658@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70861:f71f35155a2b Date: 2014-03-21 13:23 -0400 http://bitbucket.org/pypy/pypy/changeset/f71f35155a2b/ Log: get tests passing diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1345,6 +1345,11 @@ self.wrap('cannot convert negative integer ' 'to unsigned int')) + BUF_SIMPLE = 0 + BUF_FULL_RO = 1 + BUF_CONTIG = 2 + BUF_CONTIG_RO = 3 + def buffer_w(self, w_obj, flags): # New buffer interface, returns a buffer based on flags return 
w_obj.buffer_w(self, flags) @@ -1377,6 +1382,14 @@ raise return self.readbuf_w(w_obj).as_str() + def bufferchar_w(self, w_obj): + try: + return self.str_w(w_obj) + except OperationError, e: + if not e.match(self, self.w_TypeError): + raise + return self.charbuf_w(w_obj) + def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): return None diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -468,7 +468,7 @@ """readinto() -> Undocumented. Don't use this; it may go away.""" # XXX not the most efficient solution as it doesn't avoid the copying space = self.space - rwbuffer = space.rwbuffer_w(w_rwbuffer) + rwbuffer = space.writebuf_w(w_rwbuffer) w_data = self.file_read(rwbuffer.getlength()) data = space.str_w(w_data) rwbuffer.setslice(0, data) diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -80,7 +80,7 @@ self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.buffer_w(w_buffer, space.BUF_CONTIG) length = rwbuffer.getlength() w_data = space.call_method(self, "read", space.wrap(length)) diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -41,7 +41,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.buffer_w(w_buffer, space.BUF_CONTIG) size = rwbuffer.getlength() output = self.read(size) diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -366,7 +366,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) self._check_readable(space) 
- rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.buffer_w(w_buffer, space.BUF_CONTIG) length = rwbuffer.getlength() try: buf = os.read(self.fd, length) diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -122,7 +122,7 @@ @unwrap_spec(offset='index') def recv_bytes_into(self, space, w_buffer, offset=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.writebuf_w(w_buffer) length = rwbuffer.getlength() res, newbuf = self.do_recv_string( diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -16,6 +16,7 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.rarithmetic import intmask, r_uint +from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker TYPEMAP = { @@ -352,8 +353,13 @@ lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) - def buffer_w(self, space): - from pypy.module._rawffi.buffer import RawFFIBuffer + def buffer_w(self, space, flags): + return RawFFIBuffer(self) + + def readbuf_w(self, space): + return RawFFIBuffer(self) + + def writebuf_w(self, space): return RawFFIBuffer(self) def getrawsize(self): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1100,6 +1100,12 @@ assert a[3] == 'z' assert a[4] == 't' + b = memoryview(a) + assert len(b) == 10 + assert b[3] == 'z' + b[3] = 'x' + assert b[3] == 'x' + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- 
a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -419,7 +419,7 @@ @unwrap_spec(nbytes=int, flags=int) def recv_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.writebuf_w(w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt @@ -430,7 +430,7 @@ @unwrap_spec(nbytes=int, flags=int) def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.writebuf_w(w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -132,7 +132,10 @@ self.len = 0 self.allocated = 0 - def buffer_w(self, space): + def readbuf_w(self, space): + return ArrayBuffer(self) + + def writebuf_w(self, space): return ArrayBuffer(self) def descr_append(self, space, w_x): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -49,7 +49,7 @@ def get_rawbuffer(space, w_obj): try: - buf = space.buffer_w(w_obj) + buf = space.buffer_w(w_obj, space.BUF_SIMPLE) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -151,7 +151,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.buffer_w(w_value) + buf = space.buffer_w(w_value, space.BUF_SIMPLE) # TODO: report if too many items given? 
for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -192,7 +192,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.buffer_w(w_value) + buf = space.buffer_w(w_value, space.BUF_SIMPLE) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -209,7 +209,7 @@ if mutate_flag != 0: try: - rwbuffer = space.rwbuffer_w(w_arg) + rwbuffer = space.writebuf_w(w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -220,7 +220,7 @@ space = self.space if space.type(w_obj).is_heaptype(): try: - buf = space.buffer_w(w_obj) + buf = space.readbuf_w(w_obj) except OperationError as e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -342,8 +342,14 @@ def descr_copy(self, space): return self.convert_to(space, self.get_dtype(space)) - def buffer_w(self, space): - return self.descr_ravel(space).buffer_w(space) + def buffer_w(self, space, flags): + return self.descr_ravel(space).buffer_w(space, flags) + + def readbuf_w(self, space): + return self.descr_ravel(space).readbuf_w(space) + + def charbuf_w(self, space): + return self.descr_ravel(space).charbuf_w(space) def descr_byteswap(self, space): return self.get_dtype(space).itemtype.byteswap(self) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ 
-602,11 +602,20 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ctypes not implemented yet")) - def buffer_w(self, space): + def buffer_w(self, space, flags): return self.implementation.get_buffer(space) + def readbuf_w(self, space): + return self.implementation.get_buffer(space) + + def writebuf_w(self, space): + return self.implementation.get_buffer(space) + + def charbuf_w(self, space): + return self.implementation.get_buffer(space).as_str() + def descr_get_data(self, space): - return space.newbuffer(self.buffer_w(space)) + return space.newbuffer(self.implementation.get_buffer(space)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): @@ -1159,7 +1168,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) - buf = space.buffer_w(w_buffer) + try: + buf = space.writebuf_w(w_buffer) + except OperationError: + buf = space.readbuf_w(w_buffer) try: raw_ptr = buf.get_raw_address() except ValueError: diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -17,7 +17,7 @@ self.space = space self.mmap = mmap_obj - def buffer_w(self, space): + def readbuf_w(self, space): self.check_valid() return MMapBuffer(self.space, self.mmap) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,6 +1,7 @@ from rpython.annotator.model import SomeInstance, s_None from pypy.interpreter import argument, gateway from pypy.interpreter.baseobjspace import W_Root, ObjSpace, SpaceCache +from pypy.interpreter.buffer import Buffer from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.sliceobject import W_SliceObject @@ -295,8 +296,11 @@ ec._py_repr = None return ec - def buffer_w(self, w_obj): - from 
pypy.interpreter.buffer import Buffer + def readbuf_w(self, w_obj): + is_root(w_obj) + return Buffer() + + def writebuf_w(self, w_obj): is_root(w_obj) return Buffer() diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -27,8 +27,17 @@ """representation for debugging purposes""" return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) - def buffer_w(w_self, space): - return BytearrayBuffer(w_self.data) + def buffer_w(self, space, flags): + return BytearrayBuffer(self.data) + + def readbuf_w(self, space): + return BytearrayBuffer(self.data) + + def writebuf_w(self, space): + return BytearrayBuffer(self.data) + + def charbuf_w(self, space): + return ''.join(self.data) def _new(self, value): return W_BytearrayObject(_make_data(value)) @@ -50,10 +59,10 @@ return space.wrap(ord(character)) def _val(self, space): - return space.bufferstr_w(self) + return space.buffer_w(self, space.BUF_SIMPLE).as_str() def _op_val(self, space, w_other): - return space.buffer_w(w_other).as_str() + return space.buffer_w(w_other, space.BUF_SIMPLE).as_str() def _chr(self, char): assert len(char) == 1 @@ -432,7 +441,7 @@ def makebytearraydata_w(space, w_source): # String-like argument try: - buf = space.buffer_w(w_source) + buf = space.buffer_w(w_source, space.BUF_FULL_RO) except OperationError as e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -442,8 +442,14 @@ def str_w(self, space): return self._value - def buffer_w(w_self, space): - return StringBuffer(w_self._value) + def buffer_w(self, space, flags): + return StringBuffer(self._value) + + def readbuf_w(self, space): + return StringBuffer(self._value) + + def charbuf_w(self, space): + return self._value def listview_bytes(self): return 
_create_list_from_bytes(self._value) @@ -471,8 +477,7 @@ return self._value def _op_val(self, space, w_other): - return space.bufferstr_w(w_other) - #return w_other._value + return space.bufferchar_w(w_other) def _chr(self, char): assert len(char) == 1 diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -692,7 +692,7 @@ else: # If object supports the buffer interface try: - buf = space.buffer_w(w_value) + buf = space.charbuf_w(w_value) except OperationError as e: if not e.match(space, space.w_TypeError): raise @@ -700,8 +700,7 @@ "int() argument must be a string or a number, " "not '%T'", w_value) else: - value, w_longval = _string_to_int_or_long(space, w_value, - buf.as_str()) + value, w_longval = _string_to_int_or_long(space, w_value, buf) else: base = space.int_w(w_base) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -512,7 +512,7 @@ unicode_to_decimal_w(space, w_value)) else: try: - buf = space.buffer_w(w_value) + buf = space.charbuf_w(w_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -520,8 +520,7 @@ "long() argument must be a string or a number, " "not '%T'", w_value) else: - return _string_to_w_long(space, w_longtype, w_value, - buf.as_str()) + return _string_to_w_long(space, w_longtype, w_value, buf) else: base = space.int_w(w_base) diff --git a/pypy/objspace/std/memoryview.py b/pypy/objspace/std/memoryview.py --- a/pypy/objspace/std/memoryview.py +++ b/pypy/objspace/std/memoryview.py @@ -40,9 +40,18 @@ assert isinstance(buf, buffer.Buffer) self.buf = buf - def buffer_w(self, space): + def buffer_w(self, space, flags): return self.buf + def readbuf_w(self, space): + return self.buf + + def writebuf_w(self, space): + return self.buf + + def charbuf_w(self, space): + return self.buf.as_str() + @staticmethod 
@unwrap_spec(offset=int, size=int) def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): @@ -57,7 +66,7 @@ from pypy.interpreter.buffer import StringBuffer buf = StringBuffer(builder.build()) else: - buf = space.buffer_w(w_object) + buf = space.readbuf_w(w_object) if offset == 0 and size == -1: return W_Buffer(buf) @@ -175,20 +184,12 @@ assert isinstance(buf, buffer.Buffer) self.buf = buf - def buffer_w(self, space): - """ - Note that memoryview() is very inconsistent in CPython: it does not - support the buffer interface but does support the new buffer - interface: as a result, it is possible to pass memoryview to - e.g. socket.send() but not to file.write(). For simplicity and - consistency, in PyPy memoryview DOES support buffer(), which means - that it is accepted in more places than CPython. - """ + def buffer_w(self, space, flags): return self.buf @staticmethod def descr_new_memoryview(space, w_subtype, w_object): - return W_MemoryView(space.buffer_w(w_object)) + return W_MemoryView(space.buffer_w(w_object, space.BUF_FULL_RO)) def _make_descr__cmp(name): def descr__cmp(self, space, w_other): @@ -199,7 +200,7 @@ return space.wrap(getattr(operator, name)(str1, str2)) try: - buf = space.buffer_w(w_other) + buf = space.buffer_w(w_other, space.BUF_CONTIG_RO) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -36,7 +36,10 @@ def str_w(self, space): return self.force() - def buffer_w(self, space): + def buffer_w(self, space, flags): + return StringBuffer(self.force()) + + def readbuf_w(self, space): return StringBuffer(self.force()) def descr_len(self, space): diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -634,7 
+634,8 @@ table = maketrans('abc', 'xyz') assert 'xyzxyz' == 'xyzabcdef'.translate(table, 'def') - assert 'xyzxyz' == 'xyzabcdef'.translate(memoryview(table), 'def') + exc = raises(TypeError, "'xyzabcdef'.translate(memoryview(table), 'def')") + assert 'character buffer' in str(exc.value) table = maketrans('a', 'A') assert 'Abc' == 'abc'.translate(table) diff --git a/pypy/objspace/std/test/test_strbufobject.py b/pypy/objspace/std/test/test_strbufobject.py --- a/pypy/objspace/std/test/test_strbufobject.py +++ b/pypy/objspace/std/test/test_strbufobject.py @@ -44,6 +44,11 @@ assert len(r) == 2 assert len(t) == 4 + def test_buffer(self): + s = 'a'.__add__('b') + assert buffer(s) == buffer('ab') + assert memoryview(s) == 'ab' + def test_add_strbuf(self): # make three strbuf objects s = 'a'.__add__('b') From noreply at buildbot.pypy.org Tue Apr 22 20:34:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:04 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: allow w_obj buffer methods to fail with rpython exception Message-ID: <20140422183404.E52B11D2658@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70862:f46d701f1b28 Date: 2014-03-20 16:43 -0400 http://bitbucket.org/pypy/pypy/changeset/f46d701f1b28/ Log: allow w_obj buffer methods to fail with rpython exception diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -200,7 +200,7 @@ w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): return w_result.buffer_w(space, flags) - raise oefmt(space.w_TypeError, "'%T' does not have the buffer interface", self) + raise TypeError def readbuf_w(self, space): w_impl = space.lookup(self, '__buffer__') @@ -208,7 +208,7 @@ w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): return 
w_result.readbuf_w(space) - self._typed_unwrap_error(space, "readable buffer") + raise TypeError def writebuf_w(self, space): w_impl = space.lookup(self, '__buffer__') @@ -216,7 +216,7 @@ w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): return w_result.writebuf_w(space) - self._typed_unwrap_error(space, "writeable buffer") + raise TypeError def charbuf_w(self, space): w_impl = space.lookup(self, '__buffer__') @@ -224,7 +224,7 @@ w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): return w_result.charbuf_w(space) - self._typed_unwrap_error(space, "character buffer") + raise TypeError def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -1352,19 +1352,35 @@ def buffer_w(self, w_obj, flags): # New buffer interface, returns a buffer based on flags - return w_obj.buffer_w(self, flags) + try: + return w_obj.buffer_w(self, flags) + except TypeError: + raise oefmt(self.w_TypeError, + "'%T' does not have the buffer interface", w_obj) def readbuf_w(self, w_obj): # Old buffer interface, returns a readonly buffer - return w_obj.readbuf_w(self) + try: + return w_obj.readbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a readable buffer object") def writebuf_w(self, w_obj): # Old buffer interface, returns a writeable buffer - return w_obj.writebuf_w(self) + try: + return w_obj.writebuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a writeable buffer object") def charbuf_w(self, w_obj): # Old buffer interface, returns a character buffer - return w_obj.charbuf_w(self) + try: + return w_obj.charbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a character buffer object") def bufferstr_w(self, w_obj): # Directly returns an interp-level str. 
Note that if w_obj is a diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -635,7 +635,7 @@ table = maketrans('abc', 'xyz') assert 'xyzxyz' == 'xyzabcdef'.translate(table, 'def') exc = raises(TypeError, "'xyzabcdef'.translate(memoryview(table), 'def')") - assert 'character buffer' in str(exc.value) + assert str(exc.value) == 'expected a character buffer object' table = maketrans('a', 'A') assert 'Abc' == 'abc'.translate(table) From noreply at buildbot.pypy.org Tue Apr 22 20:34:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:06 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: split W_Buffer/W_MemoryView implementation files Message-ID: <20140422183406.159301D2658@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70863:0410fe168a2e Date: 2014-03-21 14:31 -0400 http://bitbucket.org/pypy/pypy/changeset/0410fe168a2e/ Log: split W_Buffer/W_MemoryView implementation files diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -3,7 +3,7 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -22,7 +22,6 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.module import Module from pypy.interpreter.function import StaticMethod -from 
pypy.objspace.std.memoryview import W_MemoryView from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject @@ -474,7 +473,7 @@ "PyLong_Type": "space.w_long", "PyComplex_Type": "space.w_complex", "PyByteArray_Type": "space.w_bytearray", - "PyMemoryView_Type": "space.gettypeobject(W_MemoryView.typedef)", + "PyMemoryView_Type": "space.w_memoryview", "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", "PyBaseObject_Type": "space.w_object", 'PyNone_Type': 'space.type(space.w_None)', diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -6,7 +6,7 @@ PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref from pypy.module.array.interp_array import ArrayBuffer -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer PyBufferObjectStruct = lltype.ForwardReference() diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/bufferobject.py @@ -0,0 +1,157 @@ +""" +Implementation of the 'buffer' and 'memoryview' types. +""" +import operator + +from pypy.interpreter import buffer +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef +from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.rstring import StringBuilder +from pypy.objspace.std.memoryobject import _buffer_setitem + + +class W_Buffer(W_Root): + """Implement the built-in 'buffer' type as a wrapper around + an interp-level buffer. 
+ """ + + def __init__(self, buf): + assert isinstance(buf, buffer.Buffer) + self.buf = buf + + def buffer_w(self, space, flags): + return self.buf + + def readbuf_w(self, space): + return self.buf + + def writebuf_w(self, space): + return self.buf + + def charbuf_w(self, space): + return self.buf.as_str() + + @staticmethod + @unwrap_spec(offset=int, size=int) + def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): + if space.isinstance_w(w_object, space.w_unicode): + # unicode objects support the old buffer interface + # but not the new buffer interface (change in python 2.7) + from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE + unistr = space.unicode_w(w_object) + builder = StringBuilder(len(unistr) * UNICODE_SIZE) + for unich in unistr: + pack_unichar(unich, builder) + from pypy.interpreter.buffer import StringBuffer + buf = StringBuffer(builder.build()) + else: + buf = space.readbuf_w(w_object) + + if offset == 0 and size == -1: + return W_Buffer(buf) + # handle buffer slices + if offset < 0: + raise OperationError(space.w_ValueError, + space.wrap("offset must be zero or positive")) + if size < -1: + raise OperationError(space.w_ValueError, + space.wrap("size must be zero or positive")) + if isinstance(buf, buffer.RWBuffer): + buf = buffer.RWSubBuffer(buf, offset, size) + else: + buf = buffer.SubBuffer(buf, offset, size) + return W_Buffer(buf) + + def descr_len(self, space): + return space.wrap(self.buf.getlength()) + + def descr_getitem(self, space, w_index): + start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) + if step == 0: # index only + return space.wrap(self.buf.getitem(start)) + res = self.buf.getslice(start, stop, step, size) + return space.wrap(res) + + @unwrap_spec(newstring='bufferstr') + def descr_setitem(self, space, w_index, newstring): + if not isinstance(self.buf, buffer.RWBuffer): + raise OperationError(space.w_TypeError, + space.wrap("buffer is read-only")) + _buffer_setitem(space, 
self.buf, w_index, newstring) + + def descr_str(self, space): + return space.wrap(self.buf.as_str()) + + @unwrap_spec(other='bufferstr') + def descr_add(self, space, other): + return space.wrap(self.buf.as_str() + other) + + def _make_descr__cmp(name): + def descr__cmp(self, space, w_other): + if not isinstance(w_other, W_Buffer): + return space.w_NotImplemented + # xxx not the most efficient implementation + str1 = self.buf.as_str() + str2 = w_other.buf.as_str() + return space.wrap(getattr(operator, name)(str1, str2)) + descr__cmp.func_name = name + return descr__cmp + + descr_eq = _make_descr__cmp('eq') + descr_ne = _make_descr__cmp('ne') + descr_lt = _make_descr__cmp('lt') + descr_le = _make_descr__cmp('le') + descr_gt = _make_descr__cmp('gt') + descr_ge = _make_descr__cmp('ge') + + def descr_hash(self, space): + return space.wrap(compute_hash(self.buf.as_str())) + + def descr_mul(self, space, w_times): + # xxx not the most efficient implementation + w_string = space.wrap(self.buf.as_str()) + # use the __mul__ method instead of space.mul() so that we + # return NotImplemented instead of raising a TypeError + return space.call_method(w_string, '__mul__', w_times) + + def descr_repr(self, space): + if isinstance(self.buf, buffer.RWBuffer): + info = 'read-write buffer' + else: + info = 'read-only buffer' + addrstring = self.getaddrstring(space) + + return space.wrap("<%s for 0x%s, size %d>" % + (info, addrstring, self.buf.getlength())) + +W_Buffer.typedef = TypeDef( + "buffer", + __doc__ = """\ +buffer(object [, offset[, size]]) + +Create a new buffer object which references the given object. +The buffer will reference a slice of the target object from the +start of the object (or at the specified offset). The slice will +extend to the end of the target object (or with the specified size). 
+""", + __new__ = interp2app(W_Buffer.descr_new_buffer), + __len__ = interp2app(W_Buffer.descr_len), + __getitem__ = interp2app(W_Buffer.descr_getitem), + __setitem__ = interp2app(W_Buffer.descr_setitem), + __str__ = interp2app(W_Buffer.descr_str), + __add__ = interp2app(W_Buffer.descr_add), + __eq__ = interp2app(W_Buffer.descr_eq), + __ne__ = interp2app(W_Buffer.descr_ne), + __lt__ = interp2app(W_Buffer.descr_lt), + __le__ = interp2app(W_Buffer.descr_le), + __gt__ = interp2app(W_Buffer.descr_gt), + __ge__ = interp2app(W_Buffer.descr_ge), + __hash__ = interp2app(W_Buffer.descr_hash), + __mul__ = interp2app(W_Buffer.descr_mul), + __rmul__ = interp2app(W_Buffer.descr_mul), + __repr__ = interp2app(W_Buffer.descr_repr), +) +W_Buffer.typedef.acceptable_as_base_class = False diff --git a/pypy/objspace/std/memoryview.py b/pypy/objspace/std/memoryobject.py rename from pypy/objspace/std/memoryview.py rename to pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryview.py +++ b/pypy/objspace/std/memoryobject.py @@ -8,8 +8,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from rpython.rlib.objectmodel import compute_hash -from rpython.rlib.rstring import StringBuilder def _buffer_setitem(space, buf, w_index, newstring): @@ -31,150 +29,6 @@ " slicing with a step")) -class W_Buffer(W_Root): - """Implement the built-in 'buffer' type as a wrapper around - an interp-level buffer. 
- """ - - def __init__(self, buf): - assert isinstance(buf, buffer.Buffer) - self.buf = buf - - def buffer_w(self, space, flags): - return self.buf - - def readbuf_w(self, space): - return self.buf - - def writebuf_w(self, space): - return self.buf - - def charbuf_w(self, space): - return self.buf.as_str() - - @staticmethod - @unwrap_spec(offset=int, size=int) - def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): - if space.isinstance_w(w_object, space.w_unicode): - # unicode objects support the old buffer interface - # but not the new buffer interface (change in python 2.7) - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - unistr = space.unicode_w(w_object) - builder = StringBuilder(len(unistr) * UNICODE_SIZE) - for unich in unistr: - pack_unichar(unich, builder) - from pypy.interpreter.buffer import StringBuffer - buf = StringBuffer(builder.build()) - else: - buf = space.readbuf_w(w_object) - - if offset == 0 and size == -1: - return W_Buffer(buf) - # handle buffer slices - if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) - if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) - if isinstance(buf, buffer.RWBuffer): - buf = buffer.RWSubBuffer(buf, offset, size) - else: - buf = buffer.SubBuffer(buf, offset, size) - return W_Buffer(buf) - - def descr_len(self, space): - return space.wrap(self.buf.getlength()) - - def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) - if step == 0: # index only - return space.wrap(self.buf.getitem(start)) - res = self.buf.getslice(start, stop, step, size) - return space.wrap(res) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - if not isinstance(self.buf, buffer.RWBuffer): - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - _buffer_setitem(space, 
self.buf, w_index, newstring) - - def descr_str(self, space): - return space.wrap(self.buf.as_str()) - - @unwrap_spec(other='bufferstr') - def descr_add(self, space, other): - return space.wrap(self.buf.as_str() + other) - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if not isinstance(w_other, W_Buffer): - return space.w_NotImplemented - # xxx not the most efficient implementation - str1 = self.buf.as_str() - str2 = w_other.buf.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def descr_hash(self, space): - return space.wrap(compute_hash(self.buf.as_str())) - - def descr_mul(self, space, w_times): - # xxx not the most efficient implementation - w_string = space.wrap(self.buf.as_str()) - # use the __mul__ method instead of space.mul() so that we - # return NotImplemented instead of raising a TypeError - return space.call_method(w_string, '__mul__', w_times) - - def descr_repr(self, space): - if isinstance(self.buf, buffer.RWBuffer): - info = 'read-write buffer' - else: - info = 'read-only buffer' - addrstring = self.getaddrstring(space) - - return space.wrap("<%s for 0x%s, size %d>" % - (info, addrstring, self.buf.getlength())) - -W_Buffer.typedef = TypeDef( - "buffer", - __doc__ = """\ -buffer(object [, offset[, size]]) - -Create a new buffer object which references the given object. -The buffer will reference a slice of the target object from the -start of the object (or at the specified offset). The slice will -extend to the end of the target object (or with the specified size). 
-""", - __new__ = interp2app(W_Buffer.descr_new_buffer), - __len__ = interp2app(W_Buffer.descr_len), - __getitem__ = interp2app(W_Buffer.descr_getitem), - __setitem__ = interp2app(W_Buffer.descr_setitem), - __str__ = interp2app(W_Buffer.descr_str), - __add__ = interp2app(W_Buffer.descr_add), - __eq__ = interp2app(W_Buffer.descr_eq), - __ne__ = interp2app(W_Buffer.descr_ne), - __lt__ = interp2app(W_Buffer.descr_lt), - __le__ = interp2app(W_Buffer.descr_le), - __gt__ = interp2app(W_Buffer.descr_gt), - __ge__ = interp2app(W_Buffer.descr_ge), - __hash__ = interp2app(W_Buffer.descr_hash), - __mul__ = interp2app(W_Buffer.descr_mul), - __rmul__ = interp2app(W_Buffer.descr_mul), - __repr__ = interp2app(W_Buffer.descr_repr), -) -W_Buffer.typedef.acceptable_as_base_class = False - - class W_MemoryView(W_Root): """Implement the built-in 'memoryview' type as a wrapper around an interp-level buffer. diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -63,7 +63,8 @@ from pypy.objspace.std import unicodeobject from pypy.objspace.std import dictproxyobject from pypy.objspace.std import proxyobject - from pypy.objspace.std.memoryview import W_Buffer, W_MemoryView + from pypy.objspace.std import bufferobject + from pypy.objspace.std import memoryobject import pypy.objspace.std.default # register a few catch-all multimethods import pypy.objspace.std.marshal_impl # install marshal multimethods @@ -83,8 +84,8 @@ self.pythontypes.append(intobject.W_IntObject.typedef) self.pythontypes.append(boolobject.W_BoolObject.typedef) self.pythontypes.append(longobject.W_LongObject.typedef) - self.pythontypes.append(W_Buffer.typedef) - self.pythontypes.append(W_MemoryView.typedef) + self.pythontypes.append(bufferobject.W_Buffer.typedef) + self.pythontypes.append(memoryobject.W_MemoryView.typedef) # the set of implementation types self.typeorder = { diff --git a/pypy/objspace/std/objspace.py 
b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -24,7 +24,7 @@ from pypy.objspace.std.iterobject import W_AbstractSeqIterObject from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.longobject import W_LongObject, newlong -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.iterobject import W_SeqIterObject diff --git a/pypy/objspace/std/test/test_bufferobject.py b/pypy/objspace/std/test/test_bufferobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_bufferobject.py @@ -0,0 +1,190 @@ +class AppTestBuffer: + spaceconfig = dict(usemodules=['array']) + + def test_init(self): + import sys + class A(object): + def __buffer__(self): + return buffer('123') + if '__pypy__' not in sys.builtin_module_names: + raises(TypeError, buffer, A()) + else: + assert buffer(A()) == buffer('123') + + def test_unicode_buffer(self): + import sys + b = buffer(u"ab") + if sys.maxunicode == 65535: # UCS2 build + assert len(b) == 4 + if sys.byteorder == "big": + assert b[0:4] == "\x00a\x00b" + else: + assert b[0:4] == "a\x00b\x00" + else: # UCS4 build + assert len(b) == 8 + if sys.byteorder == "big": + assert b[0:8] == "\x00\x00\x00a\x00\x00\x00b" + else: + assert b[0:8] == "a\x00\x00\x00b\x00\x00\x00" + + def test_array_buffer(self): + import array + b = buffer(array.array("B", [1, 2, 3])) + assert len(b) == 3 + assert b[0:3] == "\x01\x02\x03" + + def test_nonzero(self): + assert buffer('\x00') + assert not buffer('') + import array + assert buffer(array.array("B", [0])) + assert not buffer(array.array("B", [])) + + def test_str(self): + assert str(buffer('hello')) == 'hello' + + def test_repr(self): + # from 2.5.2 lib tests + assert repr(buffer('hello')).startswith(' buffer('ab')) + assert buffer('ab') 
>= buffer('ab') + assert buffer('ab') != buffer('abc') + assert buffer('ab') < buffer('abc') + assert buffer('ab') <= buffer('ab') + assert buffer('ab') > buffer('aa') + assert buffer('ab') >= buffer('ab') + + def test_hash(self): + assert hash(buffer('hello')) == hash('hello') + + def test_mul(self): + assert buffer('ab') * 5 == 'ababababab' + assert buffer('ab') * (-2) == '' + assert 5 * buffer('ab') == 'ababababab' + assert (-2) * buffer('ab') == '' + + def test_offset_size(self): + b = buffer('hello world', 6) + assert len(b) == 5 + assert b[0] == 'w' + assert b[:] == 'world' + raises(IndexError, 'b[5]') + b = buffer(b, 2) + assert len(b) == 3 + assert b[0] == 'r' + assert b[:] == 'rld' + raises(IndexError, 'b[3]') + b = buffer('hello world', 1, 8) + assert len(b) == 8 + assert b[0] == 'e' + assert b[:] == 'ello wor' + raises(IndexError, 'b[8]') + b = buffer(b, 2, 3) + assert len(b) == 3 + assert b[2] == ' ' + assert b[:] == 'lo ' + raises(IndexError, 'b[3]') + b = buffer('hello world', 55) + assert len(b) == 0 + assert b[:] == '' + b = buffer('hello world', 6, 999) + assert len(b) == 5 + assert b[:] == 'world' + + raises(ValueError, buffer, "abc", -1) + raises(ValueError, buffer, "abc", 0, -2) + + def test_rw_offset_size(self): + import array + + a = array.array("c", 'hello world') + b = buffer(a, 6) + assert len(b) == 5 + assert b[0] == 'w' + assert b[:] == 'world' + raises(IndexError, 'b[5]') + b[0] = 'W' + assert str(b) == 'World' + assert a.tostring() == 'hello World' + b[:] = '12345' + assert a.tostring() == 'hello 12345' + raises(IndexError, 'b[5] = "."') + b[4:2] = '' + assert a.tostring() == 'hello 12345' + + b = buffer(b, 2) + assert len(b) == 3 + assert b[0] == '3' + assert b[:] == '345' + raises(IndexError, 'b[3]') + b[1] = 'X' + assert a.tostring() == 'hello 123X5' + raises(IndexError, 'b[3] = "."') + + a = array.array("c", 'hello world') + b = buffer(a, 1, 8) + assert len(b) == 8 + assert b[0] == 'e' + assert b[:] == 'ello wor' + 
raises(IndexError, 'b[8]') + b[0] = 'E' + assert str(b) == 'Ello wor' + assert a.tostring() == 'hEllo world' + b[:] = '12345678' + assert a.tostring() == 'h12345678ld' + raises(IndexError, 'b[8] = "."') + + b = buffer(b, 2, 3) + assert len(b) == 3 + assert b[2] == '5' + assert b[:] == '345' + raises(IndexError, 'b[3]') + b[1] = 'X' + assert a.tostring() == 'h123X5678ld' + raises(IndexError, 'b[3] = "."') + + b = buffer(a, 55) + assert len(b) == 0 + assert b[:] == '' + b = buffer(a, 6, 999) + assert len(b) == 5 + assert b[:] == '678ld' + + raises(ValueError, buffer, a, -1) + raises(ValueError, buffer, a, 0, -2) + + def test_slice(self): + # Test extended slicing by comparing with list slicing. + s = "".join(chr(c) for c in list(range(255, -1, -1))) + b = buffer(s) + indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300) + for start in indices: + for stop in indices: + # Skip step 0 (invalid) + for step in indices[1:]: + assert b[start:stop:step] == s[start:stop:step] + + def test_getitem_only_ints(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + buf = buffer('hello world') + raises(TypeError, "buf[MyInt(0)]") + raises(TypeError, "buf[MyInt(0):MyInt(5)]") diff --git a/pypy/objspace/std/test/test_memoryview.py b/pypy/objspace/std/test/test_memoryobject.py rename from pypy/objspace/std/test/test_memoryview.py rename to pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryview.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -1,195 +1,3 @@ -class AppTestBuffer: - spaceconfig = dict(usemodules=['array']) - - def test_init(self): - import sys - class A(object): - def __buffer__(self): - return buffer('123') - if '__pypy__' not in sys.builtin_module_names: - raises(TypeError, buffer, A()) - else: - assert buffer(A()) == buffer('123') - - def test_unicode_buffer(self): - import sys - b = buffer(u"ab") - if sys.maxunicode == 65535: # UCS2 build - assert len(b) == 4 - if 
sys.byteorder == "big": - assert b[0:4] == "\x00a\x00b" - else: - assert b[0:4] == "a\x00b\x00" - else: # UCS4 build - assert len(b) == 8 - if sys.byteorder == "big": - assert b[0:8] == "\x00\x00\x00a\x00\x00\x00b" - else: - assert b[0:8] == "a\x00\x00\x00b\x00\x00\x00" - - def test_array_buffer(self): - import array - b = buffer(array.array("B", [1, 2, 3])) - assert len(b) == 3 - assert b[0:3] == "\x01\x02\x03" - - def test_nonzero(self): - assert buffer('\x00') - assert not buffer('') - import array - assert buffer(array.array("B", [0])) - assert not buffer(array.array("B", [])) - - def test_str(self): - assert str(buffer('hello')) == 'hello' - - def test_repr(self): - # from 2.5.2 lib tests - assert repr(buffer('hello')).startswith(' buffer('ab')) - assert buffer('ab') >= buffer('ab') - assert buffer('ab') != buffer('abc') - assert buffer('ab') < buffer('abc') - assert buffer('ab') <= buffer('ab') - assert buffer('ab') > buffer('aa') - assert buffer('ab') >= buffer('ab') - - def test_hash(self): - assert hash(buffer('hello')) == hash('hello') - - def test_mul(self): - assert buffer('ab') * 5 == 'ababababab' - assert buffer('ab') * (-2) == '' - assert 5 * buffer('ab') == 'ababababab' - assert (-2) * buffer('ab') == '' - - def test_offset_size(self): - b = buffer('hello world', 6) - assert len(b) == 5 - assert b[0] == 'w' - assert b[:] == 'world' - raises(IndexError, 'b[5]') - b = buffer(b, 2) - assert len(b) == 3 - assert b[0] == 'r' - assert b[:] == 'rld' - raises(IndexError, 'b[3]') - b = buffer('hello world', 1, 8) - assert len(b) == 8 - assert b[0] == 'e' - assert b[:] == 'ello wor' - raises(IndexError, 'b[8]') - b = buffer(b, 2, 3) - assert len(b) == 3 - assert b[2] == ' ' - assert b[:] == 'lo ' - raises(IndexError, 'b[3]') - b = buffer('hello world', 55) - assert len(b) == 0 - assert b[:] == '' - b = buffer('hello world', 6, 999) - assert len(b) == 5 - assert b[:] == 'world' - - raises(ValueError, buffer, "abc", -1) - raises(ValueError, buffer, "abc", 0, 
-2) - - def test_rw_offset_size(self): - import array - - a = array.array("c", 'hello world') - b = buffer(a, 6) - assert len(b) == 5 - assert b[0] == 'w' - assert b[:] == 'world' - raises(IndexError, 'b[5]') - b[0] = 'W' - assert str(b) == 'World' - assert a.tostring() == 'hello World' - b[:] = '12345' - assert a.tostring() == 'hello 12345' - raises(IndexError, 'b[5] = "."') - b[4:2] = '' - assert a.tostring() == 'hello 12345' - - b = buffer(b, 2) - assert len(b) == 3 - assert b[0] == '3' - assert b[:] == '345' - raises(IndexError, 'b[3]') - b[1] = 'X' - assert a.tostring() == 'hello 123X5' - raises(IndexError, 'b[3] = "."') - - a = array.array("c", 'hello world') - b = buffer(a, 1, 8) - assert len(b) == 8 - assert b[0] == 'e' - assert b[:] == 'ello wor' - raises(IndexError, 'b[8]') - b[0] = 'E' - assert str(b) == 'Ello wor' - assert a.tostring() == 'hEllo world' - b[:] = '12345678' - assert a.tostring() == 'h12345678ld' - raises(IndexError, 'b[8] = "."') - - b = buffer(b, 2, 3) - assert len(b) == 3 - assert b[2] == '5' - assert b[:] == '345' - raises(IndexError, 'b[3]') - b[1] = 'X' - assert a.tostring() == 'h123X5678ld' - raises(IndexError, 'b[3] = "."') - - b = buffer(a, 55) - assert len(b) == 0 - assert b[:] == '' - b = buffer(a, 6, 999) - assert len(b) == 5 - assert b[:] == '678ld' - - raises(ValueError, buffer, a, -1) - raises(ValueError, buffer, a, 0, -2) - - def test_slice(self): - # Test extended slicing by comparing with list slicing. 
- s = "".join(chr(c) for c in list(range(255, -1, -1))) - b = buffer(s) - indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300) - for start in indices: - for stop in indices: - # Skip step 0 (invalid) - for step in indices[1:]: - assert b[start:stop:step] == s[start:stop:step] - - def test_getitem_only_ints(self): - class MyInt(object): - def __init__(self, x): - self.x = x - - def __int__(self): - return self.x - - buf = buffer('hello world') - raises(TypeError, "buf[MyInt(0)]") - raises(TypeError, "buf[MyInt(0):MyInt(5)]") - - class AppTestMemoryView: def test_basic(self): v = memoryview("abc") From noreply at buildbot.pypy.org Tue Apr 22 20:34:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:07 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: use a readonly flag on buffers rather than separate classes Message-ID: <20140422183407.422371D2658@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70864:a7949cb30472 Date: 2014-03-25 19:20 -0400 http://bitbucket.org/pypy/pypy/changeset/a7949cb30472/ Log: use a readonly flag on buffers rather than separate classes diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -6,7 +6,8 @@ class Buffer(object): """Abstract base class for buffers.""" - __slots__ = [] + __slots__ = ['readonly'] + _immutable_ = True def getlength(self): raise NotImplementedError @@ -24,20 +25,6 @@ # May be overridden. No bounds checks. return ''.join([self.getitem(i) for i in range(start, stop, step)]) - def get_raw_address(self): - raise ValueError("no raw buffer") - - def is_writable(self): - return False - - -class RWBuffer(Buffer): - """Abstract base class for read-write buffers.""" - __slots__ = [] - - def is_writable(self): - return True - def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. 
@@ -47,12 +34,20 @@ for i in range(len(string)): self.setitem(start + i, string[i]) + def get_raw_address(self): + raise ValueError("no raw buffer") + + def is_writable(self): + return not self.readonly + class StringBuffer(Buffer): __slots__ = ['value'] + _immutable_ = True def __init__(self, value): self.value = value + self.readonly = True def getlength(self): return len(self.value) @@ -70,13 +65,14 @@ assert 0 <= start <= stop return self.value[start:stop] return "".join([self.value[start + i*step] for i in xrange(size)]) -# ____________________________________________________________ -class SubBufferMixin(object): - _attrs_ = ['buffer', 'offset', 'size'] +class SubBuffer(Buffer): + __slots__ = ['buffer', 'offset', 'size'] + _immutable_ = True def __init__(self, buffer, offset, size): + self.readonly = buffer.readonly self.buffer = buffer self.offset = offset self.size = size @@ -100,14 +96,6 @@ return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) - -class SubBuffer(Buffer): - import_from_mixin(SubBufferMixin) - - -class RWSubBuffer(RWBuffer): - import_from_mixin(SubBufferMixin) - def setitem(self, index, char): self.buffer.setitem(self.offset + index, char) diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -2,13 +2,16 @@ # A convenient read-write buffer. Located here for want of a better place. 
# -from pypy.interpreter.buffer import RWBuffer +from pypy.interpreter.buffer import Buffer from pypy.interpreter.gateway import unwrap_spec -class ByteBuffer(RWBuffer): +class ByteBuffer(Buffer): + _immutable_ = True + def __init__(self, len): self.data = ['\x00'] * len + self.readonly = False def getlength(self): return len(self.data) diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,4 +1,4 @@ -from pypy.interpreter.buffer import RWBuffer +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr @@ -10,12 +10,13 @@ from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw -class LLBuffer(RWBuffer): +class LLBuffer(Buffer): _immutable_ = True def __init__(self, raw_cdata, size): self.raw_cdata = raw_cdata self.size = size + self.readonly = False def getlength(self): return self.size @@ -32,7 +33,7 @@ def getslice(self, start, stop, step, size): if step == 1: return rffi.charpsize2str(rffi.ptradd(self.raw_cdata, start), size) - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.buffer import RWBuffer +from pypy.interpreter.buffer import Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from 
rpython.rlib import rposix @@ -101,11 +101,14 @@ readinto = interp2app(W_BufferedIOBase.readinto_w), ) -class RawBuffer(RWBuffer): +class RawBuffer(Buffer): + _immutable_ = True + def __init__(self, buf, start, length): self.buf = buf self.start = start self.length = length + self.readonly = False def getlength(self): return self.length diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,12 +1,14 @@ -from pypy.interpreter.buffer import RWBuffer +from pypy.interpreter.buffer import Buffer # XXX not the most efficient implementation -class RawFFIBuffer(RWBuffer): +class RawFFIBuffer(Buffer): + _immutable_ = True def __init__(self, datainstance): self.datainstance = datainstance + self.readonly = False def getlength(self): return self.datainstance.getrawsize() diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -9,7 +9,7 @@ from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import RWBuffer +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( interp2app, interpindirect2app, unwrap_spec) @@ -133,10 +133,10 @@ self.allocated = 0 def readbuf_w(self, space): - return ArrayBuffer(self) + return ArrayBuffer(self, True) def writebuf_w(self, space): - return ArrayBuffer(self) + return ArrayBuffer(self, False) def descr_append(self, space, w_x): """ append(x) @@ -586,9 +586,12 @@ v.typecode = k unroll_typecodes = unrolling_iterable(types.keys()) -class ArrayBuffer(RWBuffer): - def __init__(self, array): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, array, readonly): self.array = array + self.readonly = readonly def getlength(self): return self.array.len * 
self.array.itemsize diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -421,12 +421,8 @@ def test_buffer_write(self): a = self.array('c', 'hello') buf = buffer(a) - print repr(buf) - try: - buf[3] = 'L' - except TypeError: - skip("buffer(array) returns a read-only buffer on CPython") - assert a.tostring() == 'helLo' + exc = raises(TypeError, "buf[3] = 'L'") + assert str(exc.value) == "buffer is read-only" def test_buffer_keepalive(self): buf = buffer(self.array('c', 'text')) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -230,11 +230,13 @@ class CPyBuffer(Buffer): # Similar to Py_buffer + _immutable_ = True def __init__(self, ptr, size, w_obj): self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive + self.readonly = True def getlength(self): return self.size diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -53,8 +53,11 @@ def test_buffer(self): module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) + buf = buffer(arr) + exc = raises(TypeError, "buf[1] = '1'") + assert str(exc.value) == "buffer is read-only" # XXX big-endian - assert str(buffer(arr)) == ('\x01\0\0\0' - '\x02\0\0\0' - '\x03\0\0\0' - '\x04\0\0\0') + assert str(buf) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,4 +1,4 @@ -from pypy.interpreter.buffer import RWBuffer +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit from 
rpython.rlib.debug import make_sure_not_resized @@ -314,8 +314,8 @@ def get_storage(self): return self.storage - def get_buffer(self, space): - return ArrayBuffer(self) + def get_buffer(self, space, readonly): + return ArrayBuffer(self, readonly) def astype(self, space, dtype): strides, backstrides = calc_strides(self.get_shape(), dtype, @@ -469,9 +469,12 @@ free_raw_storage(self.storage) -class ArrayBuffer(RWBuffer): - def __init__(self, impl): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, impl, readonly): self.impl = impl + self.readonly = readonly def getitem(self, item): return raw_storage_getitem(lltype.Char, self.impl.storage, item) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -603,19 +603,19 @@ "ctypes not implemented yet")) def buffer_w(self, space, flags): - return self.implementation.get_buffer(space) + return self.implementation.get_buffer(space, True) def readbuf_w(self, space): - return self.implementation.get_buffer(space) + return self.implementation.get_buffer(space, True) def writebuf_w(self, space): - return self.implementation.get_buffer(space) + return self.implementation.get_buffer(space, False) def charbuf_w(self, space): - return self.implementation.get_buffer(space).as_str() + return self.implementation.get_buffer(space, True).as_str() def descr_get_data(self, space): - return space.newbuffer(self.implementation.get_buffer(space)) + return space.newbuffer(self.implementation.get_buffer(space, False)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -356,6 +356,9 @@ a = np.array([1,2,3]) b = buffer(a) assert type(b) is buffer + assert 
'read-only buffer' in repr(b) + exc = raises(TypeError, "b[0] = '0'") + assert str(exc.value) == 'buffer is read-only' def test_type(self): from numpypy import array @@ -2243,6 +2246,7 @@ a.data[4] = '\xff' assert a[1] == 0xff assert len(a.data) == 16 + assert type(a.data) is buffer def test_explicit_dtype_conversion(self): from numpypy import array diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -2,7 +2,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.buffer import RWBuffer +from pypy.interpreter.buffer import Buffer from rpython.rlib import rmmap, rarithmetic from rpython.rlib.rmmap import RValueError, RTypeError, RMMapError @@ -19,7 +19,7 @@ def readbuf_w(self, space): self.check_valid() - return MMapBuffer(self.space, self.mmap) + return MMapBuffer(self.space, self.mmap, True) def close(self): self.mmap.close() @@ -286,10 +286,13 @@ mmap_error._dont_inline_ = True -class MMapBuffer(RWBuffer): - def __init__(self, space, mmap): +class MMapBuffer(Buffer): + _immutable_ = True + + def __init__(self, space, mmap, readonly): self.space = space self.mmap = mmap + self.readonly = readonly def getlength(self): return self.mmap.size @@ -303,7 +306,7 @@ if step == 1: return self.mmap.getslice(start, size) else: - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setitem(self, index, char): self.check_valid_writeable() @@ -313,14 +316,6 @@ self.check_valid_writeable() self.mmap.setslice(start, string) - def is_writable(self): - try: - self.mmap.check_writeable() - except RMMapError: - return False - else: - return True - def get_raw_address(self): self.check_valid() return self.mmap.data diff --git a/pypy/module/mmap/test/test_mmap.py 
b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -560,14 +560,24 @@ m = mmap(f.fileno(), 6) m[5] = '?' b = buffer(m) - try: - b[:3] = "FOO" - except TypeError: # on CPython: "buffer is read-only" :-/ - skip("on CPython: buffer is read-only") + exc = raises(TypeError, 'b[:3] = "FOO"') + assert str(exc.value) == "buffer is read-only" m.close() f.seek(0) got = f.read() - assert got == "FOOba?" + assert got == "fooba?" + f.close() + + def test_memoryview(self): + from mmap import mmap + f = open(self.tmpname + "y", "w+") + f.write("foobar") + f.flush() + m = mmap(f.fileno(), 6) + m[5] = '?' + exc = raises(TypeError, memoryview, m) + assert 'buffer interface' in str(exc.value) + m.close() f.close() def test_offset(self): diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -59,10 +59,7 @@ if size < -1: raise OperationError(space.w_ValueError, space.wrap("size must be zero or positive")) - if isinstance(buf, buffer.RWBuffer): - buf = buffer.RWSubBuffer(buf, offset, size) - else: - buf = buffer.SubBuffer(buf, offset, size) + buf = buffer.SubBuffer(buf, offset, size) return W_Buffer(buf) def descr_len(self, space): @@ -77,7 +74,7 @@ @unwrap_spec(newstring='bufferstr') def descr_setitem(self, space, w_index, newstring): - if not isinstance(self.buf, buffer.RWBuffer): + if not self.buf.is_writable(): raise OperationError(space.w_TypeError, space.wrap("buffer is read-only")) _buffer_setitem(space, self.buf, w_index, newstring) @@ -118,7 +115,7 @@ return space.call_method(w_string, '__mul__', w_times) def descr_repr(self, space): - if isinstance(self.buf, buffer.RWBuffer): + if self.buf.is_writable(): info = 'read-write buffer' else: info = 'read-only buffer' diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ 
b/pypy/objspace/std/bytearrayobject.py @@ -5,7 +5,7 @@ from rpython.rlib.rstring import StringBuilder from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import RWBuffer +from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.signature import Signature @@ -1150,9 +1150,12 @@ start += step -class BytearrayBuffer(RWBuffer): +class BytearrayBuffer(Buffer): + _immutable_ = True + def __init__(self, data): self.data = data + self.readonly = False def getlength(self): return len(self.data) diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -85,11 +85,7 @@ size = stop - start if size < 0: size = 0 - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf = buffer.RWSubBuffer(buf, start, size) - else: - buf = buffer.SubBuffer(buf, start, size) + buf = buffer.SubBuffer(self.buf, start, size) return W_MemoryView(buf) def descr_tobytes(self, space): @@ -116,7 +112,7 @@ @unwrap_spec(newstring='bufferstr') def descr_setitem(self, space, w_index, newstring): - if not isinstance(self.buf, buffer.RWBuffer): + if not self.buf.is_writable(): raise OperationError(space.w_TypeError, space.wrap("cannot modify read-only memory")) _buffer_setitem(space, self.buf, w_index, newstring) @@ -134,7 +130,7 @@ return space.wrap(1) def w_is_readonly(self, space): - return space.wrap(not isinstance(self.buf, buffer.RWBuffer)) + return space.wrap(not self.buf.is_writable()) def w_get_shape(self, space): return space.newtuple([space.wrap(self.getlength())]) diff --git a/pypy/objspace/std/test/test_bufferobject.py b/pypy/objspace/std/test/test_bufferobject.py --- a/pypy/objspace/std/test/test_bufferobject.py +++ b/pypy/objspace/std/test/test_bufferobject.py @@ -116,23 +116,27 @@ assert b[0] == 'w' assert b[:] == 
'world' raises(IndexError, 'b[5]') - b[0] = 'W' - assert str(b) == 'World' - assert a.tostring() == 'hello World' - b[:] = '12345' - assert a.tostring() == 'hello 12345' - raises(IndexError, 'b[5] = "."') - b[4:2] = '' - assert a.tostring() == 'hello 12345' + exc = raises(TypeError, "b[0] = 'W'") + assert str(exc.value) == "buffer is read-only" + exc = raises(TypeError, "b[:] = '12345'") + assert str(exc.value) == "buffer is read-only" + exc = raises(TypeError, 'b[5] = "."') + assert str(exc.value) == "buffer is read-only" + exc = raises(TypeError, "b[4:2] = ''") + assert str(exc.value) == "buffer is read-only" + assert str(b) == 'world' + assert a.tostring() == 'hello world' b = buffer(b, 2) assert len(b) == 3 - assert b[0] == '3' - assert b[:] == '345' + assert b[0] == 'r' + assert b[:] == 'rld' raises(IndexError, 'b[3]') - b[1] = 'X' - assert a.tostring() == 'hello 123X5' - raises(IndexError, 'b[3] = "."') + exc = raises(TypeError, "b[1] = 'X'") + assert str(exc.value) == "buffer is read-only" + exc = raises(TypeError, 'b[3] = "."') + assert str(exc.value) == "buffer is read-only" + assert a.tostring() == 'hello world' a = array.array("c", 'hello world') b = buffer(a, 1, 8) @@ -140,28 +144,33 @@ assert b[0] == 'e' assert b[:] == 'ello wor' raises(IndexError, 'b[8]') - b[0] = 'E' - assert str(b) == 'Ello wor' - assert a.tostring() == 'hEllo world' - b[:] = '12345678' - assert a.tostring() == 'h12345678ld' - raises(IndexError, 'b[8] = "."') + exc = raises(TypeError, "b[0] = 'E'") + assert str(exc.value) == "buffer is read-only" + assert str(b) == 'ello wor' + assert a.tostring() == 'hello world' + exc = raises(TypeError, "b[:] = '12345678'") + assert str(exc.value) == "buffer is read-only" + assert a.tostring() == 'hello world' + exc = raises(TypeError, 'b[8] = "."') + assert str(exc.value) == "buffer is read-only" b = buffer(b, 2, 3) assert len(b) == 3 - assert b[2] == '5' - assert b[:] == '345' + assert b[2] == ' ' + assert b[:] == 'lo ' raises(IndexError, 
'b[3]') - b[1] = 'X' - assert a.tostring() == 'h123X5678ld' - raises(IndexError, 'b[3] = "."') + exc = raises(TypeError, "b[1] = 'X'") + assert str(exc.value) == "buffer is read-only" + assert a.tostring() == 'hello world' + exc = raises(TypeError, 'b[3] = "."') + assert str(exc.value) == "buffer is read-only" b = buffer(a, 55) assert len(b) == 0 assert b[:] == '' b = buffer(a, 6, 999) assert len(b) == 5 - assert b[:] == '678ld' + assert b[:] == 'world' raises(ValueError, buffer, a, -1) raises(ValueError, buffer, a, 0, -2) From noreply at buildbot.pypy.org Tue Apr 22 20:34:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:08 +0200 (CEST) Subject: [pypy-commit] pypy default: whitespace Message-ID: <20140422183408.6109E1D2658@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70865:6f1210b622ce Date: 2014-04-22 14:09 -0400 http://bitbucket.org/pypy/pypy/changeset/6f1210b622ce/ Log: whitespace diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -51,7 +51,7 @@ for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) - shutil.rmtree(self.temp_dir, True) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,7 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) - f.close() + f.close() def test_head(self): response = self.request( diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,6 
+1,7 @@ import os, sys, imp import tempfile, binascii + def get_hashed_dir(cfile): with open(cfile,'r') as fid: content = fid.read() @@ -15,7 +16,7 @@ output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) - return output_dir + return output_dir def _get_c_extension_suffix(): From noreply at buildbot.pypy.org Tue Apr 22 20:34:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:14 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: merge default Message-ID: <20140422183414.0C5471D281D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70866:586ce9aec76d Date: 2014-04-22 14:11 -0400 http://bitbucket.org/pypy/pypy/changeset/586ce9aec76d/ Log: merge default diff too long, truncating to 2000 out of 15033 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -64,6 +64,7 @@ ^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ +^pypy/goal/.+\.lib$ ^pypy/_cache$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -10,6 +10,7 @@ import tempfile import unittest import argparse +import gc from StringIO import StringIO @@ -47,6 +48,9 @@ def tearDown(self): os.chdir(self.old_dir) + for root, dirs, files in os.walk(self.temp_dir, topdown=False): + for name in files: + 
os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): diff --git a/lib-python/2.7/test/test_file.py b/lib-python/2.7/test/test_file.py --- a/lib-python/2.7/test/test_file.py +++ b/lib-python/2.7/test/test_file.py @@ -301,6 +301,7 @@ self.fail("readlines() after next() with empty buffer " "failed. Got %r, expected %r" % (line, testline)) # Reading after iteration hit EOF shouldn't hurt either + f.close() f = self.open(TESTFN, 'rb') try: for line in f: diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -162,6 +162,7 @@ # Remark: Do not perform more than one test per open file, # since that does NOT catch the readline error on Windows. data = 'xxx' + self.f.close() for mode in ['w', 'wb', 'a', 'ab']: for attr in ['read', 'readline', 'readlines']: self.f = open(TESTFN, mode) diff --git a/lib-python/2.7/test/test_genericpath.py b/lib-python/2.7/test/test_genericpath.py --- a/lib-python/2.7/test/test_genericpath.py +++ b/lib-python/2.7/test/test_genericpath.py @@ -231,9 +231,14 @@ unicwd = u'\xe7w\xf0' try: fsencoding = test_support.TESTFN_ENCODING or "ascii" - unicwd.encode(fsencoding) + asciival = unicwd.encode(fsencoding) + if fsencoding == "mbcs": + # http://bugs.python.org/issue850997 + v = asciival.find('?') + if v >= 0: + raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) except (AttributeError, UnicodeEncodeError): - # FS encoding is probably ASCII + # FS encoding is probably ASCII or windows and codepage is non-Latin1 pass else: with test_support.temp_cwd(unicwd): diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,6 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 
404) os.chmod(self.tempdir, 0755) + f.close() def test_head(self): response = self.request( diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,3 +1,5 @@ +import imp, os + try: import cpyext except ImportError: @@ -10,4 +12,12 @@ pass # obscure condition of _ctypes_test.py being imported by py.test else: import _pypy_testcapi - _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') + cfile = '_ctypes_test.c' + thisdir = os.path.dirname(__file__) + output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + try: + fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) + imp.load_module('_ctypes_test', fp, filename, description) + except ImportError: + print('could not find _ctypes_test in %s' % output_dir) + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,5 +1,23 @@ import os, sys, imp -import tempfile +import tempfile, binascii + + +def get_hashed_dir(cfile): + with open(cfile,'r') as fid: + content = fid.read() + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], content]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + def _get_c_extension_suffix(): for ext, mod, typ in imp.get_suffixes(): @@ -7,12 +25,13 @@ return ext -def compile_shared(csource, modulename): +def compile_shared(csource, modulename, output_dir=None): """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, and import it. 
""" thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() + if output_dir is None: + output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,7 +1,17 @@ +import imp, os + try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -else: - import _pypy_testcapi - _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') + +import _pypy_testcapi +cfile = '_testcapimodule.c' +thisdir = os.path.dirname(__file__) +output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + +try: + fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) + imp.load_module('_testcapi', fp, filename, description) +except ImportError: + _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -1,4 +1,3 @@ - """ This file provides some support for things like standard_c_lib and errno access, as portable as possible """ @@ -22,7 +21,7 @@ standard_c_lib._errno.argtypes = None def _where_is_errno(): return standard_c_lib._errno() - + elif sys.platform in ('linux2', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None @@ -42,5 +41,3 @@ def set_errno(value): errno_p = _where_is_errno() errno_p.contents.value = value - - diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -217,7 +217,7 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withmethodcache", True), + ("objspace.std.withtypeversion", True), ]), BoolOption("withrangelist", diff --git a/pypy/config/test/test_pypyoption.py 
b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -12,9 +12,9 @@ assert conf.objspace.usemodules.gc conf.objspace.std.withmapdict = True - assert conf.objspace.std.withmethodcache + assert conf.objspace.std.withtypeversion conf = get_pypy_config() - conf.objspace.std.withmethodcache = False + conf.objspace.std.withtypeversion = False py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") def test_conflicting_gcrootfinder(): diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,10 +1,12 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py -.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ .. _`pypy/bin/pyinteractive.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/pyinteractive.py @@ -35,7 +37,6 @@ .. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. _`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py -.. 
_`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py .. _`pypy/interpreter/pyparser`: @@ -49,21 +50,21 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. _`pypy/module/cppyy/capi/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/__init__.py +.. _`pypy/module/cppyy/capi/builtin_capi.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/builtin_capi.py +.. _`pypy/module/cppyy/include/capi.h`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/include/capi.h +.. _`pypy/module/test_lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/test_lib_pypy/ .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ -.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ .. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ -.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. _`pypy/objspace/std/bytesobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/bytesobject.py .. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py .. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. 
_`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py -.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. _`pypy/objspace/std/strbufobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/strbufobject.py .. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py -.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py -.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py .. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ -.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`rpython/annotator`: .. _`rpython/annotator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/annotator/ @@ -75,6 +76,11 @@ .. _`rpython/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/config/translationoption.py .. _`rpython/flowspace/`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/ .. _`rpython/flowspace/model.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/model.py +.. _`rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/ +.. _`rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/generation.py +.. _`rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/hybrid.py +.. _`rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/minimarkpage.py +.. _`rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/semispace.py .. _`rpython/rlib`: .. 
_`rpython/rlib/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/ .. _`rpython/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/listsort.py @@ -93,16 +99,12 @@ .. _`rpython/rtyper/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ .. _`rpython/rtyper/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/ .. _`rpython/rtyper/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/lltype.py -.. _`rpython/rtyper/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/ -.. _`rpython/rtyper/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/generation.py -.. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py -.. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py -.. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py .. _`rpython/rtyper/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rtyper.py .. _`rpython/rtyper/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/test/test_llinterp.py +.. _`rpython/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/rpython/tool/algo/ .. _`rpython/translator`: .. _`rpython/translator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/ .. 
_`rpython/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/backendopt/ diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -9,9 +9,3 @@ distribution.rst - dot-net.rst - - - - - diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -742,9 +742,9 @@ Testing modules in ``lib_pypy/`` -------------------------------- -You can go to the `lib_pypy/pypy_test/`_ directory and invoke the testing tool +You can go to the `pypy/module/test_lib_pypy/`_ directory and invoke the testing tool ("py.test" or "python ../../pypy/test_all.py") to run tests against the -lib_pypy hierarchy. Note, that tests in `lib_pypy/pypy_test/`_ are allowed +lib_pypy hierarchy. Note, that tests in `pypy/module/test_lib_pypy/`_ are allowed and encouraged to let their tests run at interpreter level although `lib_pypy/`_ modules eventually live at PyPy's application level. This allows us to quickly test our python-coded reimplementations @@ -835,15 +835,6 @@ web interface. .. _`development tracker`: https://bugs.pypy.org/ - -use your codespeak login or register ------------------------------------- - -If you have an existing codespeak account, you can use it to login within the -tracker. Else, you can `register with the tracker`_ easily. - - -.. _`register with the tracker`: https://bugs.pypy.org/user?@template=register .. _`roundup`: http://roundup.sourceforge.net/ diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst --- a/pypy/doc/config/opt.rst +++ b/pypy/doc/config/opt.rst @@ -46,5 +46,5 @@ The default level is `2`. -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser garbage collector`: http://hboehm.info/gc/ .. 
_`custom garbage collectors`: ../garbage_collection.html diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.txt --- a/pypy/doc/config/translation.backendopt.txt +++ b/pypy/doc/config/translation.backendopt.txt @@ -1,5 +1,5 @@ This group contains options about various backend optimization passes. Most of them are described in the `EU report about optimization`_ -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU report about optimization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -15,21 +15,21 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer - Matti Picus Hakan Ardo Benjamin Peterson - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns - Manuel Jacob Eric van Riet Paap Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen - Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann @@ -38,23 +38,23 @@ Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak - Romain Guillebert Guido Wesdorp Lawrence Oluyede - Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen Jason Creighton Alex Martelli @@ -71,6 +71,7 @@ Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas @@ -87,6 +88,7 @@ Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin Stefano Rivera @@ -95,13 +97,17 @@ Georg Brandl Bert Freudenberg 
Stian Andreassen + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. Muller - Laurence Tratt + Jeremy Thurgood + Gregor Wegberg Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -110,9 +116,7 @@ David Ripton Dusty Phillips Lukas Renggli - Edd Barrett Guenter Jantzen - Tobias Oberstein Ned Batchelder Amit Regmi Ben Young @@ -123,7 +127,6 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -134,7 +137,6 @@ Olivier Dormond Jared Grubb Karl Bartel - Tobias Pape Brian Dorsey Victor Stinner Andrews Medina @@ -146,9 +148,9 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen Jonathan David Riehl Stanislaw Halik @@ -161,7 +163,9 @@ Alexander Sedov Corbin Simpson Christopher Pope + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan Alexis Daboville @@ -170,6 +174,7 @@ Karl Ramm Pieter Zieschang Gabriel + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -180,6 +185,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -200,7 +206,6 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -212,28 +217,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -241,6 
+253,7 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths Mike Bayer Flavio Percoco Kristoffer Kleine diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst --- a/pypy/doc/cppyy_backend.rst +++ b/pypy/doc/cppyy_backend.rst @@ -51,3 +51,6 @@ to ``PATH``). In case of the former, include files are expected under ``$ROOTSYS/include`` and libraries under ``$ROOTSYS/lib``. + + +.. include:: _ref.txt diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -106,23 +106,43 @@ Differences related to garbage collection strategies ---------------------------------------------------- -Most of the garbage collectors used or implemented by PyPy are not based on +The garbage collectors used or implemented by PyPy are not based on reference counting, so the objects are not freed instantly when they are no longer reachable. The most obvious effect of this is that files are not promptly closed when they go out of scope. For files that are opened for writing, data can be left sitting in their output buffers for a while, making -the on-disk file appear empty or truncated. +the on-disk file appear empty or truncated. Moreover, you might reach your +OS's limit on the number of concurrently opened files. -Fixing this is essentially not possible without forcing a +Fixing this is essentially impossible without forcing a reference-counting approach to garbage collection. The effect that you get in CPython has clearly been described as a side-effect of the implementation and not a language design decision: programs relying on this are basically bogus. It would anyway be insane to try to enforce CPython's behavior in a language spec, given that it has no chance to be adopted by Jython or IronPython (or any other port of Python to Java or -.NET, like PyPy itself). +.NET). 
-This affects the precise time at which ``__del__`` methods are called, which +Even the naive idea of forcing a full GC when we're getting dangerously +close to the OS's limit can be very bad in some cases. If your program +leaks open files heavily, then it would work, but force a complete GC +cycle every n'th leaked file. The value of n is a constant, but the +program can take an arbitrary amount of memory, which makes a complete +GC cycle arbitrarily long. The end result is that PyPy would spend an +arbitrarily large fraction of its run time in the GC --- slowing down +the actual execution, not by 10% nor 100% nor 1000% but by essentially +any factor. + +To the best of our knowledge this problem has no better solution than +fixing the programs. If it occurs in 3rd-party code, this means going +to the authors and explaining the problem to them: they need to close +their open files in order to run on any non-CPython-based implementation +of Python. + +--------------------------------- + +Here are some more technical details. This issue affects the precise +time at which ``__del__`` methods are called, which is not reliable in PyPy (nor Jython nor IronPython). It also means that weak references may stay alive for a bit longer than expected. This makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less @@ -315,6 +335,15 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* on CPython, ``[].__add__`` is a ``method-wrapper``, and + ``list.__add__`` is a ``slot wrapper``. On PyPy these are normal + bound or unbound method objects. This can occasionally confuse some + tools that inspect built-in types. For example, the standard + library ``inspect`` module has a function ``ismethod()`` that returns + True on unbound method objects but False on method-wrappers or slot + wrappers. On PyPy we can't tell the difference, so + ``ismethod([].__add__) == ismethod(list.__add__) == True``. 
+ * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. Mutating the dict will change the type and vice versa. For builtin types, a dictionary will be returned that diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -47,7 +47,7 @@ `pypy/tool/`_ various utilities and hacks used from various places -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic +`rpython/tool/algo/`_ general-purpose algorithmic and mathematic tools `pypy/tool/pytest/`_ support code for our `testing methods`_ @@ -129,3 +129,4 @@ .. _Mono: http://www.mono-project.com/ .. _`"standard library"`: rlib.html .. _`graph viewer`: getting-started-dev.html#try-out-the-translator +.. include:: _ref.txt diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,5 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - ============================= lib_pypy/distributed features ============================= diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -51,6 +51,8 @@ .. function:: int pypy_execute_source_ptr(char* source, void* ptr); + .. note:: Not available in PyPy <= 2.2.1 + Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. @@ -100,9 +102,29 @@ Worked! +.. note:: If the compilation fails because of missing PyPy.h header file, + you are running PyPy <= 2.2.1, please see the section `Missing PyPy.h`_. 
+ +Missing PyPy.h +-------------- + +.. note:: PyPy.h is in the nightly builds and goes to new PyPy releases (>2.2.1). + +For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): + +.. code-block:: bash + + cd /opt/pypy/include + wget https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h + + More advanced example --------------------- +.. note:: This example depends on pypy_execute_source_ptr which is not available + in PyPy <= 2.2.1. You might want to see the alternative example + below. + Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. It's a bit longish, but it captures a gist what can be done with the PyPy @@ -161,6 +183,97 @@ is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` and fill the structure from Python side for the future use. +Alternative example +------------------- + +As ``pypy_execute_source_ptr`` is not available in PyPy 2.2.1, you might want to try +an alternative approach which relies on -export-dynamic flag to the GNU linker. +The downside to this approach is that it is platform dependent. + +.. 
code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + lib = ffi.verify('int callback(int (*func)(int));')\n\ + lib.callback(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source(source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + + +Make sure to pass -export-dynamic flag when compiling:: + + $ gcc -g -o x x.c -lpypy-c -L. -export-dynamic + $ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +Finding pypy_home +----------------- + +Function pypy_setup_home takes one parameter - the path to libpypy. There's +currently no "clean" way (pkg-config comes to mind) how to find this path. You +can try the following (GNU-specific) hack (don't forget to link against *dl*): + +.. 
code-block:: c + + #if !(_GNU_SOURCE) + #define _GNU_SOURCE + #endif + + #include + #include + #include + + // caller should free returned pointer to avoid memleaks + // returns NULL on error + char* guess_pypyhome() { + // glibc-only (dladdr is why we #define _GNU_SOURCE) + Dl_info info; + void *_rpython_startup_code = dlsym(0,"rpython_startup_code"); + if (_rpython_startup_code == 0) { + return 0; + } + if (dladdr(_rpython_startup_code, &info) != 0) { + const char* lib_path = info.dli_fname; + char* lib_realpath = realpath(lib_path, 0); + return lib_realpath; + } + return 0; + } + + Threading --------- diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -17,7 +17,7 @@ Read more in the `EuroPython 2006 sprint report`_. -.. _`EuroPython 2006 sprint report`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt +.. _`EuroPython 2006 sprint report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt PyPy at XP 2006 and Agile 2006 ================================================================== @@ -41,8 +41,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. _report: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.html -.. 
_announcement: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.txt PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -114,7 +114,7 @@ said they were interested in the outcome and would keep an eye on its progress. Read the `talk slides`_. -.. _`talk slides`: http://codespeak.net/pypy/extradoc/talk/solutions-linux-paris-2006.html +.. _`talk slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/solutions-linux-paris-2006.html PyPy Sprint in Palma De Mallorca 23rd - 29th January 2006 @@ -129,9 +129,9 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/sprint-announcement.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q1/002746.html -.. _`one for the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2006q1/002749.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.txt +.. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html +.. _`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html Preliminary EU reports released =============================== @@ -155,8 +155,8 @@ Michael and Carl have written a `report about the first half`_ and `one about the second half`_ of the sprint. *(12/18/2005)* -.. _`report about the first half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002656.html -.. _`one about the second half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002660.html +.. _`report about the first half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002656.html +.. 
_`one about the second half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002660.html PyPy release 0.8.0 =================== @@ -187,12 +187,12 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: http://codespeak.net/pypy/extradoc/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.txt .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style -.. _`report about day one`: http://codespeak.net/pipermail/pypy-dev/2005q4/002510.html -.. _`one about day two and three`: http://codespeak.net/pipermail/pypy-dev/2005q4/002512.html -.. _`the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2005q4/002514.html +.. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html +.. _`one about day two and three`: https://mail.python.org/pipermail/pypy-dev/2005-October/002512.html +.. _`the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2005-October/002514.html PyPy release 0.7.0 =================== @@ -217,15 +217,13 @@ Its main focus is translation of the whole PyPy interpreter to a low level language and reaching 2.4.1 Python compliance. The goal of the sprint is to release a first self-contained -PyPy-0.7 version. Carl has written a report about `day 1 - 3`_, -there are `some pictures`_ online and a `heidelberg summary report`_ -detailing some of the works that led to the successful release -of `pypy-0.7.0`_! +PyPy-0.7 version. Carl has written a report about `day 1 - 3`_ +and a `heidelberg summary report`_ detailing some of the works +that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.html -.. _`PyPy sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-sprint.html -.. 
_`day 1 - 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002287.html -.. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt +.. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html PyPy Hildesheim2 finished: first self-contained PyPy run! =========================================================== @@ -233,20 +231,16 @@ Up until 31st August we were in a PyPy sprint at `Trillke-Gut`_. Carl has written a `report about day 1`_, Holger about `day 2 and day 3`_ and Carl again about `day 4 and day 5`_, -On `day 6`_ Holger reports the `breakthrough`_: PyPy runs -on its own! Hurray_!. And Carl finally reports about the winding +On `day 6`_ Holger reports the breakthrough: PyPy runs +on its own! Hurray!. And Carl finally reports about the winding down of `day 7`_ which saw us relaxing, discussing and generally -having a good time. You might want to look at the selected -`pictures from the sprint`_. +having a good time. -.. _`report about day 1`: http://codespeak.net/pipermail/pypy-dev/2005q3/002217.html -.. _`day 2 and day 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002220.html -.. _`day 4 and day 5`: http://codespeak.net/pipermail/pypy-dev/2005q3/002234.html -.. _`day 6`: http://codespeak.net/pipermail/pypy-dev/2005q3/002239.html -.. _`day 7`: http://codespeak.net/pipermail/pypy-dev/2005q3/002245.html -.. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg -.. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html -.. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ +.. _`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html +.. 
_`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html +.. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html +.. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html +.. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html .. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished @@ -264,15 +258,15 @@ the LLVM backends and type inference in general. *(07/13/2005)* -.. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html -.. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html -.. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev +.. _`day 1`: https://mail.python.org/pipermail/pypy-dev/2005-June/002169.html +.. _`day 2`: https://mail.python.org/pipermail/pypy-dev/2005-June/002171.html +.. _`day 3`: https://mail.python.org/pipermail/pypy-dev/2005-June/002172.html +.. _`pypy-dev`: https://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-announcement.html -.. _`list of people coming`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-people.html +.. _`sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-announcement.html +.. _`list of people coming`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-people.html Duesseldorf PyPy sprint 2-9 June 2006 ================================================================== @@ -285,8 +279,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. 
_`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.txt +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy at XP 2006 and Agile 2006 diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -79,7 +79,7 @@ .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf -.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 +.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf .. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf @@ -87,7 +87,7 @@ .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf .. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf -.. 
_`Compiling Dynamic Language Implementations`: http://codespeak.net/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`Compiling Dynamic Language Implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf Talks and Presentations @@ -258,24 +258,24 @@ .. _`PyCon 2010`: http://morepypy.blogspot.com/2010/02/pycon-2010-report.html .. _`RuPy 2009`: http://morepypy.blogspot.com/2009/11/pypy-on-rupy-2009.html -.. _`PyPy 3000`: http://codespeak.net/pypy/extradoc/talk/ep2006/pypy3000.txt -.. _`What can PyPy do for you`: http://codespeak.net/pypy/extradoc/talk/ep2006/usecases-slides.html -.. _`PyPy introduction at EuroPython 2006`: http://codespeak.net/pypy/extradoc/talk/ep2006/intro.pdf -.. _`PyPy - the new Python implementation on the block`: http://codespeak.net/pypy/extradoc/talk/22c3/hpk-tech.html -.. _`PyPy development method`: http://codespeak.net/pypy/extradoc/talk/pycon2006/method_talk.html -.. _`PyPy intro`: http://codespeak.net/pypy/extradoc/talk/accu2006/accu-2006.pdf -.. _oscon2003-paper: http://codespeak.net/pypy/extradoc/talk/oscon2003-paper.html -.. _`Architecture introduction slides`: http://codespeak.net/pypy/extradoc/talk/amsterdam-sprint-intro.pdf -.. _`EU funding for FOSS`: http://codespeak.net/pypy/extradoc/talk/2004-21C3-pypy-EU-hpk.pdf -.. _`py lib slides`: http://codespeak.net/pypy/extradoc/talk/2005-pycon-py.pdf -.. _`PyCon 2005`: http://codespeak.net/pypy/extradoc/talk/pypy-talk-pycon2005/README.html -.. _`Trouble in Paradise`: http://codespeak.net/pypy/extradoc/talk/agile2006/during-oss-sprints_talk.pdf -.. _`Sprint Driven Development`: http://codespeak.net/pypy/extradoc/talk/xp2006/during-xp2006-sprints.pdf -.. _`Kill -1`: http://codespeak.net/pypy/extradoc/talk/ep2006/kill_1_agiletalk.pdf -.. _`Open Source, EU-Funding and Agile Methods`: http://codespeak.net/pypy/extradoc/talk/22c3/agility.pdf -.. 
_`PyPy Status`: http://codespeak.net/pypy/extradoc/talk/vancouver/talk.html +.. _`PyPy 3000`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt +.. _`What can PyPy do for you`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.txt +.. _`PyPy introduction at EuroPython 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf +.. _`PyPy - the new Python implementation on the block`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.txt +.. _`PyPy development method`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.txt +.. _`PyPy intro`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf +.. _oscon2003-paper: https://bitbucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.txt +.. _`Architecture introduction slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf +.. _`EU funding for FOSS`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf +.. _`py lib slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf +.. _`PyCon 2005`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.txt +.. _`Trouble in Paradise`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf +.. _`Sprint Driven Development`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf +.. _`Kill -1`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf +.. _`Open Source, EU-Funding and Agile Methods`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf +.. _`PyPy Status`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/vancouver/ .. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf -.. _`PyPy's VM Approach`: http://codespeak.net/pypy/extradoc/talk/dls2006/talk.html +.. 
_`PyPy's VM Approach`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/ .. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ .. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ @@ -356,8 +356,6 @@ .. _`transparent dynamic optimization`: http://www.hpl.hp.com/techreports/1999/HPL-1999-77.pdf .. _Dynamo: http://www.hpl.hp.com/techreports/1999/HPL-1999-78.pdf .. _testdesign: coding-guide.html#test-design -.. _feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html -.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ .. _IronPython: http://ironpython.codeplex.com/ .. _`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -318,7 +318,7 @@ To read more about the RPython limitations read the `RPython description`_. -.. _`RPython description`: coding-guide.html#restricted-python +.. _`RPython description`: coding-guide.html#rpython-definition --------------------------------------------------------------- Does RPython have anything to do with Zope's Restricted Python? @@ -429,12 +429,27 @@ Could we use LLVM? ------------------ -There is a (static) translation backend using LLVM in the branch -``llvm-translation-backend``. It can translate PyPy with or without the JIT on -Linux. +In theory yes. But we tried to use it 5 or 6 times already, as a +translation backend or as a JIT backend --- and failed each time. -Using LLVM as our JIT backend looks interesting as well -- we made an attempt, -but it failed: LLVM has no way to patch the generated machine code. 
+In more details: using LLVM as a (static) translation backend is +pointless nowadays because you can generate C code and compile it with +clang. (Note that compiling PyPy with clang gives a result that is not +faster than compiling it with gcc.) We might in theory get extra +benefits from LLVM's GC integration, but this requires more work on the +LLVM side before it would be remotely useful. Anyway, it could be +interfaced via a custom primitive in the C code. (The latest such +experimental backend is in the branch ``llvm-translation-backend``, +which can translate PyPy with or without the JIT on Linux.) + +On the other hand, using LLVM as our JIT backend looks interesting as +well --- but again we made an attempt, and it failed: LLVM has no way to +patch the generated machine code. + +So the position of the core PyPy developers is that if anyone wants to +make an N+1'th attempt with LLVM, they are welcome, and will be happy to +provide help in the IRC channel, but they are left with the burden of proof +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? @@ -444,6 +459,19 @@ .. _`getting-started`: getting-started-python.html +------------------------------------------ +Compiling PyPy swaps or runs out of memory +------------------------------------------ + +This is documented (here__ and here__). It needs 4 GB of RAM to run +"rpython targetpypystandalone" on top of PyPy, a bit more when running +on CPython. If you have less than 4 GB it will just swap forever (or +fail if you don't have enough swap). On 32-bit, divide the numbers by +two. + +.. __: http://pypy.org/download.html#building-from-source +.. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter + .. 
_`how do I compile my own interpreters`: ------------------------------------- diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -14,7 +14,7 @@ The present document describes the specific garbage collectors that we wrote in our framework. -.. _`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU-report on this topic`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf Garbage collectors currently written for the GC framework diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -222,7 +222,7 @@ PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use for other projects. +`_ and use for other projects. The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -264,7 +264,7 @@ interpreter. .. _`py.test testing tool`: http://pytest.org -.. _`py.test usage and invocations`: http://pytest.org/usage.html#usage +.. _`py.test usage and invocations`: http://pytest.org/latest/usage.html#usage Special Introspection Features of the Untranslated Python Interpreter --------------------------------------------------------------------- @@ -389,7 +389,7 @@ .. _`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html -.. _`py library`: http://pylib.org +.. _`py library`: http://pylib.readthedocs.org/ .. _`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,5 +1,3 @@ -.. 
include:: needswork.txt - .. _glossary: ******** diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -92,7 +92,9 @@ `D07.1 Massive Parallelism and Translation Aspects`_ is a report about PyPy's optimization efforts, garbage collectors and massive parallelism (stackless) features. This report refers to the paper `PyPy's approach -to virtual machine construction`_. *(2007-02-28)* +to virtual machine construction`_. Extends the content previously +available in the document "Memory management and threading models as +translation aspects -- solutions and challenges". *(2007-02-28)* diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -103,7 +103,7 @@ .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org -.. _here: http://tismerysoft.de/pypy/irc-logs/pypy +.. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -256,7 +256,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. 
+ + diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -177,6 +177,12 @@ .. __: https://bitbucket.org/pypy/extradoc/src/tip/talk/icooolps2009/bolz-tracing-jit-final.pdf -as well as the `blog posts with the JIT tag.`__ +Chapters 5 and 6 of `Antonio Cuni's PhD thesis`__ contain an overview of how +Tracing JITs work in general and more information about the concrete case of +PyPy's JIT. + +.. __: http://antocuni.eu/download/antocuni-phd-thesis.pdf + +The `blog posts with the JIT tag`__ might also contain additional information. .. __: http://morepypy.blogspot.com/search/label/jit diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -185,6 +185,6 @@ .. _`standard object space`: objspace.html#the-standard-object-space .. [D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy - EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf + EU-Report, 2007, https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf .. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -339,44 +339,35 @@ Object types ------------ -The larger part of the `pypy/objspace/std/`_ package defines and implements the -library of Python's standard built-in object types. Each type (int, float, -list, tuple, str, type, etc.) is typically implemented by two modules: +The larger part of the `pypy/objspace/std/`_ package defines and +implements the library of Python's standard built-in object types. Each +type ``xxx`` (int, float, list, tuple, str, type, etc.) is typically +implemented in the module ``xxxobject.py``.
-* the *type specification* module, which for a type ``xxx`` is called ``xxxtype.py``; +The ``W_AbstractXxxObject`` class, when present, is the abstract base +class, which mainly defines what appears on the Python-level type +object. There are then actual implementations as subclasses, which are +called ``W_XxxObject`` or some variant for the cases where we have +several different implementations. For example, +`pypy/objspace/std/bytesobject.py`_ defines ``W_AbstractBytesObject``, +which contains everything needed to build the ``str`` app-level type; +and there are subclasses ``W_BytesObject`` (the usual string) and +``W_StringBufferObject`` (a special implementation tweaked for repeated +additions, in `pypy/objspace/std/strbufobject.py`_). For mutable data +types like lists and dictionaries, we have a single class +``W_ListObject`` or ``W_DictMultiObject`` which has an indirection to +the real data and a strategy; the strategy can change as the content of +the object changes. -* the *implementation* module, called ``xxxobject.py``. - -The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods -specific to lists, like ``append()``. - -A particular method implemented by all types is the ``__new__()`` special -method, which in Python's new-style-classes world is responsible for creating -an instance of the type. In PyPy, ``__new__()`` locates and imports the module -implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ -defines ``__new__()`` to import the class ``W_TupleObject`` from -`pypy/objspace/std/tupleobject.py`_ and instantiate it. 
The `pypy/objspace/std/tupleobject.py`_ then contains a -"real" implementation of tuples: the way the data is stored in the -``W_TupleObject`` class, how the operations work, etc. - -The goal of the above module layout is to cleanly separate the Python -type object, visible to the user, and the actual implementation of its -instances. It is possible to provide *several* implementations of the -instances of the same Python type, by writing several ``W_XxxObject`` -classes. Every place that instantiates a new object of that Python type -can decide which ``W_XxxObject`` class to instantiate. - -From the user's point of view, the multiple internal ``W_XxxObject`` -classes are not visible: they are still all instances of exactly the -same Python type. PyPy knows that (e.g.) the application-level type of -its interpreter-level ``W_StringObject`` instances is str because -there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all -other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringtype.py`_. +From the user's point of view, even when there are several +``W_AbstractXxxObject`` subclasses, this is not visible: at the +app-level, they are still all instances of exactly the same Python type. +PyPy knows that (e.g.) the application-level type of its +interpreter-level ``W_BytesObject`` instances is str because there is a +``typedef`` class attribute in ``W_BytesObject`` which points back to +the string type specification from `pypy/objspace/std/bytesobject.py`_; +all other implementations of strings use the same ``typedef`` from +`pypy/objspace/std/bytesobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -387,6 +378,9 @@ Multimethods ------------ +*Note: multimethods are on the way out. 
Although they look cool, +they failed to provide enough benefits.* + The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. For a description of the multimethod variant that we implemented and which features it supports, @@ -491,7 +485,7 @@ Introduction ------------ -The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `rpython/flowspace/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py +.. 
_`extfunc.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/extfunc.py diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,30 +42,30 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): http://codespeak.net/pypy/extradoc/sprintinfo/HildesheimReport.html - .. _Gothenburg (May 2003): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2003-sprintreport.txt - .. _LovainLaNeuve (June 2003): http://codespeak.net/pypy/extradoc/sprintinfo/LouvainLaNeuveReport.txt - .. _Berlin (Sept 2003): http://codespeak.net/pypy/extradoc/sprintinfo/BerlinReport.txt - .. _Amsterdam (Dec 2003): http://codespeak.net/pypy/extradoc/sprintinfo/AmsterdamReport.txt - .. _Vilnius (Nov 2004): http://codespeak.net/pypy/extradoc/sprintinfo/vilnius-2004-sprintreport.txt - .. _Leysin (Jan 2005): http://codespeak.net/pypy/extradoc/sprintinfo/LeysinReport.txt - .. _PyCon/Washington (March 2005): http://codespeak.net/pypy/extradoc/sprintinfo/pycon_sprint_report.txt - .. _Europython/Gothenburg (June 2005): http://codespeak.net/pypy/extradoc/sprintinfo/ep2005-sprintreport.txt - .. _Hildesheim (July 2005): http://codespeak.net/pypy/extradoc/sprintinfo/hildesheim2005-sprintreport.txt - .. _Heidelberg (Aug 2005): http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.txt - .. _Paris (Oct 2005): http://codespeak.net/pypy/extradoc/sprintinfo/paris/paris-report.txt - .. _Gothenburg (Dec 2005): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt - .. _Mallorca (Jan 2006): http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/mallorca-sprintreport.txt - .. _LouvainLaNeuve (March 2006): http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.txt - .. _Leysin (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2006-sprintreport.txt - .. 
_Tokyo (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/sprint-report.txt - .. _Düsseldorf (June 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/report1.txt - .. _Europython/Geneva (July 2006): http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt - .. _Düsseldorf (October 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/report.txt - .. _`Leysin (January 2007)`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/report.txt - .. _Hildesheim (Feb 2007): http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/sprint-report.txt - .. _`EU report writing sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/eu-report-sprint-report.txt - .. _`PyCon/Dallas (Feb 2006)`: http://codespeak.net/pypy/extradoc/sprintinfo/pycon06/sprint-report.txt + .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.txt + .. _Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt + .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt + .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt + .. _Amsterdam (Dec 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/AmsterdamReport.txt + .. _Vilnius (Nov 2004): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/vilnius-2004-sprintreport.txt + .. _Leysin (Jan 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LeysinReport.txt + .. _PyCon/Washington (March 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon_sprint_report.txt + .. _Europython/Gothenburg (June 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ep2005-sprintreport.txt + .. _Hildesheim (July 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/hildesheim2005-sprintreport.txt + .. 
_Heidelberg (Aug 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt + .. _Paris (Oct 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris/paris-report.txt + .. _Gothenburg (Dec 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt + .. _Mallorca (Jan 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/mallorca-sprintreport.txt + .. _LouvainLaNeuve (March 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt + .. _Leysin (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2006-sprintreport.txt + .. _Tokyo (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/tokyo/sprint-report.txt + .. _Düsseldorf (June 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/report1.txt + .. _Europython/Geneva (July 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt + .. _Düsseldorf (October 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006b/report.txt + .. _`Leysin (January 2007)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2007/report.txt + .. _Hildesheim (Feb 2007): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/sprint-report.txt + .. _`EU report writing sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/eu-report-sprint-report.txt + .. _`PyCon/Dallas (Feb 2006)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon06/sprint-report.txt .. _`Göteborg (November 2007)`: http://morepypy.blogspot.com/2007_11_01_archive.html .. _`Leysin (January 2008)`: http://morepypy.blogspot.com/2008/01/leysin-winter-sport-sprint-started.html .. 
_`Berlin (May 2008)`: http://morepypy.blogspot.com/2008_05_01_archive.html diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -211,6 +211,9 @@ .. __: `recursion depth limit`_ +We also do not include any of the recent API additions to Stackless +Python, like ``set_atomic()``. Contributions welcome. + Recursion depth limit +++++++++++++++++++++ diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst --- a/pypy/doc/statistic/index.rst +++ b/pypy/doc/statistic/index.rst @@ -1,3 +1,7 @@ +.. warning:: + + This page is no longer updated, of historical interest only. + ======================= PyPy Project Statistics ======================= diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/stm.rst @@ -0,0 +1,284 @@ +====================== +Transactional Memory +====================== + +.. contents:: + + +This page is about ``pypy-stm``, a special in-development version of +PyPy which can run multiple independent CPU-hungry threads in the same +process in parallel. It is side-stepping what is known in the Python +world as the "global interpreter lock (GIL)" problem. + +"STM" stands for Software Transactional Memory, the technique used +internally. This page describes ``pypy-stm`` from the perspective of a +user, describes work in progress, and finally gives references to more +implementation details. + +This work was done mostly by Remi Meier and Armin Rigo. Thanks to all +donors for crowd-funding the work so far! Please have a look at the +`2nd call for donation`_. + +.. _`2nd call for donation`: http://pypy.org/tmdonate2.html + + +Introduction +============ + +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats +listed below, it should be in theory within 25%-50% slower than a +regular PyPy, comparing the JIT version in both cases. 
It is called +STM for Software Transactional Memory, which is the internal technique +used (see `Reference to implementation details`_). + +What you get in exchange for this slow-down is that ``pypy-stm`` runs +any multithreaded Python program on multiple CPUs at once. Programs +running two threads or more in parallel should ideally run faster than +in a regular PyPy, either now or soon as issues are fixed. In one way, +that's all there is to it: this is a GIL-less Python, feel free to +`download and try it`__. However, the deeper idea behind the +``pypy-stm`` project is to improve what is so far the state-of-the-art +for using multiple CPUs, which for cases where separate processes don't +work is done by writing explicitly multi-threaded programs. Instead, +``pypy-stm`` is pushing forward an approach to *hide* the threads, as +described below in `atomic sections`_. + + +.. __: + +Current status +============== + +**pypy-stm requires 64-bit Linux for now.** + +Development is done in the branch `stmgc-c7`_. If you are only +interested in trying it out, you can download a Ubuntu 12.04 binary +here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, +but not stripped of debug symbols). The current version supports four +"segments", which means that it will run up to four threads in parallel, +in other words it is running a thread pool up to 4 threads emulating normal +threads. + +To build a version from sources, you first need to compile a custom +version of clang; we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +for all checkouts). Then apply all the patches in `this directory`__: +they are fixes for the very extensive usage that pypy-stm does of a +clang-only feature (without them, you get crashes of clang). Then get +the branch `stmgc-c7`_ of PyPy and run:: + + rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py + +.. 
_`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ +.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/misc/ +.. __: http://clang.llvm.org/get_started.html +.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ + + +Caveats: + +* So far, small examples work fine, but there are still a number of + bugs. We're busy fixing them. + +* Currently limited to 1.5 GB of RAM (this is just a parameter in + `core.h`__). Memory overflows are not detected correctly, so may + cause segmentation faults. + +* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, + which is "only" bad). Moreover, you should run it with a command like + ``pypy-stm --jit trace_limit=60000 args...``; the default value of + 6000 for ``trace_limit`` is currently too low (6000 should become + reasonable again as we improve). Also, in order to produce machine + code, the JIT needs to enter a special single-threaded mode for now. + This all means that you *will* get very bad performance results if + your program doesn't run for *many* seconds for now. + +* The GC is new; although clearly inspired by PyPy's regular GC, it + misses a number of optimizations for now. Programs allocating large + numbers of small objects that don't immediately die, as well as + programs that modify large lists or dicts, suffer from these missing + optimizations. + +* The GC has no support for destructors: the ``__del__`` method is never + called (including on file objects, which won't be closed for you). + This is of course temporary. Also, weakrefs might appear to work a + bit strangely for now (staying alive even though ``gc.collect()``, or + even dying but then un-dying for a short time before dying again). + +* The STM system is based on very efficient read/write barriers, which + are mostly done (their placement could be improved a bit in + JIT-generated machine code). But the overall bookkeeping logic could + see more improvements (see Statistics_ below). 
+ +* You can use `atomic sections`_, but the most visible missing thing is + that you don't get reports about the "conflicts" you get. This would + be the first thing that you need in order to start using atomic + sections more extensively. Also, for now: for better results, try to + explicitly force a transaction break just before (and possibly after) + each large atomic section, with ``time.sleep(0)``. + +* Forking the process is slow because the complete memory needs to be + copied manually right now. + +* Very long-running processes should eventually crash on an assertion + error because of a non-implemented overflow of an internal 29-bit + number, but this requires at the very least ten hours --- more + probably, several days or more. + +.. _`report bugs`: https://bugs.pypy.org/ +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h + + + +Statistics +========== + +When a non-main thread finishes, you get statistics printed to stderr, +looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + spin loop 129512 0.094 s + +The first number is a counter; the second number gives the associated +time (the amount of real time that the thread was in this state; the sum +of all the times should be equal to the total time between the thread's +start and the thread's end). The most important points are "run +committed", which gives the amount of useful work, and "outside +transaction", which should give the time spent e.g. 
in library calls +(right now it seems to be a bit larger than that; to investigate). +Everything else is overhead of various forms. (Short-, medium- and +long-term future work involves reducing this overhead :-) + +These statistics are not printed out for the main thread, for now. + + +Atomic sections +=============== + +While one of the goal of pypy-stm is to give a GIL-free but otherwise +unmodified Python, the other goal is to push for a better way to use +multithreading. For this, you (as the Python programmer) get an API +in the ``__pypy__.thread`` submodule: + +* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in + a ``with __pypy__.thread.atomic:`` statement). It runs the whole + block of code without breaking the current transaction --- from + the point of view of a regular CPython/PyPy, this is equivalent to + saying that the GIL will not be released at all between the start and + the end of this block of code. + +The obvious usage is to use atomic blocks in the same way as one would +use locks: to protect changes to some shared data, you do them in a +``with atomic`` block, just like you would otherwise do them in a ``with +mylock`` block after ``mylock = thread.allocate_lock()``. This allows +you not to care about acquiring the correct locks in the correct order; +it is equivalent to having only one global lock. This is how +transactional memory is `generally described`__: as a way to efficiently +execute such atomic blocks, running them in parallel while giving the +illusion that they run in some serial order. + +.. __: http://en.wikipedia.org/wiki/Transactional_memory + +However, the less obvious intended usage of atomic sections is as a +wide-ranging replacement of explicit threads. You can turn a program +that is not multi-threaded at all into a program that uses threads +internally, together with large atomic sections to keep the behavior +unchanged. 
This capability can be hidden in a library or in the +framework you use; the end user's code does not need to be explicitly +aware of using threads. For a simple example of this, see +`transaction.py`_ in ``lib_pypy``. The idea is that if you have a +program where the function ``f(key, value)`` runs on every item of some +big dictionary, you can replace the loop with:: + + for key, value in bigdict.items(): + transaction.add(f, key, value) + transaction.run() + +This code runs the various calls to ``f(key, value)`` using a thread +pool, but every single call is done in an atomic section. The end +result is that the behavior should be exactly equivalent: you don't get +any extra multithreading issue. + +This approach hides the notion of threads from the end programmer, +including all the hard multithreading-related issues. This is not the +first alternative approach to explicit threads; for example, OpenMP_ is +one. However, it is one of the first ones which does not require the +code to be organized in a particular fashion. Instead, it works on any +Python program which has got latent, imperfect parallelism. Ideally, it +only requires that the end programmer identifies where this parallelism +is likely to be found, and communicates it to the system, using for +example the ``transaction.add()`` scheme. + +.. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +.. _OpenMP: http://en.wikipedia.org/wiki/OpenMP + +================== + +Other APIs in pypy-stm: + +* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" + in this pypy-stm. This is the limit above which more threads will not + be able to execute on more cores. (Right now it is limited to 4 due + to inter-segment overhead, but should be increased in the future. It + should also be settable, and the default value should depend on the + number of actual CPUs.) 
+ +* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but + raises an exception if you attempt to nest it inside another + ``atomic``. + +* ``__pypy__.thread.signals_enabled``: a context manager that runs + its block with signals enabled. By default, signals are only + enabled in the main thread; a non-main thread will not receive + signals (this is like CPython). Enabling signals in non-main threads + is useful for libraries where threads are hidden and the end user is + not expecting his code to run elsewhere than in the main thread. + +Note that all of this API is (or will be) implemented in a regular PyPy +too: for example, ``with atomic`` will simply mean "don't release the +GIL" and ``getsegmentlimit()`` will return 1. + +================== + + +Reference to implementation details +=================================== + +The core of the implementation is in a separate C library called stmgc_, +in the c7_ subdirectory. Please see the `README.txt`_ for more +information. In particular, the notion of segment is discussed there. + +.. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ +.. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ +.. _`README.txt`: https://bitbucket.org/pypy/stmgc/raw/default/c7/README.txt + +PyPy itself adds on top of it the automatic placement of read__ and write__ +barriers and of `"becomes-inevitable-now" barriers`__, the logic to +`start/stop transactions as an RPython transformation`__ and as +`supporting`__ `C code`__, and the support in the JIT (mostly as a +`transformation step on the trace`__ and generation of custom assembler +in `assembler.py`__). + +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/readbarrier.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/memory/gctransform/stmframework.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/inevitable.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/jitdriver.py +.. 
__: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.h +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy @@ -51,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines)) + content = ".. This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -380,7 +380,7 @@ The RPython Typer ================= -https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ The RTyper is the first place where the choice of backend makes a difference; as outlined above we are assuming that ANSI C is the target. @@ -603,7 +603,7 @@ - using the `Boehm-Demers-Weiser conservative garbage collector`_ - using one of our custom `exact GCs implemented in RPython`_ -.. 
_`Boehm-Demers-Weiser conservative garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser conservative garbage collector`: http://hboehm.info/gc/ .. _`exact GCs implemented in RPython`: garbage_collection.html Almost all application-level Python code allocates objects at a very fast @@ -621,7 +621,7 @@ The C Back-End ============== -https://bitbucket.org/pypy/pypy/src/default/pypy/translator/c/ +https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ GenC is usually the most actively maintained backend -- everyone working on PyPy has a C compiler, for one thing -- and is usually where new features are diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -0,0 +1,154 @@ +======================= +What's new in PyPy 2.2+ +======================= + +.. this is a revision shortly after release-2.2.x +.. startrev: 4cd1bc8b3111 + +.. branch: release-2.2.x + +.. branch: numpy-newbyteorder +Clean up numpy types, add newbyteorder functionality + +.. branch: windows-packaging +Package tk/tcl runtime with win32 + +.. branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. 
branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. Since gcc shows minimal +speedup, it was not implemented. + +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! + +.. branch: NonConstant +Simplify implementation of NonConstant. + +.. branch: array-propagate-len +Kill some guards and operations in JIT traces by adding integer bounds +propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). + +.. branch: optimize-int-and +Optimize away INT_AND with constant mask of 1s that fully cover the bitrange +of other operand. + +.. branch: bounds-int-add-or +Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the +operands are positive to kill some guards + +.. branch: remove-intlong-smm +kills int/long/smalllong/bool multimethods + +.. branch: numpy-refactor +Cleanup micronumpy module + +.. branch: int_w-refactor +In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. 
We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. + +.. branch: test-58c3d8552833 +Fix for getarrayitem_gc_pure optimization + +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. + +.. branch: popen-pclose +Provide an exit status for popen'ed RFiles via pclose + +.. branch: stdlib-2.7.6 +Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. + +.. branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain + +.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. branch: win32-fixes4 +fix more tests for win32 + +.. branch: latest-improve-doc +Fix broken links in documentation + +.. branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field + +.. branch: issue1514 +Fix issues with reimporting builtin modules + +.. branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. branch: numpy-speed +Separate iterator from its state so jit can optimize better + +.. 
branch: numpy-searchsorted +Implement searchsorted without sorter kwarg + +.. branch: openbsd-lib-prefix +add 'lib' prefix to link libraries on OpenBSD + +.. branch: small-unroll-improvements +Improve optimization of small allocation-heavy loops in the JIT diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,119 +1,11 @@ ======================= -What's new in PyPy 2.2+ +What's new in PyPy 2.3+ From noreply at buildbot.pypy.org Tue Apr 22 20:34:15 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:15 +0200 (CEST) Subject: [pypy-commit] pypy default: remove unused code Message-ID: <20140422183415.295F91D281D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70867:8fffbe703bf0 Date: 2014-04-22 14:30 -0400 http://bitbucket.org/pypy/pypy/changeset/8fffbe703bf0/ Log: remove unused code diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1039,28 +1039,6 @@ init_defaults = [None, None, None] -# XXX consider moving to W_BytearrayObject or remove -def str_join__Bytearray_ANY(space, w_self, w_list): - list_w = space.listview(w_list) - if not list_w: - return W_BytearrayObject([]) - data = w_self.data - newdata = [] - for i in range(len(list_w)): - w_s = list_w[i] - if not (space.isinstance_w(w_s, space.w_str) or - space.isinstance_w(w_s, space.w_bytearray)): - raise oefmt(space.w_TypeError, - "sequence item %d: expected string, %T found", i, w_s) - - if data and i != 0: - newdata.extend(data) - newdata.extend([c for c in space.bufferstr_new_w(w_s)]) - return W_BytearrayObject(newdata) - -_space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) - - # XXX share the code again with the stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): if slicelength == 0: From noreply at 
buildbot.pypy.org Tue Apr 22 20:34:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 20:34:16 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: merge default Message-ID: <20140422183416.40DA91D281D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70868:5499bfba3fb8 Date: 2014-04-22 14:31 -0400 http://bitbucket.org/pypy/pypy/changeset/5499bfba3fb8/ Log: merge default diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1048,28 +1048,6 @@ init_defaults = [None, None, None] -# XXX consider moving to W_BytearrayObject or remove -def str_join__Bytearray_ANY(space, w_self, w_list): - list_w = space.listview(w_list) - if not list_w: - return W_BytearrayObject([]) - data = w_self.data - newdata = [] - for i in range(len(list_w)): - w_s = list_w[i] - if not (space.isinstance_w(w_s, space.w_str) or - space.isinstance_w(w_s, space.w_bytearray)): - raise oefmt(space.w_TypeError, - "sequence item %d: expected string, %T found", i, w_s) - - if data and i != 0: - newdata.extend(data) - newdata.extend([c for c in space.buffer_w(w_s).as_str()]) - return W_BytearrayObject(newdata) - -_space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) - - # XXX share the code again with the stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): if slicelength == 0: From noreply at buildbot.pypy.org Tue Apr 22 20:46:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 22 Apr 2014 20:46:39 +0200 (CEST) Subject: [pypy-commit] stmgc default: Give up trying to understand what was wrong with the lock-free Message-ID: <20140422184639.27BAB1D281D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1177:e4fa937a6860 Date: 2014-04-22 20:46 +0200 http://bitbucket.org/pypy/stmgc/changeset/e4fa937a6860/ Log: Give up trying to understand 
what was wrong with the lock-free model, and reintroduce locks (in a way that shouldn't have a noticeable cost, hopefully). diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -14,13 +14,10 @@ #define EVENTUALLY(condition) \ { \ if (!(condition)) { \ - int _i; \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_acquire(lock_pages_privatizing[_i]); \ + acquire_privatization_lock(); \ if (!(condition)) \ stm_fatalerror("fails: " #condition); \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_release(lock_pages_privatizing[_i]); \ + release_privatization_lock(); \ } \ } #endif @@ -337,9 +334,12 @@ /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other segments' own private pages. + + Must be called with the privatization lock acquired. */ assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + assert(STM_PSEGMENT->privatization_lock == 1); uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; @@ -381,26 +381,9 @@ memcpy(dst, src, copy_size); } else { - EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ + assert(memcmp(dst, src, copy_size) == 0); /* same page */ } - /* Do a full memory barrier. We must make sure that other - CPUs see the changes we did to the shared page ("S", - above) before we check the other segments below with - is_private_page(). Otherwise, we risk the following: - this CPU writes "S" but the writes are not visible yet; - then it checks is_private_page() and gets false, and does - nothing more; just afterwards another CPU sets its own - private_page bit and copies the page; but it risks doing - so before seeing the "S" writes. - - XXX what is the cost of this? If it's high, then we - should reorganize the code so that we buffer the second - parts and do them by bunch of N, after just one call to - __sync_synchronize()... 
- */ - __sync_synchronize(); - for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; @@ -417,7 +400,7 @@ memcpy(dst, src, copy_size); } else { - EVENTUALLY(!memcmp(dst, src, copy_size)); /* same page */ + assert(!memcmp(dst, src, copy_size)); /* same page */ } } @@ -431,12 +414,15 @@ if (STM_PSEGMENT->large_overflow_objects == NULL) return; + acquire_privatization_lock(); LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, synchronize_object_now(item)); + release_privatization_lock(); } static void push_modified_to_other_segments(void) { + acquire_privatization_lock(); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, @@ -456,6 +442,7 @@ private pages as needed */ synchronize_object_now(item); })); + release_privatization_lock(); list_clear(STM_PSEGMENT->modified_old_objects); } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -148,6 +148,12 @@ /* For sleeping contention management */ bool signal_when_done; + /* This lock is acquired when that segment calls synchronize_object_now. + On the rare event of a page_privatize(), the latter will acquire + all the locks in all segments. Otherwise, for the common case, + it's cheap. */ + uint8_t privatization_lock; + /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. 
*/ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; @@ -226,3 +232,17 @@ static void copy_object_to_shared(object_t *obj, int source_segment_num); static void synchronize_object_now(object_t *obj); + +static inline void acquire_privatization_lock(void) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); + spinlock_acquire(*lock); +} + +static inline void release_privatization_lock(void) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); + spinlock_release(*lock); +} diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -92,17 +92,20 @@ /* uncommon case: need to initialize some more pages */ spinlock_acquire(lock_growth_large); - if (addr + size > uninitialized_page_start) { + char *start = uninitialized_page_start; + if (addr + size > start) { uintptr_t npages; - npages = (addr + size - uninitialized_page_start) / 4096UL; + npages = (addr + size - start) / 4096UL; npages += GCPAGE_NUM_PAGES; - if (uninitialized_page_stop - uninitialized_page_start < - npages * 4096UL) { + if (uninitialized_page_stop - start < npages * 4096UL) { stm_fatalerror("out of memory!"); /* XXX */ } - setup_N_pages(uninitialized_page_start, npages); - __sync_synchronize(); - uninitialized_page_start += npages * 4096UL; + setup_N_pages(start, npages); + if (!__sync_bool_compare_and_swap(&uninitialized_page_start, + start, + start + npages * 4096UL)) { + stm_fatalerror("uninitialized_page_start changed?"); + } } spinlock_release(lock_growth_large); return addr; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -217,7 +217,9 @@ content); or add the object to 'large_overflow_objects'. 
*/ if (STM_PSEGMENT->minor_collect_will_commit_now) { + acquire_privatization_lock(); synchronize_object_now(obj); + release_privatization_lock(); } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -108,18 +108,20 @@ { /* check this thread's 'pages_privatized' bit */ uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); - struct page_shared_s *ps = &pages_privatized[pagenum - PAGE_FLAG_START]; + volatile struct page_shared_s *ps = (volatile struct page_shared_s *) + &pages_privatized[pagenum - PAGE_FLAG_START]; if (ps->by_segment & bitmask) { /* the page is already privatized; nothing to do */ return; } -#ifndef NDEBUG - spinlock_acquire(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + spinlock_acquire(get_priv_segment(i)->privatization_lock); + } /* add this thread's 'pages_privatized' bit */ - __sync_fetch_and_add(&ps->by_segment, bitmask); + ps->by_segment |= bitmask; /* "unmaps" the page to make the address space location correspond again to its underlying file offset (XXX later we should again @@ -133,9 +135,9 @@ /* copy the content from the shared (segment 0) source */ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); -#ifndef NDEBUG - spinlock_release(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + for (i = NB_SEGMENTS; i >= 1; i--) { + spinlock_release(get_priv_segment(i)->privatization_lock); + } } static void _page_do_reshare(long segnum, uintptr_t pagenum) diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -34,20 +34,6 @@ }; static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; -/* Rules for concurrent access to this array, possibly with is_private_page(): - - - we clear bits only during major collection, when all threads are - synchronized anyway - - - we set only the bit corresponding to our segment 
number, using - an atomic addition; and we do it _before_ we actually make the - page private. - - - concurrently, other threads checking the bits might (rarely) - get the answer 'true' to is_private_page() even though it is not - actually private yet. This inconsistency is in the direction - that we want for synchronize_object_now(). -*/ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); @@ -72,7 +58,3 @@ if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) page_reshare(pagenum); } - -#ifndef NDEBUG -static char lock_pages_privatizing[NB_SEGMENTS + 1] = { 0 }; -#endif From noreply at buildbot.pypy.org Tue Apr 22 20:48:18 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 22 Apr 2014 20:48:18 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Added tag release-2.3.0 for changeset 20e51c4389ed Message-ID: <20140422184818.40BD01D281D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r70869:0115d596792f Date: 2014-04-22 21:47 +0300 http://bitbucket.org/pypy/pypy/changeset/0115d596792f/ Log: Added tag release-2.3.0 for changeset 20e51c4389ed diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,4 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 From noreply at buildbot.pypy.org Tue Apr 22 20:56:30 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 22 Apr 2014 20:56:30 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Removed tag release-2.3.0 Message-ID: <20140422185630.CA4AA1D281D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r70870:e34676b6bac7 Date: 2014-04-22 21:55 +0300 http://bitbucket.org/pypy/pypy/changeset/e34676b6bac7/ Log: Removed tag release-2.3.0 diff --git a/.hgtags b/.hgtags 
--- a/.hgtags +++ b/.hgtags @@ -7,3 +7,5 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm 20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +0000000000000000000000000000000000000000 release-2.3.0 From noreply at buildbot.pypy.org Tue Apr 22 22:02:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 22 Apr 2014 22:02:08 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Add a comment Message-ID: <20140422200208.765391D282B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r489:f1702dc129f0 Date: 2014-04-22 22:02 +0200 http://bitbucket.org/pypy/pypy.org/changeset/f1702dc129f0/ Log: Add a comment diff --git a/source/README b/source/README --- a/source/README +++ b/source/README @@ -9,3 +9,6 @@ you'll get html output in the parent directory. you need an account Then you can check it in, login to pypy at pypy.org and go to pypy.org/htdocs/ and type "hg pull -u". + +(Note: nowadays I think there is a script that automatically does +"hg pull -u" every half hour, so logging in is not needed.) 
From noreply at buildbot.pypy.org Tue Apr 22 22:12:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 22:12:31 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_connect_with_timeout_fail on win32 Message-ID: <20140422201231.67A641D2380@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70871:559dc95b2b7d Date: 2014-04-22 13:11 -0700 http://bitbucket.org/pypy/pypy/changeset/559dc95b2b7d/ Log: fix test_connect_with_timeout_fail on win32 diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -623,7 +623,7 @@ if WIN32: WSAData = cConfig.WSAData - WSAStartup = external('WSAStartup', [rffi.INT, lltype.Ptr(WSAData)], + WSAStartup = external('WSAStartup', [rwin32.WORD, lltype.Ptr(WSAData)], rffi.INT) WSAGetLastError = external('WSAGetLastError', [], rffi.INT) diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -33,9 +33,11 @@ from rpython.rlib import rwin32 def rsocket_startup(): wsadata = lltype.malloc(_c.WSAData, flavor='raw', zero=True) - res = _c.WSAStartup(1, wsadata) - lltype.free(wsadata, flavor='raw') - assert res == 0 + try: + res = _c.WSAStartup(0x0101, wsadata) + assert res == 0 + finally: + lltype.free(wsadata, flavor='raw') else: def rsocket_startup(): pass diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -345,12 +345,13 @@ err = s.connect_ex(INETAddress('0.0.0.0', 0)) # should not work if errcodesok: assert err in (errno.ECONNREFUSED, errno.EADDRNOTAVAIL) + s.close() def test_connect_with_timeout_fail(): s = RSocket() s.settimeout(0.1) with py.test.raises(SocketTimeout): - s.connect(INETAddress('240.240.240.240', 12345)) + s.connect(INETAddress('10.10.10.10', 12345)) s.close() def test_connect_with_timeout_succeed(): From 
noreply at buildbot.pypy.org Tue Apr 22 22:40:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 22:40:35 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify Message-ID: <20140422204035.9119A1C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70872:8382ba2271f5 Date: 2014-04-22 13:36 -0700 http://bitbucket.org/pypy/pypy/changeset/8382ba2271f5/ Log: simplify diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -997,12 +997,8 @@ def get_msg(self): return _c.socket_strerror_str(self.errno) -if _c.WIN32: - def last_error(): - return CSocketError(rwin32.GetLastError()) -else: - def last_error(): - return CSocketError(_c.geterrno()) +def last_error(): + return CSocketError(_c.geterrno()) class GAIError(SocketErrorWithErrno): applevelerrcls = 'gaierror' From noreply at buildbot.pypy.org Tue Apr 22 22:40:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 22:40:36 +0200 (CEST) Subject: [pypy-commit] pypy default: socket errors have errno on win32 Message-ID: <20140422204036.C1C751C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70873:8ec8f81297a1 Date: 2014-04-22 13:38 -0700 http://bitbucket.org/pypy/pypy/changeset/8ec8f81297a1/ Log: socket errors have errno on win32 diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -3,11 +3,6 @@ from rpython.rlib.rsocket import * import socket as cpy_socket -# cannot test error codes in Win32 because ll2ctypes doesn't save -# the errors that WSAGetLastError() should return, making it likely -# that other operations stamped on it inbetween. 
-errcodesok = sys.platform != 'win32' - def setup_module(mod): rsocket_startup() @@ -257,14 +252,12 @@ assert addr.eq(sock.getsockname()) sock.listen(1) err = py.test.raises(CSocketError, sock.accept) - if errcodesok: - assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK) + assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK) s2 = RSocket(AF_INET, SOCK_STREAM) s2.setblocking(False) err = py.test.raises(CSocketError, s2.connect, addr) - if errcodesok: - assert err.value.errno in (errno.EINPROGRESS, errno.EWOULDBLOCK) + assert err.value.errno in (errno.EINPROGRESS, errno.EWOULDBLOCK) fd1, addr2 = sock.accept() s1 = RSocket(fd=fd1) @@ -274,8 +267,7 @@ assert addr2.eq(s1.getpeername()) err = s2.connect_ex(addr) # should now work - if errcodesok: - assert err in (0, errno.EISCONN) + assert err in (0, errno.EISCONN) s1.send('?') import time @@ -283,8 +275,7 @@ buf = s2.recv(100) assert buf == '?' err = py.test.raises(CSocketError, s1.recv, 5000) - if errcodesok: - assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK) + assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK) count = s2.send('x'*50000) assert 1 <= count <= 50000 while count: # Recv may return less than requested @@ -343,8 +334,7 @@ def test_connect_ex(): s = RSocket() err = s.connect_ex(INETAddress('0.0.0.0', 0)) # should not work - if errcodesok: - assert err in (errno.ECONNREFUSED, errno.EADDRNOTAVAIL) + assert err in (errno.ECONNREFUSED, errno.EADDRNOTAVAIL) s.close() def test_connect_with_timeout_fail(): From noreply at buildbot.pypy.org Tue Apr 22 23:37:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 23:37:39 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix test_buffer Message-ID: <20140422213740.0335B1D2380@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70874:bcf0a81007dc Date: 2014-04-22 17:34 -0400 http://bitbucket.org/pypy/pypy/changeset/bcf0a81007dc/ Log: fix test_buffer diff --git 
a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -8,18 +8,18 @@ def test_buffer_w(self): space = self.space w_hello = space.wrap('hello world') - buf = space.buffer_w(w_hello) + buf = space.buffer_w(w_hello, space.BUF_SIMPLE) assert buf.getlength() == 11 assert buf.as_str() == 'hello world' assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.newbuffer(buf)) is buf + assert space.buffer_w(space.newbuffer(buf), space.BUF_SIMPLE) is buf assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' - space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) + assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello, space.BUF_SIMPLE))) == 'hello world' + space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5), space.BUF_SIMPLE) def test_file_write(self): space = self.space - w_buffer = space.newbuffer(space.buffer_w(space.wrap('hello world'))) + w_buffer = space.newbuffer(space.buffer_w(space.wrap('hello world'), space.BUF_SIMPLE)) filename = str(testdir.join('test_file_write')) space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): f = open(filename, 'wb') From noreply at buildbot.pypy.org Tue Apr 22 23:37:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 22 Apr 2014 23:37:41 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix _io test_ztranslation Message-ID: <20140422213741.27F6D1D2380@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70875:6c4fb10688bd Date: 2014-04-22 17:36 -0400 http://bitbucket.org/pypy/pypy/changeset/6c4fb10688bd/ Log: fix _io test_ztranslation diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -346,7 +346,7 @@ 
ObjSpace.ExceptionTable + ['int', 'str', 'float', 'long', 'tuple', 'list', 'dict', 'unicode', 'complex', 'slice', 'bool', - 'basestring', 'object', 'bytearray']): + 'basestring', 'object', 'bytearray', 'buffer']): setattr(space, 'w_' + name, w_some_obj()) space.w_type = w_some_type() # From noreply at buildbot.pypy.org Wed Apr 23 00:40:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 23 Apr 2014 00:40:45 +0200 (CEST) Subject: [pypy-commit] pypy default: more test_rsocket fixes Message-ID: <20140422224045.D97A51C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70876:bda7970f05c4 Date: 2014-04-22 15:40 -0700 http://bitbucket.org/pypy/pypy/changeset/bda7970f05c4/ Log: more test_rsocket fixes diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -629,6 +629,10 @@ WSAGetLastError = external('WSAGetLastError', [], rffi.INT) geterrno = WSAGetLastError + # In tests, the first call to GetLastError is always wrong, because error + # is hidden by operations in ll2ctypes. Call it now. 
+ WSAGetLastError() + from rpython.rlib import rwin32 def socket_strerror_str(errno): diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -340,8 +340,12 @@ def test_connect_with_timeout_fail(): s = RSocket() s.settimeout(0.1) + if sys.platform == 'win32': + addr = '169.254.169.254' + else: + addr = '240.240.240.240' with py.test.raises(SocketTimeout): - s.connect(INETAddress('10.10.10.10', 12345)) + s.connect(INETAddress(addr, 12345)) s.close() def test_connect_with_timeout_succeed(): From noreply at buildbot.pypy.org Wed Apr 23 00:47:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 23 Apr 2014 00:47:23 +0200 (CEST) Subject: [pypy-commit] pypy default: don't release gil for WSAGetLastError() Message-ID: <20140422224723.0F96F1C070B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70877:7185ae6c5812 Date: 2014-04-22 15:46 -0700 http://bitbucket.org/pypy/pypy/changeset/7185ae6c5812/ Log: don't release gil for WSAGetLastError() diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -626,7 +626,7 @@ WSAStartup = external('WSAStartup', [rwin32.WORD, lltype.Ptr(WSAData)], rffi.INT) - WSAGetLastError = external('WSAGetLastError', [], rffi.INT) + WSAGetLastError = external('WSAGetLastError', [], rffi.INT, releasegil=False) geterrno = WSAGetLastError # In tests, the first call to GetLastError is always wrong, because error From noreply at buildbot.pypy.org Wed Apr 23 03:29:16 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 23 Apr 2014 03:29:16 +0200 (CEST) Subject: [pypy-commit] pypy py3k: minor cleanup Message-ID: <20140423012916.434D61D2658@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70878:e205cdaeb95d Date: 2014-04-22 18:25 -0700 http://bitbucket.org/pypy/pypy/changeset/e205cdaeb95d/ 
Log: minor cleanup diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py --- a/pypy/module/_posixsubprocess/interp_subprocess.py +++ b/pypy/module/_posixsubprocess/interp_subprocess.py @@ -1,12 +1,14 @@ -from rpython.rtyper.lltypesystem import rffi, lltype, llmemory +import os +import py + +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform as platform +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from pypy.interpreter.error import ( + OperationError, exception_from_errno, oefmt, wrap_oserror) +from pypy.interpreter.gateway import unwrap_spec from pypy.module.posix.interp_posix import run_fork_hooks -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import ( - OperationError, exception_from_errno, wrap_oserror) -from rpython.translator.tool.cbuild import ExternalCompilationInfo -import py -import os thisdir = py.path.local(__file__).dirpath() @@ -80,8 +82,7 @@ prev_fd = -1 for fd in result: if fd < 0 or fd < prev_fd or fd > 1 << 30: - raise OperationError(space.w_ValueError, space.wrap( - "bad value(s) in fds_to_keep")) + raise oefmt(space.w_ValueError, "bad value(s) in fds_to_keep") return result @@ -116,8 +117,7 @@ """ close_fds = space.is_true(w_close_fds) if close_fds and errpipe_write < 3: # precondition - raise OperationError(space.w_ValueError, space.wrap( - "errpipe_write must be >= 3")) + raise oefmt(space.w_ValueError, "errpipe_write must be >= 3") fds_to_keep = build_fd_sequence(space, w_fds_to_keep) # No need to disable GC in PyPy: From noreply at buildbot.pypy.org Wed Apr 23 03:29:17 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 23 Apr 2014 03:29:17 +0200 (CEST) Subject: [pypy-commit] pypy py3k: act more like CPython's _PySequence_BytesToCharpArray: trigger MemoryErrors Message-ID: <20140423012917.763E21D2658@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k 
Changeset: r70879:eb519320687f Date: 2014-04-22 18:25 -0700 http://bitbucket.org/pypy/pypy/changeset/eb519320687f/ Log: act more like CPython's _PySequence_BytesToCharpArray: trigger MemoryErrors with large __len__ results (unpackiterable doesn't). fixes test_capi.test_seq_bytes_to_charp_array diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py --- a/pypy/module/_posixsubprocess/interp_subprocess.py +++ b/pypy/module/_posixsubprocess/interp_subprocess.py @@ -86,6 +86,13 @@ return result +def seqstr2charpp(space, w_seqstr): + """Sequence of bytes -> char**, NULL terminated""" + w_iter = space.iter(w_seqstr) + return rffi.liststr2charpp([space.bytes0_w(space.next(w_iter)) + for i in range(space.len_w(w_seqstr))]) + + @unwrap_spec(p2cread=int, p2cwrite=int, c2pread=int, c2pwrite=int, errread=int, errwrite=int, errpipe_read=int, errpipe_write=int, restore_signals=int, call_setsid=int) @@ -135,19 +142,16 @@ # These conversions are done in the parent process to avoid allocating # or freeing memory in the child process. 
try: - exec_array = [space.bytes0_w(w_item) - for w_item in space.listview(w_executable_list)] - l_exec_array = rffi.liststr2charpp(exec_array) + l_exec_array = seqstr2charpp(space, w_executable_list) if not space.is_none(w_process_args): - argv = [space.fsencode_w(w_item) - for w_item in space.listview(w_process_args)] + w_iter = space.iter(w_process_args) + argv = [space.fsencode_w(space.next(w_iter)) + for i in range(space.len_w(w_process_args))] l_argv = rffi.liststr2charpp(argv) if not space.is_none(w_env_list): - envp = [space.bytes0_w(w_item) - for w_item in space.listview(w_env_list)] - l_envp = rffi.liststr2charpp(envp) + l_envp = seqstr2charpp(space, w_env_list) l_fds_to_keep = lltype.malloc(rffi.CArrayPtr(rffi.LONG).TO, len(fds_to_keep) + 1, flavor='raw') diff --git a/pypy/module/_posixsubprocess/test/test_subprocess.py b/pypy/module/_posixsubprocess/test/test_subprocess.py --- a/pypy/module/_posixsubprocess/test/test_subprocess.py +++ b/pypy/module/_posixsubprocess/test/test_subprocess.py @@ -57,3 +57,18 @@ parent_pgid = os.getpgid(os.getpid()) child_pgid = int(output) assert parent_pgid != child_pgid + + def test_cpython_issue15736(self): + import _posixsubprocess + import sys + n = 0 + class Z(object): + def __len__(self): + return sys.maxsize + n + def __getitem__(self, i): + return b'x' + raises(MemoryError, _posixsubprocess.fork_exec, + 1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17) + n = 1 + raises(OverflowError, _posixsubprocess.fork_exec, + 1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17) From noreply at buildbot.pypy.org Wed Apr 23 03:29:19 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 23 Apr 2014 03:29:19 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: merge py3k Message-ID: <20140423012919.30DAD1D2658@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70880:73b4a5d8348a Date: 2014-04-22 18:28 -0700 http://bitbucket.org/pypy/pypy/changeset/73b4a5d8348a/ Log: merge py3k 
diff too long, truncating to 2000 out of 2490 lines diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -40,7 +40,7 @@ ``pypy-stm`` project is to improve what is so far the state-of-the-art for using multiple CPUs, which for cases where separate processes don't work is done by writing explicitly multi-threaded programs. Instead, -``pypy-stm`` is flushing forward an approach to *hide* the threads, as +``pypy-stm`` is pushing forward an approach to *hide* the threads, as described below in `atomic sections`_. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -140,3 +140,6 @@ .. branch: numpypy-nditer Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. branch: numpy-speed +Separate iterator from its state so jit can optimize better diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -112,7 +112,7 @@ space.call_function(w_pathsetter, w_path) # import site try: - import_ = space.getattr(space.getbuiltinmodule('__builtin__'), + import_ = space.getattr(space.getbuiltinmodule('builtins'), space.wrap('__import__')) space.call_function(import_, space.wrap('site')) return 0 diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -26,5 +26,7 @@ # did not crash - the same globals pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) - pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + assert lltype.typeOf(res) == rffi.LONG + assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py 
b/pypy/module/_posixsubprocess/interp_subprocess.py --- a/pypy/module/_posixsubprocess/interp_subprocess.py +++ b/pypy/module/_posixsubprocess/interp_subprocess.py @@ -1,12 +1,14 @@ -from rpython.rtyper.lltypesystem import rffi, lltype, llmemory +import os +import py + +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform as platform +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from pypy.interpreter.error import ( + OperationError, exception_from_errno, oefmt, wrap_oserror) +from pypy.interpreter.gateway import unwrap_spec from pypy.module.posix.interp_posix import run_fork_hooks -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import ( - OperationError, exception_from_errno, wrap_oserror) -from rpython.translator.tool.cbuild import ExternalCompilationInfo -import py -import os thisdir = py.path.local(__file__).dirpath() @@ -80,11 +82,17 @@ prev_fd = -1 for fd in result: if fd < 0 or fd < prev_fd or fd > 1 << 30: - raise OperationError(space.w_ValueError, space.wrap( - "bad value(s) in fds_to_keep")) + raise oefmt(space.w_ValueError, "bad value(s) in fds_to_keep") return result +def seqstr2charpp(space, w_seqstr): + """Sequence of bytes -> char**, NULL terminated""" + w_iter = space.iter(w_seqstr) + return rffi.liststr2charpp([space.bytes0_w(space.next(w_iter)) + for i in range(space.len_w(w_seqstr))]) + + @unwrap_spec(p2cread=int, p2cwrite=int, c2pread=int, c2pwrite=int, errread=int, errwrite=int, errpipe_read=int, errpipe_write=int, restore_signals=int, call_setsid=int) @@ -116,8 +124,7 @@ """ close_fds = space.is_true(w_close_fds) if close_fds and errpipe_write < 3: # precondition - raise OperationError(space.w_ValueError, space.wrap( - "errpipe_write must be >= 3")) + raise oefmt(space.w_ValueError, "errpipe_write must be >= 3") fds_to_keep = build_fd_sequence(space, w_fds_to_keep) # No need to disable GC in PyPy: @@ -135,19 +142,16 @@ # These conversions are 
done in the parent process to avoid allocating # or freeing memory in the child process. try: - exec_array = [space.bytes0_w(w_item) - for w_item in space.listview(w_executable_list)] - l_exec_array = rffi.liststr2charpp(exec_array) + l_exec_array = seqstr2charpp(space, w_executable_list) if not space.is_none(w_process_args): - argv = [space.fsencode_w(w_item) - for w_item in space.listview(w_process_args)] + w_iter = space.iter(w_process_args) + argv = [space.fsencode_w(space.next(w_iter)) + for i in range(space.len_w(w_process_args))] l_argv = rffi.liststr2charpp(argv) if not space.is_none(w_env_list): - envp = [space.bytes0_w(w_item) - for w_item in space.listview(w_env_list)] - l_envp = rffi.liststr2charpp(envp) + l_envp = seqstr2charpp(space, w_env_list) l_fds_to_keep = lltype.malloc(rffi.CArrayPtr(rffi.LONG).TO, len(fds_to_keep) + 1, flavor='raw') diff --git a/pypy/module/_posixsubprocess/test/test_subprocess.py b/pypy/module/_posixsubprocess/test/test_subprocess.py --- a/pypy/module/_posixsubprocess/test/test_subprocess.py +++ b/pypy/module/_posixsubprocess/test/test_subprocess.py @@ -57,3 +57,18 @@ parent_pgid = os.getpgid(os.getpid()) child_pgid = int(output) assert parent_pgid != child_pgid + + def test_cpython_issue15736(self): + import _posixsubprocess + import sys + n = 0 + class Z(object): + def __len__(self): + return sys.maxsize + n + def __getitem__(self, i): + return b'x' + raises(MemoryError, _posixsubprocess.fork_exec, + 1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17) + n = 1 + raises(OverflowError, _posixsubprocess.fork_exec, + 1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -284,9 +284,11 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return ArrayIter(self, support.product(shape), shape, r[0], r[1]) - return ArrayIter(self, self.get_size(), 
self.shape, - self.strides, self.backstrides) + i = ArrayIter(self, support.product(shape), shape, r[0], r[1]) + else: + i = ArrayIter(self, self.get_size(), self.shape, + self.strides, self.backstrides) + return i, i.reset() def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop, ufuncs +from pypy.module.micronumpy import descriptor, loop from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter @@ -156,10 +156,10 @@ "string is smaller than requested size")) a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) - ai = a.create_iter() + ai, state = a.create_iter() for val in items: - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) return space.wrap(a) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -32,24 +32,23 @@ self.reset() def reset(self): - self.iter = self.base.create_iter() + self.iter, self.state = self.base.create_iter() def descr_len(self, space): - return space.wrap(self.base.get_size()) + return space.wrap(self.iter.size) def descr_next(self, space): - if self.iter.done(): + if self.iter.done(self.state): raise OperationError(space.w_StopIteration, space.w_None) - w_res = self.iter.getitem() - self.iter.next() + w_res = self.iter.getitem(self.state) + self.state = self.iter.next(self.state) return w_res def descr_index(self, space): - return space.wrap(self.iter.index) + return space.wrap(self.state.index) def 
descr_coords(self, space): - coords = self.base.to_coords(space, space.wrap(self.iter.index)) - return space.newtuple([space.wrap(c) for c in coords]) + return space.newtuple([space.wrap(c) for c in self.state.indices]) def descr_getitem(self, space, w_idx): if not (space.isinstance_w(w_idx, space.w_int) or @@ -58,13 +57,13 @@ self.reset() base = self.base start, stop, step, length = space.decode_index4(w_idx, base.get_size()) - base_iter = base.create_iter() - base_iter.next_skip_x(start) + base_iter, base_state = base.create_iter() + base_state = base_iter.next_skip_x(base_state, start) if length == 1: - return base_iter.getitem() + return base_iter.getitem(base_state) res = W_NDimArray.from_shape(space, [length], base.get_dtype(), base.get_order(), w_instance=base) - return loop.flatiter_getitem(res, base_iter, step) + return loop.flatiter_getitem(res, base_iter, base_state, step) def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,7 +42,6 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support -from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -52,19 +51,20 @@ self.shapelen = len(shape) self.indexes = [0] * len(shape) self._done = False - self.idx_w = [None] * len(idx_w) + self.idx_w_i = [None] * len(idx_w) + self.idx_w_s = [None] * len(idx_w) for i, w_idx in enumerate(idx_w): if isinstance(w_idx, W_NDimArray): - self.idx_w[i] = w_idx.create_iter(shape) + self.idx_w_i[i], self.idx_w_s[i] = w_idx.create_iter(shape) def done(self): return self._done @jit.unroll_safe def next(self): - for w_idx in self.idx_w: - if w_idx is not None: - w_idx.next() + for i, idx_w_i in enumerate(self.idx_w_i): + if idx_w_i is not None: + self.idx_w_s[i] = 
idx_w_i.next(self.idx_w_s[i]) for i in range(self.shapelen - 1, -1, -1): if self.indexes[i] < self.shape[i] - 1: self.indexes[i] += 1 @@ -79,6 +79,16 @@ return [space.wrap(self.indexes[i]) for i in range(shapelen)] +class IterState(object): + _immutable_fields_ = ['iterator', 'index', 'indices[*]', 'offset'] + + def __init__(self, iterator, index, indices, offset): + self.iterator = iterator + self.index = index + self.indices = indices + self.offset = offset + + class ArrayIter(object): _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', 'strides[*]', 'backstrides[*]'] @@ -92,94 +102,65 @@ self.strides = strides self.backstrides = backstrides - self.index = 0 - self.indices = [0] * len(shape) - self.offset = array.start + def reset(self): + return IterState(self, 0, [0] * len(self.shape_m1), self.array.start) @jit.unroll_safe - def reset(self): - self.index = 0 + def next(self, state): + assert state.iterator is self + index = state.index + 1 + indices = state.indices + offset = state.offset for i in xrange(self.ndim_m1, -1, -1): - self.indices[i] = 0 - self.offset = self.array.start + idx = indices[i] + if idx < self.shape_m1[i]: + indices[i] = idx + 1 + offset += self.strides[i] + break + else: + indices[i] = 0 + offset -= self.backstrides[i] + return IterState(self, index, indices, offset) @jit.unroll_safe - def next(self): - self.index += 1 + def next_skip_x(self, state, step): + assert state.iterator is self + assert step >= 0 + if step == 0: + return state + index = state.index + step + indices = state.indices + offset = state.offset for i in xrange(self.ndim_m1, -1, -1): - idx = self.indices[i] - if idx < self.shape_m1[i]: - self.indices[i] = idx + 1 - self.offset += self.strides[i] + idx = indices[i] + if idx < (self.shape_m1[i] + 1) - step: + indices[i] = idx + step + offset += self.strides[i] * step break else: - self.indices[i] = 0 - self.offset -= self.backstrides[i] - - @jit.unroll_safe - def next_skip_x(self, step): - assert step >= 0 - 
if step == 0: - return - self.index += step - for i in xrange(self.ndim_m1, -1, -1): - idx = self.indices[i] - if idx < (self.shape_m1[i] + 1) - step: - self.indices[i] = idx + step - self.offset += self.strides[i] * step - break - else: - rem_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) + rem_step = (idx + step) // (self.shape_m1[i] + 1) cur_step = step - rem_step * (self.shape_m1[i] + 1) - self.indices[i] += cur_step - self.offset += self.strides[i] * cur_step + indices[i] = idx + cur_step + offset += self.strides[i] * cur_step step = rem_step assert step > 0 + return IterState(self, index, indices, offset) - def done(self): - return self.index >= self.size + def done(self, state): + assert state.iterator is self + return state.index >= self.size - def getitem(self): - return self.array.getitem(self.offset) + def getitem(self, state): + assert state.iterator is self + return self.array.getitem(state.offset) - def getitem_bool(self): - return self.array.getitem_bool(self.offset) + def getitem_bool(self, state): + assert state.iterator is self + return self.array.getitem_bool(state.offset) - def setitem(self, elem): - self.array.setitem(self.offset, elem) - - -class SliceIterator(ArrayIter): - def __init__(self, arr, strides, backstrides, shape, order="C", - backward=False, dtype=None): - if dtype is None: - dtype = arr.implementation.dtype - self.dtype = dtype - self.arr = arr - if backward: - self.slicesize = shape[0] - self.gap = [support.product(shape[1:]) * dtype.elsize] - strides = strides[1:] - backstrides = backstrides[1:] - shape = shape[1:] - strides.reverse() - backstrides.reverse() - shape.reverse() - size = support.product(shape) - else: - shape = [support.product(shape)] - strides, backstrides = calc_strides(shape, dtype, order) - size = 1 - self.slicesize = support.product(shape) - self.gap = strides - - ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) - - def getslice(self): - from 
pypy.module.micronumpy.concrete import SliceArray - retVal = SliceArray(self.offset, self.gap, self.backstrides, - [self.slicesize], self.arr.implementation, self.arr, self.dtype) - return retVal + def setitem(self, state, elem): + assert state.iterator is self + self.array.setitem(state.offset, elem) def AxisIter(array, shape, axis, cumulative): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -12,11 +12,10 @@ AllButAxisIter -call2_driver = jit.JitDriver(name='numpy_call2', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_lhs', 'w_rhs', 'out', - 'left_iter', 'right_iter', 'out_iter']) +call2_driver = jit.JitDriver( + name='numpy_call2', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority @@ -46,47 +45,40 @@ if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=lhs_for_subtype) - left_iter = w_lhs.create_iter(shape) - right_iter = w_rhs.create_iter(shape) - out_iter = out.create_iter(shape) + left_iter, left_state = w_lhs.create_iter(shape) + right_iter, right_state = w_rhs.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): call2_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_lhs=w_lhs, w_rhs=w_rhs, - out=out, - left_iter=left_iter, right_iter=right_iter, - out_iter=out_iter) - w_left = left_iter.getitem().convert_to(space, calc_dtype) - w_right = right_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(func(calc_dtype, w_left, w_right).convert_to( + calc_dtype=calc_dtype, res_dtype=res_dtype) + w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) + w_right = 
right_iter.getitem(right_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( space, res_dtype)) - left_iter.next() - right_iter.next() - out_iter.next() + left_state = left_iter.next(left_state) + right_state = right_iter.next(right_state) + out_state = out_iter.next(out_state) return out -call1_driver = jit.JitDriver(name='numpy_call1', - greens = ['shapelen', 'func', 'calc_dtype', - 'res_dtype'], - reds = ['shape', 'w_obj', 'out', 'obj_iter', - 'out_iter']) +call1_driver = jit.JitDriver( + name='numpy_call1', + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto') def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) - obj_iter = w_obj.create_iter(shape) - out_iter = out.create_iter(shape) + obj_iter, obj_state = w_obj.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype, - shape=shape, w_obj=w_obj, out=out, - obj_iter=obj_iter, out_iter=out_iter) - elem = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(func(calc_dtype, elem).convert_to(space, res_dtype)) - out_iter.next() - obj_iter.next() + calc_dtype=calc_dtype, res_dtype=res_dtype) + elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype)) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) return out setslice_driver = jit.JitDriver(name='numpy_setslice', @@ -96,18 +88,20 @@ def setslice(space, shape, target, source): # note that unlike everything else, target and source here are # array implementations, not arrays - target_iter = target.create_iter(shape) - source_iter = source.create_iter(shape) 
+ target_iter, target_state = target.create_iter(shape) + source_iter, source_state = source.create_iter(shape) dtype = target.dtype shapelen = len(shape) - while not target_iter.done(): + while not target_iter.done(target_state): setslice_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + val = source_iter.getitem(source_state) if dtype.is_str_or_unicode(): - target_iter.setitem(dtype.coerce(space, source_iter.getitem())) + val = dtype.coerce(space, val) else: - target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) - target_iter.next() - source_iter.next() + val = val.convert_to(space, dtype) + target_iter.setitem(target_state, val) + target_state = target_iter.next(target_state) + source_state = source_iter.next(source_state) return target reduce_driver = jit.JitDriver(name='numpy_reduce', @@ -116,22 +110,22 @@ reds = 'auto') def compute_reduce(space, obj, calc_dtype, func, done_func, identity): - obj_iter = obj.create_iter() + obj_iter, obj_state = obj.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(space, calc_dtype) - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + obj_state = obj_iter.next(obj_state) else: cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, calc_dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval cur_value = func(calc_dtype, cur_value, rval) - obj_iter.next() + obj_state = obj_iter.next(obj_state) return cur_value reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver', @@ -139,69 +133,76 @@ reds = 'auto') def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity): - obj_iter = 
obj.create_iter() - out_iter = out.create_iter() + obj_iter, obj_state = obj.create_iter() + out_iter, out_state = out.create_iter() if identity is None: - cur_value = obj_iter.getitem().convert_to(space, calc_dtype) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + cur_value = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) else: cur_value = identity.convert_to(space, calc_dtype) shapelen = len(obj.get_shape()) - while not obj_iter.done(): + while not obj_iter.done(obj_state): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, dtype=calc_dtype) - rval = obj_iter.getitem().convert_to(space, calc_dtype) + rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) - out_iter.setitem(cur_value) - out_iter.next() - obj_iter.next() + out_iter.setitem(out_state, cur_value) + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) def fill(arr, box): - arr_iter = arr.create_iter() - while not arr_iter.done(): - arr_iter.setitem(box) - arr_iter.next() + arr_iter, arr_state = arr.create_iter() + while not arr_iter.done(arr_state): + arr_iter.setitem(arr_state, box) + arr_state = arr_iter.next(arr_state) def assign(space, arr, seq): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() for item in seq: - arr_iter.setitem(arr_dtype.coerce(space, item)) - arr_iter.next() + arr_iter.setitem(arr_state, arr_dtype.coerce(space, item)) + arr_state = arr_iter.next(arr_state) where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') def where(space, out, shape, arr, x, y, dtype): - out_iter = out.create_iter(shape) - arr_iter = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) arr_dtype = 
arr.get_dtype() - x_iter = x.create_iter(shape) - y_iter = y.create_iter(shape) + x_iter, x_state = x.create_iter(shape) + y_iter, y_state = y.create_iter(shape) if x.is_scalar(): if y.is_scalar(): - iter = arr_iter + iter, state = arr_iter, arr_state else: - iter = y_iter + iter, state = y_iter, y_state else: - iter = x_iter + iter, state = x_iter, x_state shapelen = len(shape) - while not iter.done(): + while not iter.done(state): where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, arr_dtype=arr_dtype) - w_cond = arr_iter.getitem() + w_cond = arr_iter.getitem(arr_state) if arr_dtype.itemtype.bool(w_cond): - w_val = x_iter.getitem().convert_to(space, dtype) + w_val = x_iter.getitem(x_state).convert_to(space, dtype) else: - w_val = y_iter.getitem().convert_to(space, dtype) - out_iter.setitem(w_val) - out_iter.next() - arr_iter.next() - x_iter.next() - y_iter.next() + w_val = y_iter.getitem(y_state).convert_to(space, dtype) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) + arr_state = arr_iter.next(arr_state) + x_state = x_iter.next(x_state) + y_state = y_iter.next(y_state) + if x.is_scalar(): + if y.is_scalar(): + state = arr_state + else: + state = y_state + else: + state = x_state return out axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', @@ -212,31 +213,36 @@ def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): out_iter = AxisIter(out.implementation, arr.get_shape(), axis, cumulative) + out_state = out_iter.reset() if cumulative: temp_iter = AxisIter(temp.implementation, arr.get_shape(), axis, False) + temp_state = temp_iter.reset() else: - temp_iter = out_iter # hack - arr_iter = arr.create_iter() + temp_iter = out_iter # hack + temp_state = out_state + arr_iter, arr_state = arr.create_iter() if identity is not None: identity = identity.convert_to(space, dtype) shapelen = len(shape) - while not out_iter.done(): + while not out_iter.done(out_state): 
axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - assert not arr_iter.done() - w_val = arr_iter.getitem().convert_to(space, dtype) - if out_iter.indices[axis] == 0: + assert not arr_iter.done(arr_state) + w_val = arr_iter.getitem(arr_state).convert_to(space, dtype) + if out_state.indices[axis] == 0: if identity is not None: w_val = func(dtype, identity, w_val) else: - cur = temp_iter.getitem() + cur = temp_iter.getitem(temp_state) w_val = func(dtype, cur, w_val) - out_iter.setitem(w_val) + out_iter.setitem(out_state, w_val) + out_state = out_iter.next(out_state) if cumulative: - temp_iter.setitem(w_val) - temp_iter.next() - arr_iter.next() - out_iter.next() + temp_iter.setitem(temp_state, w_val) + temp_state = temp_iter.next(temp_state) + else: + temp_state = out_state + arr_state = arr_iter.next(arr_state) return out @@ -249,18 +255,18 @@ result = 0 idx = 1 dtype = arr.get_dtype() - iter = arr.create_iter() - cur_best = iter.getitem() - iter.next() + iter, state = arr.create_iter() + cur_best = iter.getitem(state) + state = iter.next(state) shapelen = len(arr.get_shape()) - while not iter.done(): + while not iter.done(state): arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_val = iter.getitem() + w_val = iter.getitem(state) new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): result = idx cur_best = new_best - iter.next() + state = iter.next(state) idx += 1 return result return argmin_argmax @@ -291,17 +297,19 @@ right_impl = right.implementation assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype - outi = result.create_iter() + outi, outs = result.create_iter() lefti = AllButAxisIter(left_impl, len(left_shape) - 1) righti = AllButAxisIter(right_impl, right_critical_dim) + lefts = lefti.reset() + rights = righti.reset() n = left_impl.shape[-1] s1 = left_impl.strides[-1] s2 = right_impl.strides[right_critical_dim] - while not 
lefti.done(): - while not righti.done(): - oval = outi.getitem() - i1 = lefti.offset - i2 = righti.offset + while not lefti.done(lefts): + while not righti.done(rights): + oval = outi.getitem(outs) + i1 = lefts.offset + i2 = rights.offset i = 0 while i < n: i += 1 @@ -311,11 +319,11 @@ oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) i1 += s1 i2 += s2 - outi.setitem(oval) - outi.next() - righti.next() - righti.reset() - lefti.next() + outi.setitem(outs, oval) + outs = outi.next(outs) + rights = righti.next(rights) + rights = righti.reset() + lefts = lefti.next(lefts) return result count_all_true_driver = jit.JitDriver(name = 'numpy_count', @@ -324,13 +332,13 @@ def count_all_true_concrete(impl): s = 0 - iter = impl.create_iter() + iter, state = impl.create_iter() shapelen = len(impl.shape) dtype = impl.dtype - while not iter.done(): + while not iter.done(state): count_all_true_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - s += iter.getitem_bool() - iter.next() + s += iter.getitem_bool(state) + state = iter.next(state) return s def count_all_true(arr): @@ -344,18 +352,18 @@ reds = 'auto') def nonzero(res, arr, box): - res_iter = res.create_iter() - arr_iter = arr.create_iter() + res_iter, res_state = res.create_iter() + arr_iter, arr_state = arr.create_iter() shapelen = len(arr.shape) dtype = arr.dtype dims = range(shapelen) - while not arr_iter.done(): + while not arr_iter.done(arr_state): nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) - if arr_iter.getitem_bool(): + if arr_iter.getitem_bool(arr_state): for d in dims: - res_iter.setitem(box(arr_iter.indices[d])) - res_iter.next() - arr_iter.next() + res_iter.setitem(res_state, box(arr_state.indices[d])) + res_state = res_iter.next(res_state) + arr_state = arr_iter.next(arr_state) return res @@ -365,26 +373,26 @@ reds = 'auto') def getitem_filter(res, arr, index): - res_iter = res.create_iter() + res_iter, res_state = res.create_iter() shapelen = 
len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() - arr_iter = arr.create_iter() + index_iter, index_state = index.create_iter() + arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() index_dtype = index.get_dtype() # XXX length of shape of index as well? - while not index_iter.done(): + while not index_iter.done(index_state): getitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - res_iter.setitem(arr_iter.getitem()) - res_iter.next() - index_iter.next() - arr_iter.next() + if index_iter.getitem_bool(index_state): + res_iter.setitem(res_state, arr_iter.getitem(arr_state)) + res_state = res_iter.next(res_state) + index_state = index_iter.next(index_state) + arr_state = arr_iter.next(arr_state) return res setitem_filter_driver = jit.JitDriver(name = 'numpy_setitem_bool', @@ -393,41 +401,42 @@ reds = 'auto') def setitem_filter(space, arr, index, value): - arr_iter = arr.create_iter() + arr_iter, arr_state = arr.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: - index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) + index_iter, index_state = index.create_iter(arr.get_shape(), backward_broadcast=True) else: - index_iter = index.create_iter() + index_iter, index_state = index.create_iter() if value.get_size() == 1: - value_iter = value.create_iter(arr.get_shape()) + value_iter, value_state = value.create_iter(arr.get_shape()) else: - value_iter = value.create_iter() + value_iter, value_state = value.create_iter() index_dtype = index.get_dtype() arr_dtype = arr.get_dtype() - while not index_iter.done(): + while not index_iter.done(index_state): 
setitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, ) - if index_iter.getitem_bool(): - arr_iter.setitem(arr_dtype.coerce(space, value_iter.getitem())) - value_iter.next() - arr_iter.next() - index_iter.next() + if index_iter.getitem_bool(index_state): + val = arr_dtype.coerce(space, value_iter.getitem(value_state)) + value_state = value_iter.next(value_state) + arr_iter.setitem(arr_state, val) + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem', greens = ['dtype'], reds = 'auto') -def flatiter_getitem(res, base_iter, step): - ri = res.create_iter() +def flatiter_getitem(res, base_iter, base_state, step): + ri, rs = res.create_iter() dtype = res.get_dtype() - while not ri.done(): + while not ri.done(rs): flatiter_getitem_driver.jit_merge_point(dtype=dtype) - ri.setitem(base_iter.getitem()) - base_iter.next_skip_x(step) - ri.next() + ri.setitem(rs, base_iter.getitem(base_state)) + base_state = base_iter.next_skip_x(base_state, step) + rs = ri.next(rs) return res flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', @@ -436,19 +445,21 @@ def flatiter_setitem(space, arr, val, start, step, length): dtype = arr.get_dtype() - arr_iter = arr.create_iter() - val_iter = val.create_iter() - arr_iter.next_skip_x(start) + arr_iter, arr_state = arr.create_iter() + val_iter, val_state = val.create_iter() + arr_state = arr_iter.next_skip_x(arr_state, start) while length > 0: flatiter_setitem_driver.jit_merge_point(dtype=dtype) + val = val_iter.getitem(val_state) if dtype.is_str_or_unicode(): - arr_iter.setitem(dtype.coerce(space, val_iter.getitem())) + val = dtype.coerce(space, val) else: - arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) + val = val.convert_to(space, dtype) + arr_iter.setitem(arr_state, val) # need to repeat i_nput values until all assignments are done - 
arr_iter.next_skip_x(step) + arr_state = arr_iter.next_skip_x(arr_state, step) + val_state = val_iter.next(val_state) length -= 1 - val_iter.next() fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', greens = ['itemsize', 'dtype'], @@ -456,30 +467,30 @@ def fromstring_loop(space, a, dtype, itemsize, s): i = 0 - ai = a.create_iter() - while not ai.done(): + ai, state = a.create_iter() + while not ai.done(state): fromstring_driver.jit_merge_point(dtype=dtype, itemsize=itemsize) sub = s[i*itemsize:i*itemsize + itemsize] if dtype.is_str_or_unicode(): val = dtype.coerce(space, space.wrap(sub)) else: val = dtype.itemtype.runpack_str(space, sub) - ai.setitem(val) - ai.next() + ai.setitem(state, val) + state = ai.next(state) i += 1 def tostring(space, arr): builder = StringBuilder() - iter = arr.create_iter() + iter, state = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().elsize res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), w_res_str.implementation.get_storage_as_int(space)) - while not iter.done(): - w_res_str.implementation.setitem(0, iter.getitem()) + while not iter.done(state): + w_res_str.implementation.setitem(0, iter.getitem(state)) for i in range(itemsize): builder.append(res_str_casted[i]) - iter.next() + state = iter.next(state) return builder.build() getitem_int_driver = jit.JitDriver(name = 'numpy_getitem_int', @@ -500,8 +511,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] res.descr_setitem(space, space.newtuple(prefix_w[:prefixlen] + @@ -528,8 +539,8 @@ # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w[i] is not None: - index_w[i] = iter.idx_w[i].getitem() + if iter.idx_w_i[i] is not None: + index_w[i] = 
iter.idx_w_i[i].getitem(iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] w_idx = space.newtuple(prefix_w[:prefixlen] + iter.get_index(space, @@ -547,13 +558,14 @@ def byteswap(from_, to): dtype = from_.dtype - from_iter = from_.create_iter() - to_iter = to.create_iter() - while not from_iter.done(): + from_iter, from_state = from_.create_iter() + to_iter, to_state = to.create_iter() + while not from_iter.done(from_state): byteswap_driver.jit_merge_point(dtype=dtype) - to_iter.setitem(dtype.itemtype.byteswap(from_iter.getitem())) - to_iter.next() - from_iter.next() + val = dtype.itemtype.byteswap(from_iter.getitem(from_state)) + to_iter.setitem(to_state, val) + to_state = to_iter.next(to_state) + from_state = from_iter.next(from_state) choose_driver = jit.JitDriver(name='numpy_choose_driver', greens = ['shapelen', 'mode', 'dtype'], @@ -561,13 +573,15 @@ def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) - iterators = [a.create_iter(shape) for a in choices] - arr_iter = arr.create_iter(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + pairs = [a.create_iter(shape) for a in choices] + iterators = [i[0] for i in pairs] + states = [i[1] for i in pairs] + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): choose_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, mode=mode) - index = support.index_w(space, arr_iter.getitem()) + index = support.index_w(space, arr_iter.getitem(arr_state)) if index < 0 or index >= len(iterators): if mode == NPY.RAISE: raise OperationError(space.w_ValueError, space.wrap( @@ -580,72 +594,73 @@ index = 0 else: index = len(iterators) - 1 - out_iter.setitem(iterators[index].getitem().convert_to(space, dtype)) - for iter in iterators: - iter.next() - out_iter.next() - arr_iter.next() + val = iterators[index].getitem(states[index]).convert_to(space, dtype) + out_iter.setitem(out_state, val) + for i in 
range(len(iterators)): + states[i] = iterators[i].next(states[i]) + out_state = out_iter.next(out_state) + arr_state = arr_iter.next(arr_state) clip_driver = jit.JitDriver(name='numpy_clip_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def clip(space, arr, shape, min, max, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) dtype = out.get_dtype() shapelen = len(shape) - min_iter = min.create_iter(shape) - max_iter = max.create_iter(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + min_iter, min_state = min.create_iter(shape) + max_iter, max_state = max.create_iter(shape) + out_iter, out_state = out.create_iter(shape) + while not arr_iter.done(arr_state): clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = arr_iter.getitem().convert_to(space, dtype) - w_min = min_iter.getitem().convert_to(space, dtype) - w_max = max_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) + w_min = min_iter.getitem(min_state).convert_to(space, dtype) + w_max = max_iter.getitem(max_state).convert_to(space, dtype) if dtype.itemtype.lt(w_v, w_min): w_v = w_min elif dtype.itemtype.gt(w_v, w_max): w_v = w_max - out_iter.setitem(w_v) - arr_iter.next() - max_iter.next() - out_iter.next() - min_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + min_state = min_iter.next(min_state) + max_state = max_iter.next(max_state) + out_state = out_iter.next(out_state) round_driver = jit.JitDriver(name='numpy_round_driver', greens = ['shapelen', 'dtype'], reds = 'auto') def round(space, arr, dtype, shape, decimals, out): - arr_iter = arr.create_iter(shape) + arr_iter, arr_state = arr.create_iter(shape) + out_iter, out_state = out.create_iter(shape) shapelen = len(shape) - out_iter = out.create_iter(shape) - while not arr_iter.done(): + while not arr_iter.done(arr_state): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v 
= arr_iter.getitem().convert_to(space, dtype) + w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) w_v = dtype.itemtype.round(w_v, decimals) - out_iter.setitem(w_v) - arr_iter.next() - out_iter.next() + out_iter.setitem(out_state, w_v) + arr_state = arr_iter.next(arr_state) + out_state = out_iter.next(out_state) diagonal_simple_driver = jit.JitDriver(name='numpy_diagonal_simple_driver', greens = ['axis1', 'axis2'], reds = 'auto') def diagonal_simple(space, arr, out, offset, axis1, axis2, size): - out_iter = out.create_iter() + out_iter, out_state = out.create_iter() i = 0 index = [0] * 2 while i < size: diagonal_simple_driver.jit_merge_point(axis1=axis1, axis2=axis2) index[axis1] = i index[axis2] = i + offset - out_iter.setitem(arr.getitem_index(space, index)) + out_iter.setitem(out_state, arr.getitem_index(space, index)) i += 1 - out_iter.next() + out_state = out_iter.next(out_state) def diagonal_array(space, arr, out, offset, axis1, axis2, shape): - out_iter = out.create_iter() + out_iter, out_state = out.create_iter() iter = PureShapeIter(shape, []) shapelen_minus_1 = len(shape) - 1 assert shapelen_minus_1 >= 0 @@ -667,6 +682,6 @@ indexes = (iter.indexes[:a] + [last_index + offset] + iter.indexes[a:b] + [last_index] + iter.indexes[b:shapelen_minus_1]) - out_iter.setitem(arr.getitem_index(space, indexes)) + out_iter.setitem(out_state, arr.getitem_index(space, indexes)) iter.next() - out_iter.next() + out_state = out_iter.next(out_state) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -18,7 +18,7 @@ multi_axis_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.flatiter import W_FlatIterator -from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ +from pypy.module.micronumpy.strides import get_shape_from_iterable, \ shape_agreement, shape_agreement_multiple @@ -260,24 
+260,24 @@ return space.call_function(cache.w_array_str, self) def dump_data(self, prefix='array(', separator=',', suffix=')'): - i = self.create_iter() + i, state = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() s.append(prefix) if not self.is_scalar(): s.append('[') - while not i.done(): + while not i.done(state): if first: first = False else: s.append(separator) s.append(' ') if self.is_scalar() and dtype.is_str(): - s.append(dtype.itemtype.to_str(i.getitem())) + s.append(dtype.itemtype.to_str(i.getitem(state))) else: - s.append(dtype.itemtype.str_format(i.getitem())) - i.next() + s.append(dtype.itemtype.str_format(i.getitem(state))) + state = i.next(state) if not self.is_scalar(): s.append(']') s.append(suffix) @@ -469,29 +469,33 @@ def descr_get_flatiter(self, space): return space.wrap(W_FlatIterator(self)) - def to_coords(self, space, w_index): - coords, _, _ = to_coords(space, self.get_shape(), - self.get_size(), self.get_order(), - w_index) - return coords - - def descr_item(self, space, w_arg=None): - if space.is_none(w_arg): + def descr_item(self, space, __args__): + args_w, kw_w = __args__.unpack() + if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): + args_w = space.fixedview(args_w[0]) + shape = self.get_shape() + coords = [0] * len(shape) + if len(args_w) == 0: if self.get_size() == 1: w_obj = self.get_scalar_value() assert isinstance(w_obj, boxes.W_GenericBox) return w_obj.item(space) raise oefmt(space.w_ValueError, "can only convert an array of size 1 to a Python scalar") - if space.isinstance_w(w_arg, space.w_int): - if self.is_scalar(): - raise oefmt(space.w_IndexError, "index out of bounds") - i = self.to_coords(space, w_arg) - item = self.getitem(space, i) - assert isinstance(item, boxes.W_GenericBox) - return item.item(space) - raise OperationError(space.w_NotImplementedError, space.wrap( - "non-int arg not supported")) + elif len(args_w) == 1 and len(shape) != 1: + value = support.index_w(space, 
args_w[0]) + value = support.check_and_adjust_index(space, value, self.get_size(), -1) + for idim in range(len(shape) - 1, -1, -1): + coords[idim] = value % shape[idim] + value //= shape[idim] + elif len(args_w) == len(shape): + for idim in range(len(shape)): + coords[idim] = support.index_w(space, args_w[idim]) + else: + raise oefmt(space.w_ValueError, "incorrect number of indices for array") + item = self.getitem(space, coords) + assert isinstance(item, boxes.W_GenericBox) + return item.item(space) def descr_itemset(self, space, args_w): if len(args_w) == 0: @@ -818,8 +822,8 @@ if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) - iter = self.create_iter() - return space.wrap(space.is_true(iter.getitem())) + iter, state = self.create_iter() + return space.wrap(space.is_true(iter.getitem(state))) def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): @@ -1085,11 +1089,11 @@ builder = StringBuilder() if isinstance(self.implementation, SliceArray): - iter = self.implementation.create_iter() - while not iter.done(): - box = iter.getitem() + iter, state = self.implementation.create_iter() + while not iter.done(state): + box = iter.getitem(state) builder.append(box.raw_str()) - iter.next() + state = iter.next(state) else: builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -1,99 +1,50 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt +from pypy.module.micronumpy import ufuncs, 
support, concrete from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.descriptor import decode_w_dtype +from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (calculate_broadcast_strides, - shape_agreement, shape_agreement_multiple) -from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator -from pypy.module.micronumpy.concrete import SliceArray -from pypy.module.micronumpy.descriptor import decode_w_dtype -from pypy.module.micronumpy import ufuncs, support + shape_agreement, shape_agreement_multiple) -class AbstractIterator(object): - def done(self): - raise NotImplementedError("Abstract Class") - - def next(self): - raise NotImplementedError("Abstract Class") - - def getitem(self, space, array): - raise NotImplementedError("Abstract Class") - -class IteratorMixin(object): - _mixin_ = True - def __init__(self, it, op_flags): - self.it = it - self.op_flags = op_flags - - def done(self): - return self.it.done() - - def next(self): - self.it.next() - - def getitem(self, space, array): - return self.op_flags.get_it_item[self.index](space, array, self.it) - - def setitem(self, space, array, val): - xxx - -class BoxIterator(IteratorMixin, AbstractIterator): - index = 0 - -class ExternalLoopIterator(IteratorMixin, AbstractIterator): - index = 1 - def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): + if space.is_w(w_op_flags, space.w_None): + w_op_flags = space.newtuple([space.wrap('readonly')]) + if not space.isinstance_w(w_op_flags, space.w_tuple) and not \ + space.isinstance_w(w_op_flags, space.w_list): + raise oefmt(space.w_ValueError, + '%s must be a tuple or array of per-op flag-tuples', + name) ret = [] - if space.is_w(w_op_flags, space.w_None): + w_lst = space.listview(w_op_flags) + if space.isinstance_w(w_lst[0], space.w_tuple) or \ + space.isinstance_w(w_lst[0], space.w_list): + if len(w_lst) != n: + raise oefmt(space.w_ValueError, + '%s must be a 
tuple or array of per-op flag-tuples', + name) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) + else: + op_flag = parse_one_arg(space, w_lst) for i in range(n): - ret.append(OpFlag()) - elif not space.isinstance_w(w_op_flags, space.w_tuple) and not \ - space.isinstance_w(w_op_flags, space.w_list): - raise OperationError(space.w_ValueError, space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) - else: - w_lst = space.listview(w_op_flags) - if space.isinstance_w(w_lst[0], space.w_tuple) or \ - space.isinstance_w(w_lst[0], space.w_list): - if len(w_lst) != n: - raise OperationError(space.w_ValueError, space.wrap( - '%s must be a tuple or array of per-op flag-tuples' % name)) - for item in w_lst: - ret.append(parse_one_arg(space, space.listview(item))) - else: - op_flag = parse_one_arg(space, w_lst) - for i in range(n): - ret.append(op_flag) + ret.append(op_flag) return ret + class OpFlag(object): def __init__(self): - self.rw = 'r' + self.rw = '' self.broadcast = True self.force_contig = False self.force_align = False self.native_byte_order = False self.tmp_copy = '' self.allocate = False - self.get_it_item = (get_readonly_item, get_readonly_slice) -def get_readonly_item(space, array, it): - return space.wrap(it.getitem()) - -def get_readwrite_item(space, array, it): - #create a single-value view (since scalars are not views) - res = SliceArray(it.array.start + it.offset, [0], [0], [1,], it.array, array) - #it.dtype.setitem(res, 0, it.getitem()) - return W_NDimArray(res) - -def get_readonly_slice(space, array, it): - return W_NDimArray(it.getslice().readonly()) - -def get_readwrite_slice(space, array, it): - return W_NDimArray(it.getslice()) def parse_op_flag(space, lst): op_flag = OpFlag() @@ -121,39 +72,38 @@ op_flag.allocate = True elif item == 'no_subtype': raise OperationError(space.w_NotImplementedError, space.wrap( - '"no_subtype" op_flag not implemented yet')) + '"no_subtype" op_flag not implemented yet')) 
elif item == 'arraymask': raise OperationError(space.w_NotImplementedError, space.wrap( - '"arraymask" op_flag not implemented yet')) + '"arraymask" op_flag not implemented yet')) elif item == 'writemask': raise OperationError(space.w_NotImplementedError, space.wrap( - '"writemask" op_flag not implemented yet')) + '"writemask" op_flag not implemented yet')) else: raise OperationError(space.w_ValueError, space.wrap( - 'op_flags must be a tuple or array of per-op flag-tuples')) - if op_flag.rw == 'r': - op_flag.get_it_item = (get_readonly_item, get_readonly_slice) - elif op_flag.rw == 'rw': - op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) - elif op_flag.rw == 'w': - # XXX Extra logic needed to make sure writeonly - op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) + 'op_flags must be a tuple or array of per-op flag-tuples')) + if op_flag.rw == '': + raise oefmt(space.w_ValueError, + "None of the iterator flags READWRITE, READONLY, or " + "WRITEONLY were specified for an operand") return op_flag + def parse_func_flags(space, nditer, w_flags): if space.is_w(w_flags, space.w_None): return elif not space.isinstance_w(w_flags, space.w_tuple) and not \ - space.isinstance_w(w_flags, space.w_list): + space.isinstance_w(w_flags, space.w_list): raise OperationError(space.w_ValueError, space.wrap( - 'Iter global flags must be a list or tuple of strings')) + 'Iter global flags must be a list or tuple of strings')) lst = space.listview(w_flags) for w_item in lst: if not space.isinstance_w(w_item, space.w_str) and not \ - space.isinstance_w(w_item, space.w_unicode): + space.isinstance_w(w_item, space.w_unicode): typename = space.type(w_item).getname(space) - raise OperationError(space.w_TypeError, space.wrap( - 'expected string or Unicode object, %s found' % typename)) + raise oefmt(space.w_TypeError, + 'expected string or Unicode object, %s found', + typename) item = space.str_w(w_item) if item == 'external_loop': raise 
OperationError(space.w_NotImplementedError, space.wrap( @@ -187,21 +137,24 @@ elif item == 'zerosize_ok': nditer.zerosize_ok = True else: - raise OperationError(space.w_ValueError, space.wrap( - 'Unexpected iterator global flag "%s"' % item)) + raise oefmt(space.w_ValueError, + 'Unexpected iterator global flag "%s"', + item) if nditer.tracked_index and nditer.external_loop: - raise OperationError(space.w_ValueError, space.wrap( - 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' - 'multi-index is being tracked')) + raise OperationError(space.w_ValueError, space.wrap( + 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' + 'multi-index is being tracked')) + def is_backward(imp, order): if order == 'K' or (order == 'C' and imp.order == 'C'): return False - elif order =='F' and imp.order == 'C': + elif order == 'F' and imp.order == 'C': return True else: raise NotImplementedError('not implemented yet') + def get_iter(space, order, arr, shape, dtype): imp = arr.implementation backward = is_backward(imp, order) @@ -223,19 +176,6 @@ shape, backward) return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) -def get_external_loop_iter(space, order, arr, shape): - imp = arr.implementation - backward = is_backward(imp, order) - return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) - -def convert_to_array_or_none(space, w_elem): - ''' - None will be passed through, all others will be converted - ''' - if space.is_none(w_elem): - return None - return convert_to_array(space, w_elem) - class IndexIterator(object): def __init__(self, shape, backward=False): @@ -263,10 +203,10 @@ ret += self.index[i] * self.shape[i - 1] return ret + class W_NDIter(W_Root): - def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, - w_op_axes, w_itershape, w_buffersize, order): + w_op_axes, w_itershape, w_buffersize, order): self.order = order self.external_loop = False self.buffered = False @@ -286,9 +226,11 @@ if 
space.isinstance_w(w_seq, space.w_tuple) or \ space.isinstance_w(w_seq, space.w_list): w_seq_as_list = space.listview(w_seq) - self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] + self.seq = [convert_to_array(space, w_elem) + if not space.is_none(w_elem) else None + for w_elem in w_seq_as_list] else: - self.seq =[convert_to_array(space, w_seq)] + self.seq = [convert_to_array(space, w_seq)] parse_func_flags(space, self, w_flags) self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, @@ -308,9 +250,9 @@ self.dtypes = [] # handle None or writable operands, calculate my shape - self.iters=[] - outargs = [i for i in range(len(self.seq)) \ - if self.seq[i] is None or self.op_flags[i].rw == 'w'] + self.iters = [] + outargs = [i for i in range(len(self.seq)) + if self.seq[i] is None or self.op_flags[i].rw == 'w'] if len(outargs) > 0: out_shape = shape_agreement_multiple(space, [self.seq[i] for i in outargs]) else: @@ -325,14 +267,12 @@ out_dtype = None for i in range(len(self.seq)): if self.seq[i] is None: - self.op_flags[i].get_it_item = (get_readwrite_item, - get_readwrite_slice) self.op_flags[i].allocate = True continue if self.op_flags[i].rw == 'w': continue - out_dtype = ufuncs.find_binop_result_dtype(space, - self.seq[i].get_dtype(), out_dtype) + out_dtype = ufuncs.find_binop_result_dtype( + space, self.seq[i].get_dtype(), out_dtype) for i in outargs: if self.seq[i] is None: # XXX can we postpone allocation to later? 
@@ -360,8 +300,9 @@ self.dtypes[i] = seq_d elif selfd != seq_d: if not 'r' in self.op_flags[i].tmp_copy: - raise OperationError(space.w_TypeError, space.wrap( - "Iterator operand required copying or buffering for operand %d" % i)) + raise oefmt(space.w_TypeError, + "Iterator operand required copying or " + "buffering for operand %d", i) impl = self.seq[i].implementation new_impl = impl.astype(space, selfd) self.seq[i] = W_NDimArray(new_impl) @@ -370,18 +311,14 @@ self.dtypes = [s.get_dtype() for s in self.seq] # create an iterator for each operand - if self.external_loop: - for i in range(len(self.seq)): - self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, - self.seq[i], iter_shape), self.op_flags[i])) - else: - for i in range(len(self.seq)): - self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i], iter_shape, self.dtypes[i]), - self.op_flags[i])) + for i in range(len(self.seq)): + it = get_iter(space, self.order, self.seq[i], iter_shape, self.dtypes[i]) + self.iters.append((it, it.reset())) + def set_op_axes(self, space, w_op_axes): if space.len_w(w_op_axes) != len(self.seq): - raise OperationError(space.w_ValueError, space.wrap("op_axes must be a tuple/list matching the number of ops")) + raise oefmt(space.w_ValueError, + "op_axes must be a tuple/list matching the number of ops") op_axes = space.listview(w_op_axes) l = -1 for w_axis in op_axes: @@ -390,10 +327,14 @@ if l == -1: l = axis_len elif axis_len != l: - raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) - self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 for x in space.listview(w_axis)]) + raise oefmt(space.w_ValueError, + "Each entry of op_axes must have the same size") + self.op_axes.append([space.int_w(x) if not space.is_none(x) else -1 + for x in space.listview(w_axis)]) if l == -1: - raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes 
must be contained within it")) + raise oefmt(space.w_ValueError, + "If op_axes is provided, at least one list of axes " + "must be contained within it") raise Exception('xxx TODO') # Check that values make sense: # - in bounds for each operand @@ -404,24 +345,34 @@ def descr_iter(self, space): return space.wrap(self) + def getitem(self, it, st, op_flags): + if op_flags.rw == 'r': + impl = concrete.ConcreteNonWritableArrayWithBase + else: + impl = concrete.ConcreteArrayWithBase + res = impl([], it.array.dtype, it.array.order, [], [], + it.array.storage, self) + res.start = st.offset + return W_NDimArray(res) + def descr_getitem(self, space, w_idx): idx = space.int_w(w_idx) try: - ret = space.wrap(self.iters[idx].getitem(space, self.seq[idx])) + it, st = self.iters[idx] except IndexError: - raise OperationError(space.w_IndexError, space.wrap("Iterator operand index %d is out of bounds" % idx)) - return ret + raise oefmt(space.w_IndexError, + "Iterator operand index %d is out of bounds", idx) + return self.getitem(it, st, self.op_flags[idx]) def descr_setitem(self, space, w_idx, w_value): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_len(self, space): space.wrap(len(self.iters)) def descr_next(self, space): - for it in self.iters: - if not it.done(): + for it, st in self.iters: + if not it.done(st): break else: self.done = True @@ -432,20 +383,20 @@ self.index_iter.next() else: self.first_next = False - for i in range(len(self.iters)): - res.append(self.iters[i].getitem(space, self.seq[i])) - self.iters[i].next() - if len(res) <2: + for i, (it, st) in enumerate(self.iters): + res.append(self.getitem(it, st, self.op_flags[i])) + self.iters[i] = (it, it.next(st)) + if len(res) < 2: return res[0] return space.newtuple(res) def iternext(self): if self.index_iter: self.index_iter.next() - for i in range(len(self.iters)): - self.iters[i].next() - for it in 
self.iters: - if not it.done(): + for i, (it, st) in enumerate(self.iters): + self.iters[i] = (it, it.next(st)) + for it, st in self.iters: + if not it.done(st): break else: self.done = True @@ -456,29 +407,23 @@ return space.wrap(self.iternext()) def descr_copy(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_debug_print(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_enable_external_loop(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") @unwrap_spec(axis=int) def descr_remove_axis(self, space, axis): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_remove_multi_index(self, space, w_multi_index): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_reset(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_operands(self, space): l_w = [] @@ -496,17 +441,16 @@ return space.wrap(self.done) def descr_get_has_delayed_bufalloc(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_has_index(self, space): return space.wrap(self.tracked_index in ["C", "F"]) def descr_get_index(self, space): if not self.tracked_index in ["C", "F"]: - raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an 
index")) + raise oefmt(space.w_ValueError, "Iterator does not have an index") if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + raise oefmt(space.w_ValueError, "Iterator is past the end") return space.wrap(self.index_iter.getvalue()) def descr_get_has_multi_index(self, space): @@ -514,51 +458,44 @@ def descr_get_multi_index(self, space): if not self.tracked_index == "multi": - raise OperationError(space.w_ValueError, space.wrap("Iterator is not tracking a multi-index")) + raise oefmt(space.w_ValueError, "Iterator is not tracking a multi-index") if self.done: - raise OperationError(space.w_ValueError, space.wrap("Iterator is past the end")) + raise oefmt(space.w_ValueError, "Iterator is past the end") return space.newtuple([space.wrap(x) for x in self.index_iter.index]) def descr_get_iterationneedsapi(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_iterindex(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_itersize(self, space): return space.wrap(support.product(self.shape)) def descr_get_itviews(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_ndim(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_nop(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_shape(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented 
yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") def descr_get_value(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + raise oefmt(space.w_NotImplementedError, "not implemented yet") - at unwrap_spec(w_flags = WrappedDefault(None), w_op_flags=WrappedDefault(None), - w_op_dtypes = WrappedDefault(None), order=str, + at unwrap_spec(w_flags=WrappedDefault(None), w_op_flags=WrappedDefault(None), + w_op_dtypes=WrappedDefault(None), order=str, w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order='K'): + w_itershape, w_buffersize, order='K'): return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order) + w_itershape, w_buffersize, order) W_NDIter.typedef = TypeDef( 'nditer', diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -148,20 +148,22 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() index_impl = index_arr.implementation index_iter = AllButAxisIter(index_impl, axis) + index_state = index_iter.reset() stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): + while not arr_iter.done(arr_state): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + - index_iter.offset, i) + index_state.offset, i) r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_iter.offset, arr_iter.offset) + arr.get_storage(), storage, index_state.offset, arr_state.offset) ArgSort(r).sort() - arr_iter.next() - 
index_iter.next() + arr_state = arr_iter.next(arr_state) + index_state = index_iter.next(index_state) return index_arr return argsort @@ -292,12 +294,13 @@ if axis < 0 or axis >= len(shape): raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) + arr_state = arr_iter.reset() stride_size = arr.strides[axis] axis_size = arr.shape[axis] - while not arr_iter.done(): - r = Repr(stride_size, axis_size, arr.get_storage(), arr_iter.offset) + while not arr_iter.done(arr_state): + r = Repr(stride_size, axis_size, arr.get_storage(), arr_state.offset) ArgSort(r).sort() - arr_iter.next() + arr_state = arr_iter.next(arr_state) return sort diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -233,30 +233,6 @@ return dtype -def to_coords(space, shape, size, order, w_item_or_slice): - '''Returns a start coord, step, and length. - ''' - start = lngth = step = 0 - if not (space.isinstance_w(w_item_or_slice, space.w_int) or - space.isinstance_w(w_item_or_slice, space.w_slice)): - raise OperationError(space.w_IndexError, - space.wrap('unsupported iterator index')) - - start, stop, step, lngth = space.decode_index4(w_item_or_slice, size) - - coords = [0] * len(shape) - i = start - if order == 'C': - for s in range(len(shape) -1, -1, -1): - coords[s] = i % shape[s] - i //= shape[s] - else: - for s in range(len(shape)): - coords[s] = i % shape[s] - i //= shape[s] - return coords, step, lngth - - @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -25,3 +25,18 @@ for x in s: i *= x return i + + +def check_and_adjust_index(space, index, size, axis): + if index < -size or index >= size: + if axis >= 0: + raise 
oefmt(space.w_IndexError, + "index %d is out of bounds for axis %d with size %d", + index, axis, size) + else: + raise oefmt(space.w_IndexError, + "index %d is out of bounds for size %d", + index, size) + if index < 0: + index += size + return index diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -16,17 +16,18 @@ assert backstrides == [10, 4] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() - i.next() - assert i.offset == 3 - assert not i.done() - assert i.indices == [0,3] + s = i.reset() + s = i.next(s) + s = i.next(s) + s = i.next(s) + assert s.offset == 3 + assert not i.done(s) + assert s.indices == [0,3] #cause a dimension overflow - i.next() - i.next() - assert i.offset == 5 - assert i.indices == [1,0] + s = i.next(s) + s = i.next(s) + assert s.offset == 5 + assert s.indices == [1,0] #Now what happens if the array is transposed? 
strides[-1] != 1 # therefore layout is non-contiguous @@ -35,17 +36,18 @@ assert backstrides == [2, 12] i = ArrayIter(MockArray, support.product(shape), shape, strides, backstrides) - i.next() - i.next() From noreply at buildbot.pypy.org Wed Apr 23 06:49:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 23 Apr 2014 06:49:47 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_fdopen on win32 Message-ID: <20140423044947.7868B1C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70881:594846ff8c89 Date: 2014-04-22 21:48 -0700 http://bitbucket.org/pypy/pypy/changeset/594846ff8c89/ Log: fix test_fdopen on win32 diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -47,7 +47,8 @@ rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal(fileno, [lltype.Ptr(FILE)], rffi.INT) -c_fdopen = llexternal('fdopen', [rffi.INT, rffi.CCHARP], lltype.Ptr(FILE)) +c_fdopen = llexternal(('_' if os.name == 'nt' else '') + 'fdopen', + [rffi.INT, rffi.CCHARP], lltype.Ptr(FILE)) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) From noreply at buildbot.pypy.org Wed Apr 23 08:27:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 23 Apr 2014 08:27:34 +0200 (CEST) Subject: [pypy-commit] pypy default: try a new ip for testing timeout Message-ID: <20140423062734.B900C1D2646@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70882:31e6f79cf3eb Date: 2014-04-23 02:26 -0400 http://bitbucket.org/pypy/pypy/changeset/31e6f79cf3eb/ Log: try a new ip for testing timeout diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -340,12 +340,8 @@ def test_connect_with_timeout_fail(): 
s = RSocket() s.settimeout(0.1) - if sys.platform == 'win32': - addr = '169.254.169.254' - else: - addr = '240.240.240.240' with py.test.raises(SocketTimeout): - s.connect(INETAddress(addr, 12345)) + s.connect(INETAddress('10.255.255.10', 12345)) s.close() def test_connect_with_timeout_succeed(): From noreply at buildbot.pypy.org Wed Apr 23 08:32:33 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 23 Apr 2014 08:32:33 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20140423063233.5CB2C1D2646@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70883:e1dfd6d4978f Date: 2014-04-22 23:27 -0700 http://bitbucket.org/pypy/pypy/changeset/e1dfd6d4978f/ Log: merge default into branch diff too long, truncating to 2000 out of 4434 lines diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -51,7 +51,7 @@ for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) - shutil.rmtree(self.temp_dir, True) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,7 +335,7 @@ response 
= self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) - f.close() + f.close() def test_head(self): response = self.request( diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,6 +1,7 @@ import os, sys, imp import tempfile, binascii + def get_hashed_dir(cfile): with open(cfile,'r') as fid: content = fid.read() @@ -15,7 +16,7 @@ output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) - return output_dir + return output_dir def _get_c_extension_suffix(): diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -1,4 +1,3 @@ - """ This file provides some support for things like standard_c_lib and errno access, as portable as possible """ @@ -22,7 +21,7 @@ standard_c_lib._errno.argtypes = None def _where_is_errno(): return standard_c_lib._errno() - + elif sys.platform in ('linux2', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None @@ -42,5 +41,3 @@ def set_errno(value): errno_p = _where_is_errno() errno_p.contents.value = value - - diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -15,21 +15,21 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer - Matti Picus Hakan Ardo Benjamin Peterson - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns - Manuel Jacob Eric van Riet Paap Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen - Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann @@ -38,23 +38,23 @@ Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain 
Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak - Romain Guillebert Guido Wesdorp Lawrence Oluyede - Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen Jason Creighton Alex Martelli @@ -71,6 +71,7 @@ Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas @@ -87,6 +88,7 @@ Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin Stefano Rivera @@ -95,13 +97,17 @@ Georg Brandl Bert Freudenberg Stian Andreassen + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. Muller - Laurence Tratt + Jeremy Thurgood + Gregor Wegberg Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -110,9 +116,7 @@ David Ripton Dusty Phillips Lukas Renggli - Edd Barrett Guenter Jantzen - Tobias Oberstein Ned Batchelder Amit Regmi Ben Young @@ -123,7 +127,6 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -134,7 +137,6 @@ Olivier Dormond Jared Grubb Karl Bartel - Tobias Pape Brian Dorsey Victor Stinner Andrews Medina @@ -146,9 +148,9 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen Jonathan David Riehl Stanislaw Halik @@ -161,7 +163,9 @@ Alexander Sedov Corbin Simpson Christopher Pope + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan Alexis Daboville @@ -170,6 +174,7 @@ Karl Ramm Pieter Zieschang Gabriel + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -180,6 +185,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -200,7 +206,6 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero 
Hurtado @@ -212,28 +217,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -241,6 +253,7 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths Mike Bayer Flavio Percoco Kristoffer Kleine diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -0,0 +1,154 @@ +======================= +What's new in PyPy 2.2+ +======================= + +.. this is a revision shortly after release-2.2.x +.. startrev: 4cd1bc8b3111 + +.. branch: release-2.2.x + +.. branch: numpy-newbyteorder +Clean up numpy types, add newbyteorder functionality + +.. branch: windows-packaging +Package tk/tcl runtime with win32 + +.. branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. 
This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. Since gcc shows minimal +speedup, it was not implemented. + +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! + +.. branch: NonConstant +Simplify implementation of NonConstant. + +.. branch: array-propagate-len +Kill some guards and operations in JIT traces by adding integer bounds +propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). + +.. branch: optimize-int-and +Optimize away INT_AND with constant mask of 1s that fully cover the bitrange +of other operand. + +.. branch: bounds-int-add-or +Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the +operands are positive to kill some guards + +.. branch: remove-intlong-smm +kills int/long/smalllong/bool multimethods + +.. branch: numpy-refactor +Cleanup micronumpy module + +.. 
branch: int_w-refactor +In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. + +.. branch: test-58c3d8552833 +Fix for getarrayitem_gc_pure optimization + +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. + +.. branch: popen-pclose +Provide an exit status for popen'ed RFiles via pclose + +.. branch: stdlib-2.7.6 +Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. + +.. branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain + +.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. branch: win32-fixes4 +fix more tests for win32 + +.. branch: latest-improve-doc +Fix broken links in documentation + +.. branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field + +.. branch: issue1514 +Fix issues with reimporting builtin modules + +.. 
branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. branch: numpy-speed +Separate iterator from its state so jit can optimize better + +.. branch: numpy-searchsorted +Implement searchsorted without sorter kwarg + +.. branch: openbsd-lib-prefix +add 'lib' prefix to link libraries on OpenBSD + +.. branch: small-unroll-improvements +Improve optimization of small allocation-heavy loops in the JIT diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,145 +1,11 @@ ======================= -What's new in PyPy 2.2+ +What's new in PyPy 2.3+ ======================= -.. this is a revision shortly after release-2.2.x -.. startrev: 4cd1bc8b3111 +.. this is a revision shortly after release-2.3.x +.. startrev: ba569fe1efdb -.. branch: release-2.2.x -.. branch: numpy-newbyteorder -Clean up numpy types, add newbyteorder functionality -.. branch: windows-packaging -Package tk/tcl runtime with win32 - -.. branch: armhf-singlefloat -JIT support for singlefloats on ARM using the hardfloat ABI - -.. branch: voidtype_strformat -Better support for record numpy arrays - -.. branch: osx-eci-frameworks-makefile -OSX: Ensure frameworks end up in Makefile when specified in External compilation info - -.. branch: less-stringly-ops -Use subclasses of SpaceOperation instead of SpaceOperator objects. -Random cleanups in flowspace and annotator. - -.. branch: ndarray-buffer -adds support for the buffer= argument to the ndarray ctor - -.. branch: better_ftime_detect2 -On OpenBSD do not pull in libcompat.a as it is about to be removed. -And more generally, if you have gettimeofday(2) you will not need ftime(3). - -.. branch: timeb_h -Remove dependency upon on OpenBSD. This will be disappearing -along with libcompat.a. - -.. 
branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 -Fix 3 broken links on PyPy published papers in docs. - -.. branch: jit-ordereddict - -.. branch: refactor-str-types -Remove multimethods on str/unicode/bytearray and make the implementations share code. - -.. branch: remove-del-from-generatoriterator -Speed up generators that don't yield inside try or wait blocks by skipping -unnecessary cleanup. - -.. branch: annotator -Remove FlowObjSpace. -Improve cohesion between rpython.flowspace and rpython.annotator. - -.. branch: detect-immutable-fields -mapdicts keep track of whether or not an attribute is every assigned to -multiple times. If it's only assigned once then an elidable lookup is used when -possible. - -.. branch: precompiled-headers -Create a Makefile using precompiled headers for MSVC platforms. -The downside is a messy nmake-compatible Makefile. Since gcc shows minimal -speedup, it was not implemented. - -.. branch: camelot -With a properly configured 256-color terminal (TERM=...-256color), the -Mandelbrot set shown during translation now uses a range of 50 colours. -Essential! - -.. branch: NonConstant -Simplify implementation of NonConstant. - -.. branch: array-propagate-len -Kill some guards and operations in JIT traces by adding integer bounds -propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). - -.. branch: optimize-int-and -Optimize away INT_AND with constant mask of 1s that fully cover the bitrange -of other operand. - -.. branch: bounds-int-add-or -Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the -operands are positive to kill some guards - -.. branch: remove-intlong-smm -kills int/long/smalllong/bool multimethods - -.. branch: numpy-refactor -Cleanup micronumpy module - -.. branch: int_w-refactor -In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. 
We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. - -.. branch: test-58c3d8552833 -Fix for getarrayitem_gc_pure optimization - -.. branch: simple-range-strategy -Implements SimpleRangeListStrategy for case range(n) where n is a positive number. -Makes some traces nicer by getting rid of multiplication for calculating loop counter -and propagates that n > 0 further to get rid of guards. - -.. branch: popen-pclose -Provide an exit status for popen'ed RFiles via pclose - -.. branch: stdlib-2.7.6 -Update stdlib to v2.7.6 - -.. branch: virtual-raw-store-load -Support for virtualizing raw_store/raw_load operations - -.. branch: refactor-buffer-api -Separate the interp-level buffer API from the buffer type exposed to -app-level. The `Buffer` class is now used by `W_MemoryView` and -`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was -an alias to `Buffer`, which was wrappable itself. - -.. branch: improve-consecutive-dict-lookups -Improve the situation when dict lookups of the same key are performed in a chain - -.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 -.. branch: test_SetFromErrnoWithFilename_NULL -.. branch: test_SetFromErrnoWithFilename__tweaks - -.. branch: refactor_PyErr_SetFromErrnoWithFilename -Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext - -.. branch: win32-fixes4 -fix more tests for win32 - -.. branch: latest-improve-doc -Fix broken links in documentation - -.. branch: ast-issue1673 -fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field - -.. branch: issue1514 -Fix issues with reimporting builtin modules - -.. branch: numpypy-nditer -Implement the core of nditer, without many of the fancy flags (external_loop, buffered) - -.. branch: numpy-speed -Separate iterator from its state so jit can optimize better +.. 
branch: small-unroll-improvements +Improve optimiziation of small allocation-heavy loops in the JIT diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -89,8 +89,23 @@ http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with -pypy. Unpack this folder in the base directory. Then open a command -prompt:: +pypy. Unpack this folder in the base directory. +The default GC_abort(...) function in misc.c will try to open a MessageBox. +You may want to disable this with the following patch:: + + --- a/misc.c Sun Apr 20 14:08:27 2014 +0300 + +++ b/misc.c Sun Apr 20 14:08:37 2014 +0300 + @@ -1058,7 +1058,7 @@ + #ifndef PCR + void GC_abort(const char *msg) + { + -# if defined(MSWIN32) + +# if 0 && defined(MSWIN32) + (void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK); + # else + GC_err_printf("%s\n", msg); + +Then open a command prompt:: cd gc-7.1 nmake -f NT_THREADS_MAKEFILE diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -26,5 +26,7 @@ # did not crash - the same globals pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) - pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) + assert lltype.typeOf(res) == rffi.LONG + assert rffi.cast(lltype.Signed, res) == 0 lltype.free(lls, flavor='raw') diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop, ufuncs +from pypy.module.micronumpy import descriptor, loop 
from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,7 +42,6 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support -from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1,7 +1,8 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ - WrappedDefault -from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr + WrappedDefault +from pypy.interpreter.typedef import TypeDef, GetSetProperty, \ + make_weakref_descr from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder from rpython.rlib.rawstorage import RAW_STORAGE_PTR @@ -12,10 +13,10 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.arrayops import repeat, choose, put from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, \ - ArrayArgumentException, wrap_impl + ArrayArgumentException, wrap_impl from pypy.module.micronumpy.concrete import BaseConcreteArray -from pypy.module.micronumpy.converters import order_converter, shape_converter, \ - multi_axis_converter +from pypy.module.micronumpy.converters import multi_axis_converter, \ + order_converter, shape_converter from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.flatiter import W_FlatIterator from pypy.module.micronumpy.strides import get_shape_from_iterable, \ @@ -33,15 +34,14 @@ right_critical_dim = len(right_shape) - 2 right_critical_dim_size = 
right_shape[right_critical_dim] assert right_critical_dim >= 0 - out_shape = out_shape + left_shape[:-1] + \ - right_shape[0:right_critical_dim] + \ - right_shape[right_critical_dim + 1:] + out_shape = (out_shape + left_shape[:-1] + + right_shape[0:right_critical_dim] + + right_shape[right_critical_dim + 1:]) elif len(right_shape) > 0: #dot does not reduce for scalars out_shape = out_shape + left_shape[:-1] if my_critical_dim_size != right_critical_dim_size: - raise OperationError(space.w_ValueError, space.wrap( - "objects are not aligned")) + raise oefmt(space.w_ValueError, "objects are not aligned") return out_shape, right_critical_dim @@ -55,8 +55,8 @@ return self.implementation.get_shape() def descr_set_shape(self, space, w_new_shape): - self.implementation = self.implementation.set_shape(space, self, - get_shape_from_iterable(space, self.get_size(), w_new_shape)) + shape = get_shape_from_iterable(space, self.get_size(), w_new_shape) + self.implementation = self.implementation.set_shape(space, self, shape) def descr_get_strides(self, space): strides = self.implementation.get_strides() @@ -72,8 +72,8 @@ return self.implementation.dtype def descr_set_dtype(self, space, w_dtype): - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) if (dtype.elsize != self.get_dtype().elsize or dtype.is_flexible() or self.get_dtype().is_flexible()): raise OperationError(space.w_ValueError, space.wrap( @@ -115,7 +115,8 @@ res_shape = [size] + self.get_shape()[1:] else: res_shape = [size] - w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_instance=self) + w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), + w_instance=self) return loop.getitem_filter(w_res, self, arr) def setitem_filter(self, space, idx, val): @@ -128,10 +129,10 @@ size = loop.count_all_true(idx) if size > 
val.get_size() and val.get_size() != 1: raise oefmt(space.w_ValueError, - "NumPy boolean array indexing assignment " - "cannot assign %d input values to " - "the %d output values where the mask is true", - val.get_size(), size) + "NumPy boolean array indexing assignment " + "cannot assign %d input values to " + "the %d output values where the mask is true", + val.get_size(), size) loop.setitem_filter(space, self, idx, val) def _prepare_array_index(self, space, w_index): @@ -151,7 +152,7 @@ prefix = [] for i, w_item in enumerate(w_lst): if (isinstance(w_item, W_NDimArray) or - space.isinstance_w(w_item, space.w_list)): + space.isinstance_w(w_item, space.w_list)): w_item = convert_to_array(space, w_item) if shape is None: shape = w_item.get_shape() @@ -163,7 +164,7 @@ arr_index_in_shape = True else: if space.isinstance_w(w_item, space.w_slice): - _, _, _, lgt = space.decode_index4(w_item, self.get_shape()[i]) + lgt = space.decode_index4(w_item, self.get_shape()[i])[3] if not arr_index_in_shape: prefix.append(w_item) res_shape.append(lgt) @@ -178,7 +179,7 @@ def getitem_array_int(self, space, w_index): prefix, res_shape, iter_shape, indexes = \ - self._prepare_array_index(space, w_index) + self._prepare_array_index(space, w_index) if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) @@ -194,7 +195,7 @@ def setitem_array_int(self, space, w_index, w_value): val_arr = convert_to_array(space, w_value) prefix, _, iter_shape, indexes = \ - self._prepare_array_index(space, w_index) + self._prepare_array_index(space, w_index) if iter_shape is None: # w_index is a list of slices chunks = self.implementation._prepare_slice_args(space, w_index) @@ -331,8 +332,8 @@ def descr_set_imag(self, space, w_value): # if possible, copy (broadcast) values into self if not self.get_dtype().is_complex(): - raise OperationError(space.w_TypeError, - space.wrap('array does not have imaginary part to set')) + raise 
oefmt(space.w_TypeError, + 'array does not have imaginary part to set') self.implementation.set_imag(space, self, w_value) def reshape(self, space, w_shape): @@ -481,7 +482,7 @@ assert isinstance(w_obj, boxes.W_GenericBox) return w_obj.item(space) raise oefmt(space.w_ValueError, - "can only convert an array of size 1 to a Python scalar") + "can only convert an array of size 1 to a Python scalar") elif len(args_w) == 1 and len(shape) != 1: value = support.index_w(space, args_w[0]) value = support.check_and_adjust_index(space, value, self.get_size(), -1) @@ -533,6 +534,7 @@ return w_d w_pypy_data = None + def fget___pypy_data__(self, space): return self.w_pypy_data @@ -556,16 +558,16 @@ def descr_astype(self, space, w_dtype): cur_dtype = self.get_dtype() - new_dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + new_dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) if new_dtype.num == NPY.VOID: raise oefmt(space.w_NotImplementedError, "astype(%s) not implemented yet", new_dtype.get_name()) if new_dtype.num == NPY.STRING and new_dtype.elsize == 0: if cur_dtype.num == NPY.STRING: - new_dtype = descriptor.variable_dtype(space, - 'S' + str(cur_dtype.elsize)) + new_dtype = descriptor.variable_dtype( + space, 'S' + str(cur_dtype.elsize)) impl = self.implementation new_impl = impl.astype(space, new_dtype) return wrap_impl(space, space.type(self), self, new_impl) @@ -583,7 +585,8 @@ loop.byteswap(self.implementation, self.implementation) return self else: - w_res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), w_instance=self) + w_res = W_NDimArray.from_shape(space, self.get_shape(), + self.get_dtype(), w_instance=self) loop.byteswap(self.implementation, w_res.implementation) return w_res @@ -599,8 +602,7 @@ min = convert_to_array(space, w_min) max = convert_to_array(space, w_max) shape = shape_agreement_multiple(space, [self, min, 
max, w_out]) - out = descriptor.dtype_agreement(space, [self, min, max], shape, - w_out) + out = descriptor.dtype_agreement(space, [self, min, max], shape, w_out) loop.clip(space, self, shape, min, max, out) return out @@ -620,15 +622,14 @@ raise OperationError(space.w_ValueError, space.wrap( "need at least 2 dimensions for diagonal")) if (axis1 < 0 or axis2 < 0 or axis1 >= len(self.get_shape()) or - axis2 >= len(self.get_shape())): + axis2 >= len(self.get_shape())): raise oefmt(space.w_ValueError, "axis1(=%d) and axis2(=%d) must be withing range " "(ndim=%d)", axis1, axis2, len(self.get_shape())) if axis1 == axis2: raise OperationError(space.w_ValueError, space.wrap( "axis1 and axis2 cannot be the same")) - return arrayops.diagonal(space, self.implementation, offset, - axis1, axis2) + return arrayops.diagonal(space, self.implementation, offset, axis1, axis2) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_trace(self, space, offset=0, axis1=0, axis2=1, @@ -645,6 +646,7 @@ "dumps not implemented yet")) w_flags = None + def descr_get_flags(self, space): if self.w_flags is None: self.w_flags = W_FlagsObject(self) @@ -657,8 +659,8 @@ @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY.SWAP): - return self.descr_view(space, - self.get_dtype().descr_newbyteorder(space, new_order)) + return self.descr_view( + space, self.get_dtype().descr_newbyteorder(space, new_order)) @unwrap_spec(w_axis=WrappedDefault(None), w_out=WrappedDefault(None)) @@ -677,29 +679,49 @@ def descr_round(self, space, decimals=0, w_out=None): if space.is_none(w_out): if self.get_dtype().is_bool(): - #numpy promotes bool.round() to float16. Go figure. + # numpy promotes bool.round() to float16. Go figure. 
w_out = W_NDimArray.from_shape(space, self.get_shape(), - descriptor.get_dtype_cache(space).w_float16dtype) + descriptor.get_dtype_cache(space).w_float16dtype) else: w_out = None elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) - out = descriptor.dtype_agreement(space, [self], self.get_shape(), - w_out) + out = descriptor.dtype_agreement(space, [self], self.get_shape(), w_out) if out.get_dtype().is_bool() and self.get_dtype().is_bool(): calc_dtype = descriptor.get_dtype_cache(space).w_longdtype else: calc_dtype = out.get_dtype() if decimals == 0: - out = out.descr_view(space,space.type(self)) + out = out.descr_view(space, space.type(self)) loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out - def descr_searchsorted(self, space, w_v, w_side='left'): - raise OperationError(space.w_NotImplementedError, space.wrap( - "searchsorted not implemented yet")) + @unwrap_spec(side=str, w_sorter=WrappedDefault(None)) + def descr_searchsorted(self, space, w_v, side='left', w_sorter=None): + if not space.is_none(w_sorter): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'sorter not supported in searchsort')) + if not side or len(side) < 1: + raise OperationError(space.w_ValueError, space.wrap( + "expected nonempty string for keyword 'side'")) + elif side[0] == 'l' or side[0] == 'L': + side = 'l' + elif side[0] == 'r' or side[0] == 'R': + side = 'r' + else: + raise oefmt(space.w_ValueError, + "'%s' is an invalid value for keyword 'side'", side) + if len(self.get_shape()) > 1: + raise oefmt(space.w_ValueError, "a must be a 1-d array") + v = convert_to_array(space, w_v) + if len(v.get_shape()) > 1: + raise oefmt(space.w_ValueError, "v must be a 1-d array-like") + ret = W_NDimArray.from_shape( + space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) + app_searchsort(space, self, v, space.wrap(side), ret) + return ret def descr_setasflat(self, 
space, w_v): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -730,7 +752,7 @@ if axes[i]: if cur_shape[i] != 1: raise OperationError(space.w_ValueError, space.wrap( - "cannot select an axis to squeeze out " \ + "cannot select an axis to squeeze out " "which has size greater than one")) else: new_shape.append(cur_shape[i]) @@ -763,9 +785,8 @@ else: raise if w_dtype: - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), - w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) else: dtype = self.get_dtype() old_itemsize = self.get_dtype().elsize @@ -807,8 +828,8 @@ def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) descr_pos = _unaryop_impl("positive") @@ -821,14 +842,15 @@ def descr___nonzero__(self, space): if self.get_size() > 1: raise OperationError(space.w_ValueError, space.wrap( - "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()")) + "The truth value of an array with more than one element " + "is ambiguous. 
Use a.any() or a.all()")) iter, state = self.create_iter() return space.wrap(space.is_true(iter.getitem(state))) def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_other, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) descr_add = _binop_impl("add") @@ -892,7 +914,8 @@ def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) - return getattr(ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -918,8 +941,7 @@ if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): - raise OperationError(space.w_TypeError, space.wrap( - 'output must be an array')) + raise oefmt(space.w_TypeError, 'output must be an array') else: out = w_out other = convert_to_array(space, w_other) @@ -931,7 +953,7 @@ assert isinstance(w_res, W_NDimArray) return w_res.descr_sum(space, space.wrap(-1), out) dtype = ufuncs.find_binop_result_dtype(space, self.get_dtype(), - other.get_dtype()) + other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: # numpy compatability return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) @@ -963,16 +985,16 @@ other_critical_dim) def descr_mean(self, space, __args__): - return get_appbridge_cache(space).call_method(space, - 'numpy.core._methods', '_mean', __args__.prepend(self)) + return get_appbridge_cache(space).call_method( + space, 'numpy.core._methods', '_mean', __args__.prepend(self)) def descr_var(self, space, __args__): - return get_appbridge_cache(space).call_method(space, - 'numpy.core._methods', '_var', __args__.prepend(self)) + return 
get_appbridge_cache(space).call_method( + space, 'numpy.core._methods', '_var', __args__.prepend(self)) def descr_std(self, space, __args__): - return get_appbridge_cache(space).call_method(space, - 'numpy.core._methods', '_std', __args__.prepend(self)) + return get_appbridge_cache(space).call_method( + space, 'numpy.core._methods', '_std', __args__.prepend(self)) # ----------------------- reduce ------------------------------- @@ -982,8 +1004,7 @@ if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): - raise OperationError(space.w_TypeError, space.wrap( - 'output must be an array')) + raise oefmt(space.w_TypeError, 'output must be an array') else: out = w_out return getattr(ufuncs.get(space), ufunc_name).reduce( @@ -1005,13 +1026,13 @@ def impl(self, space, w_axis=None, w_out=None): if not space.is_none(w_axis): raise oefmt(space.w_NotImplementedError, - "axis unsupported for %s", op_name) + "axis unsupported for %s", op_name) if not space.is_none(w_out): raise oefmt(space.w_NotImplementedError, - "out unsupported for %s", op_name) + "out unsupported for %s", op_name) if self.get_size() == 0: raise oefmt(space.w_ValueError, - "Can't call %s on zero-size arrays", op_name) + "Can't call %s on zero-size arrays", op_name) try: getattr(self.get_dtype().itemtype, raw_name) except AttributeError: @@ -1094,8 +1115,8 @@ multiarray = numpypy.get("multiarray") assert isinstance(multiarray, MixedModule) reconstruct = multiarray.get("_reconstruct") - parameters = space.newtuple([self.getclass(space), - space.newtuple([space.wrap(0)]), space.wrap("b")]) + parameters = space.newtuple([self.getclass(space), space.newtuple( + [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() if isinstance(self.implementation, SliceArray): @@ -1105,15 +1126,16 @@ builder.append(box.raw_str()) state = iter.next(state) else: - builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) + 
builder.append_charpsize(self.implementation.get_storage(), + self.implementation.get_storage_size()) state = space.newtuple([ - space.wrap(1), # version - self.descr_get_shape(space), - self.get_dtype(), - space.wrap(False), # is_fortran - space.wrap(builder.build()), - ]) + space.wrap(1), # version + self.descr_get_shape(space), + self.get_dtype(), + space.wrap(False), # is_fortran + space.wrap(builder.build()), + ]) return space.newtuple([reconstruct, parameters, state]) @@ -1126,19 +1148,20 @@ base_index = 0 else: raise oefmt(space.w_ValueError, - "__setstate__ called with len(args[1])==%d, not 5 or 4", lens) + "__setstate__ called with len(args[1])==%d, not 5 or 4", + lens) shape = space.getitem(w_state, space.wrap(base_index)) dtype = space.getitem(w_state, space.wrap(base_index+1)) #isfortran = space.getitem(w_state, space.wrap(base_index+2)) storage = space.getitem(w_state, space.wrap(base_index+3)) if not isinstance(dtype, descriptor.W_Dtype): raise oefmt(space.w_ValueError, - "__setstate__(self, (shape, dtype, .. called with " - "improper dtype '%R'", dtype) - self.implementation = W_NDimArray.from_shape_and_storage(space, - [space.int_w(i) for i in space.listview(shape)], - rffi.str2charp(space.str_w(storage), track_allocation=False), - dtype, owning=True).implementation + "__setstate__(self, (shape, dtype, .. 
called with " + "improper dtype '%R'", dtype) + self.implementation = W_NDimArray.from_shape_and_storage( + space, [space.int_w(i) for i in space.listview(shape)], + rffi.str2charp(space.str_w(storage), track_allocation=False), + dtype, owning=True).implementation def descr___array_finalize__(self, space, w_obj): pass @@ -1156,8 +1179,8 @@ offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy.strides import calc_strides - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = shape_converter(space, w_shape, dtype) if not space.is_none(w_buffer): @@ -1193,8 +1216,7 @@ if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) strides, backstrides = calc_strides(shape, dtype.base, order) - impl = ConcreteArray(shape, dtype.base, order, strides, - backstrides) + impl = ConcreteArray(shape, dtype.base, order, strides, backstrides) w_ret = space.allocate_instance(W_NDimArray, w_subtype) W_NDimArray.__init__(w_ret, impl) space.call_function(space.getattr(w_ret, @@ -1209,16 +1231,15 @@ PyPy-only implementation detail. 
""" storage = rffi.cast(RAW_STORAGE_PTR, addr) - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), - w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, space.call_function( + space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = shape_converter(space, w_shape, dtype) if w_subtype: if not space.isinstance_w(w_subtype, space.w_type): raise OperationError(space.w_ValueError, space.wrap( "subtype must be a subtype of ndarray, not a class instance")) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - 'C', False, w_subtype) + 'C', False, w_subtype) else: return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) @@ -1252,6 +1273,31 @@ return res """, filename=__file__).interphook('ptp') +app_searchsort = applevel(r""" + def searchsort(arr, v, side, result): + import operator + def func(a, op, val): + imin = 0 + imax = a.size + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if op(a[imid], val): + imin = imid +1 + else: + imax = imid + return imin + if side == 'l': + op = operator.lt + else: + op = operator.le + if v.size < 2: + result[...] 
= func(arr, op, v) + else: + for i in range(v.size): + result[i] = func(arr, op, v[i]) + return result +""", filename=__file__).interphook('searchsort') + W_NDimArray.typedef = TypeDef("ndarray", __module__ = "numpy", __new__ = interp2app(descr_new_array), @@ -1355,6 +1401,7 @@ dot = interp2app(W_NDimArray.descr_dot), var = interp2app(W_NDimArray.descr_var), std = interp2app(W_NDimArray.descr_std), + searchsorted = interp2app(W_NDimArray.descr_searchsorted), cumsum = interp2app(W_NDimArray.descr_cumsum), cumprod = interp2app(W_NDimArray.descr_cumprod), diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -341,7 +341,7 @@ assert (x == y).all() def test_string_mergesort(self): - import numpypy as np + import numpy as np import sys x = np.array(['a'] * 32) if '__pypy__' in sys.builtin_module_names: @@ -349,3 +349,16 @@ assert 'non-numeric types' in exc.value.message else: assert (x.argsort(kind='m') == np.arange(32)).all() + + def test_searchsort(self): + from numpy import arange + import sys + a = arange(1, 6) + ret = a.searchsorted(3) + assert ret == 2 + ret = a.searchsorted(3, side='right') + assert ret == 3 + ret = a.searchsorted([-10, 10, 2, 3]) + assert (ret == [0, 5, 1, 2]).all() + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, "a.searchsorted(3, sorter=range(6))") diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1039,28 +1039,6 @@ init_defaults = [None, None, None] -# XXX consider moving to W_BytearrayObject or remove -def str_join__Bytearray_ANY(space, w_self, w_list): - list_w = space.listview(w_list) - if not list_w: - return W_BytearrayObject([]) - data = w_self.data - newdata = [] - for i in range(len(list_w)): - w_s = list_w[i] - if not 
(space.isinstance_w(w_s, space.w_str) or - space.isinstance_w(w_s, space.w_bytearray)): - raise oefmt(space.w_TypeError, - "sequence item %d: expected string, %T found", i, w_s) - - if data and i != 0: - newdata.extend(data) - newdata.extend([c for c in space.bufferstr_new_w(w_s)]) - return W_BytearrayObject(newdata) - -_space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) - - # XXX share the code again with the stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): if slicelength == 0: diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -106,7 +106,7 @@ def compile_loop(metainterp, greenkey, start, inputargs, jumpargs, - resume_at_jump_descr, full_preamble_needed=True, + full_preamble_needed=True, try_disabling_unroll=False): """Try to compile a new procedure by closing the current history back to the first operation. @@ -128,7 +128,6 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] h_ops = history.operations - part.resume_at_jump_descr = resume_at_jump_descr part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ [h_ops[i].clone() for i in range(start, len(h_ops))] + \ [ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)] @@ -187,7 +186,7 @@ def compile_retrace(metainterp, greenkey, start, inputargs, jumpargs, - resume_at_jump_descr, partial_trace, resumekey): + partial_trace, resumekey): """Try to compile a new procedure by closing the current history back to the first operation. 
""" @@ -203,7 +202,6 @@ part = create_empty_loop(metainterp) part.inputargs = inputargs[:] - part.resume_at_jump_descr = resume_at_jump_descr h_ops = history.operations part.operations = [partial_trace.operations[-1]] + \ @@ -765,7 +763,7 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey, resume_at_jump_descr=None): +def compile_trace(metainterp, resumekey): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ @@ -781,7 +779,6 @@ # clone ops, as optimize_bridge can mutate the ops new_trace.operations = [op.clone() for op in metainterp.history.operations] - new_trace.resume_at_jump_descr = resume_at_jump_descr metainterp_sd = metainterp.staticdata state = metainterp.jitdriver_sd.warmstate if isinstance(resumekey, ResumeAtPositionDescr): diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -628,7 +628,6 @@ call_pure_results = None logops = None quasi_immutable_deps = None - resume_at_jump_descr = None def _token(*args): raise Exception("TreeLoop.token is killed") diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -31,6 +31,12 @@ def clone(self): return LenBound(self.mode, self.descr, self.bound.clone()) + def generalization_of(self, other): + return (other is not None and + self.mode == other.mode and + self.descr == other.descr and + self.bound.contains_bound(other.bound)) + class OptValue(object): __metaclass__ = extendabletype _attrs_ = ('box', 'known_class', 'last_guard', 'level', 'intbound', 'lenbound') @@ -129,13 +135,21 @@ def force_at_end_of_preamble(self, already_forced, optforce): return self - def get_args_for_fail(self, modifier): + # visitor API + + def visitor_walk_recursive(self, 
visitor): pass - def make_virtual_info(self, modifier, fieldnums): - #raise NotImplementedError # should not be called on this level - assert fieldnums is None - return modifier.make_not_virtual(self) + @specialize.argtype(1) + def visitor_dispatch_virtual_type(self, visitor): + if self.is_virtual(): + return self._visitor_dispatch_virtual_type(visitor) + else: + return visitor.visit_not_virtual(self) + + @specialize.argtype(1) + def _visitor_dispatch_virtual_type(self, visitor): + assert 0, "unreachable" def is_constant(self): return self.level == LEVEL_CONSTANT diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -543,6 +543,9 @@ return self.emit_operation(op) + def optimize_GUARD_FUTURE_CONDITION(self, op): + pass # just remove it + def optimize_INT_FLOORDIV(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -61,6 +61,9 @@ op.setdescr(descr.target_tokens[0]) self.emit_operation(op) + def optimize_GUARD_FUTURE_CONDITION(self, op): + pass + dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) OptSimplify.propagate_forward = dispatch_opt diff --git a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -1,6 +1,6 @@ from __future__ import with_statement from rpython.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, Storage, _sortboxes, FakeDescrWithSnapshot, + LLtypeMixin, BaseTest, Storage, 
_sortboxes, FakeMetaInterpStaticData) from rpython.jit.metainterp.history import TreeLoop, JitCellToken, TargetToken from rpython.jit.metainterp.resoperation import rop, opname, ResOperation @@ -8,6 +8,8 @@ from py.test import raises from rpython.jit.metainterp.optimizeopt.optimizer import Optimization from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.optimizeopt.heap import OptHeap +from rpython.jit.metainterp.optimizeopt.rewrite import OptRewrite class BaseTestMultiLabel(BaseTest): @@ -20,7 +22,6 @@ part = TreeLoop('part') part.inputargs = loop.inputargs - part.resume_at_jump_descr = FakeDescrWithSnapshot() token = loop.original_jitcell_token optimized = TreeLoop('optimized') @@ -42,6 +43,7 @@ operations.append(label) part.operations = operations + self.add_guard_future_condition(part) self._do_optimize_loop(part, None) if part.operations[-1].getopnum() == rop.LABEL: last_label = [part.operations.pop()] @@ -502,7 +504,7 @@ self.loop = loop loop.call_pure_results = args_dict() metainterp_sd = FakeMetaInterpStaticData(self.cpu) - optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True) + optimize_unroll(metainterp_sd, loop, [OptRewrite(), OptRenameStrlen(), OptHeap(), OptPure()], True) def test_optimizer_renaming_boxes1(self): ops = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -61,24 +61,6 @@ lst6 = virt1._get_field_descr_list() assert lst6 is lst3 -def test_reuse_vinfo(): - class FakeVInfo(object): - def set_content(self, fieldnums): - self.fieldnums = fieldnums - def equals(self, fieldnums): - return self.fieldnums == fieldnums - class FakeVirtualValue(virtualize.AbstractVirtualValue): - def _make_virtual(self, *args): - return FakeVInfo() - v1 = 
FakeVirtualValue(None, None) - vinfo1 = v1.make_virtual_info(None, [1, 2, 4]) - vinfo2 = v1.make_virtual_info(None, [1, 2, 4]) - assert vinfo1 is vinfo2 - vinfo3 = v1.make_virtual_info(None, [1, 2, 6]) - assert vinfo3 is not vinfo2 - vinfo4 = v1.make_virtual_info(None, [1, 2, 6]) - assert vinfo3 is vinfo4 - def test_descrlist_dict(): from rpython.jit.metainterp.optimizeopt import util as optimizeutil h1 = optimizeutil.descrlist_hash([]) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -51,7 +51,8 @@ if expected_preamble: expected_preamble = self.parse(expected_preamble) if expected_short: - expected_short = self.parse(expected_short) + # the short preamble doesn't have fail descrs, they are patched in when it is used + expected_short = self.parse(expected_short, want_fail_descr=False) preamble = self.unroll_and_optimize(loop, call_pure_results) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -355,11 +355,21 @@ class BaseTest(object): - def parse(self, s, boxkinds=None): + def parse(self, s, boxkinds=None, want_fail_descr=True): + if want_fail_descr: + invent_fail_descr = self.invent_fail_descr + else: + invent_fail_descr = lambda *args: None return parse(s, self.cpu, self.namespace, type_system=self.type_system, boxkinds=boxkinds, - invent_fail_descr=self.invent_fail_descr) + invent_fail_descr=invent_fail_descr) + + def add_guard_future_condition(self, res): + # invent a GUARD_FUTURE_CONDITION to not have to change all tests + if res.operations[-1].getopnum() == rop.JUMP: + guard = ResOperation(rop.GUARD_FUTURE_CONDITION, [], None, 
descr=self.invent_fail_descr(None, -1, [])) + res.operations.insert(-1, guard) def invent_fail_descr(self, model, opnum, fail_args): if fail_args is None: @@ -397,6 +407,7 @@ optimize_trace(metainterp_sd, loop, self.enable_opts) def unroll_and_optimize(self, loop, call_pure_results=None): + self.add_guard_future_condition(loop) operations = loop.operations jumpop = operations[-1] assert jumpop.getopnum() == rop.JUMP @@ -408,7 +419,6 @@ preamble = TreeLoop('preamble') preamble.inputargs = inputargs - preamble.resume_at_jump_descr = FakeDescrWithSnapshot() token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ @@ -419,7 +429,6 @@ assert preamble.operations[-1].getopnum() == rop.LABEL inliner = Inliner(inputargs, jump_args) - loop.resume_at_jump_descr = preamble.resume_at_jump_descr loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], @@ -450,18 +459,6 @@ def __eq__(self, other): return isinstance(other, FakeDescr) -class FakeDescrWithSnapshot(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescrWithSnapshot() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescrWithSnapshot) - - def convert_old_style_to_targets(loop, jump): newloop = TreeLoop(loop.name) newloop.inputargs = loop.inputargs diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -1,43 +1,103 @@ from __future__ import with_statement import py -from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.optimizeopt.virtualstate import 
VirtualStateInfo, VStructStateInfo, \ - VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes + VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes, GenerateGuardState, \ + VirtualStatesCantMatch, VArrayStructStateInfo from rpython.jit.metainterp.optimizeopt.optimizer import OptValue from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin, BaseTest, \ - equaloplists, FakeDescrWithSnapshot + equaloplists from rpython.jit.metainterp.optimizeopt.intutils import IntBound +from rpython.jit.metainterp.optimizeopt.virtualize import (VirtualValue, + VArrayValue, VStructValue, VArrayStructValue) from rpython.jit.metainterp.history import TreeLoop, JitCellToken from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import ResOperation, rop -class TestBasic: - someptr1 = LLtypeMixin.myptr - someptr2 = LLtypeMixin.myptr2 +class BaseTestGenerateGuards(BaseTest): + + def _box_or_value(self, box_or_value=None): + if box_or_value is None: + return None, None + elif isinstance(box_or_value, OptValue): + value = box_or_value + box = value.box + else: + box = box_or_value + value = OptValue(box) + return value, box + + def guards(self, info1, info2, box_or_value, expected, inputargs=None): + value, box = self._box_or_value(box_or_value) + if inputargs is None: + inputargs = [box] + info1.position = info2.position = 0 + state = GenerateGuardState(self.cpu) + info1.generate_guards(info2, value, state) + self.compare(state.extra_guards, expected, inputargs) + + def compare(self, guards, expected, inputargs): + loop = self.parse(expected) + boxmap = {} + assert len(loop.inputargs) == len(inputargs) + for a, b in zip(loop.inputargs, inputargs): + boxmap[a] = b + for op in loop.operations: + if op.is_guard(): + op.setdescr(None) + 
assert equaloplists(guards, loop.operations, False, + boxmap) + + def check_no_guards(self, info1, info2, box_or_value=None, state=None): + value, _ = self._box_or_value(box_or_value) + if info1.position == -1: + info1.position = 0 + if info2.position == -1: + info2.position = 0 + if state is None: + state = GenerateGuardState(self.cpu) + info1.generate_guards(info2, value, state) + assert not state.extra_guards + return state + + def check_invalid(self, info1, info2, box_or_value=None, state=None): + value, _ = self._box_or_value(box_or_value) + if info1.position == -1: + info1.position = 0 + if info2.position == -1: + info2.position = 0 + if state is None: + state = GenerateGuardState(self.cpu) + with py.test.raises(VirtualStatesCantMatch): + info1.generate_guards(info2, value, state) + def test_position_generalization(self): def postest(info1, info2): info1.position = 0 - assert info1.generalization_of(info1, {}, {}) + self.check_no_guards(info1, info1) info2.position = 0 - assert info1.generalization_of(info2, {}, {}) + self.check_no_guards(info1, info2) info2.position = 1 - renum = {} - assert info1.generalization_of(info2, renum, {}) - assert renum == {0:1} - assert info1.generalization_of(info2, {0:1}, {}) - assert info1.generalization_of(info2, {1:1}, {}) - bad = {} - assert not info1.generalization_of(info2, {0:0}, bad) - assert info1 in bad and info2 in bad + state = self.check_no_guards(info1, info2) + assert state.renum == {0:1} + + assert self.check_no_guards(info1, info2, state=state) + + # feed fake renums + state.renum = {1: 1} + self.check_no_guards(info1, info2, state=state) + + state.renum = {0: 0} + self.check_invalid(info1, info2, state=state) + assert info1 in state.bad and info2 in state.bad for BoxType in (BoxInt, BoxFloat, BoxPtr): info1 = NotVirtualStateInfo(OptValue(BoxType())) info2 = NotVirtualStateInfo(OptValue(BoxType())) postest(info1, info2) - + info1, info2 = VArrayStateInfo(42), VArrayStateInfo(42) info1.fieldstate = 
info2.fieldstate = [] postest(info1, info2) @@ -56,7 +116,7 @@ info1.position = 0 info2 = NotVirtualStateInfo(value2) info2.position = 0 - return info1.generalization_of(info2, {}, {}) + return VirtualState([info1]).generalization_of(VirtualState([info2]), cpu=self.cpu) assert isgeneral(OptValue(BoxInt()), OptValue(ConstInt(7))) assert not isgeneral(OptValue(ConstInt(7)), OptValue(BoxInt())) @@ -65,10 +125,11 @@ nonnull = OptValue(BoxPtr()) nonnull.make_nonnull(0) knownclass = OptValue(BoxPtr()) - knownclass.make_constant_class(ConstPtr(self.someptr1), 0) + clsbox = self.cpu.ts.cls_of_box(BoxPtr(self.myptr)) + knownclass.make_constant_class(clsbox, 0) const = OptValue(BoxPtr) - const.make_constant_class(ConstPtr(self.someptr1), 0) - const.make_constant(ConstPtr(self.someptr1)) + const.make_constant_class(clsbox, 0) + const.make_constant(ConstPtr(self.myptr)) inorder = [ptr, nonnull, knownclass, const] for i in range(len(inorder)): for j in range(i, len(inorder)): @@ -91,48 +152,51 @@ value1 = OptValue(BoxPtr()) value1.make_nonnull(None) - value2 = OptValue(ConstPtr(LLtypeMixin.nullptr)) + value2 = OptValue(ConstPtr(self.nullptr)) assert not isgeneral(value1, value2) def test_field_matching_generalization(self): const1 = NotVirtualStateInfo(OptValue(ConstInt(1))) const2 = NotVirtualStateInfo(OptValue(ConstInt(2))) const1.position = const2.position = 1 - assert not const1.generalization_of(const2, {}, {}) - assert not const2.generalization_of(const1, {}, {}) + self.check_invalid(const1, const2) + self.check_invalid(const2, const1) def fldtst(info1, info2): info1.position = info2.position = 0 info1.fieldstate = [const1] info2.fieldstate = [const2] - assert not info1.generalization_of(info2, {}, {}) - assert not info2.generalization_of(info1, {}, {}) - assert info1.generalization_of(info1, {}, {}) - assert info2.generalization_of(info2, {}, {}) - fldtst(VArrayStateInfo(42), VArrayStateInfo(42)) - fldtst(VStructStateInfo(42, [7]), VStructStateInfo(42, [7])) - 
fldtst(VirtualStateInfo(ConstInt(42), [7]), VirtualStateInfo(ConstInt(42), [7])) + self.check_invalid(info1, info2) + self.check_invalid(info2, info1) + self.check_no_guards(info1, info1) + self.check_no_guards(info2, info2) + fakedescr = object() + fielddescr = object() + fldtst(VArrayStateInfo(fakedescr), VArrayStateInfo(fakedescr)) + fldtst(VStructStateInfo(fakedescr, [fielddescr]), VStructStateInfo(fakedescr, [fielddescr])) + fldtst(VirtualStateInfo(ConstInt(42), [fielddescr]), VirtualStateInfo(ConstInt(42), [fielddescr])) + fldtst(VArrayStructStateInfo(fakedescr, [[fielddescr]]), VArrayStructStateInfo(fakedescr, [[fielddescr]])) def test_known_class_generalization(self): knownclass1 = OptValue(BoxPtr()) - knownclass1.make_constant_class(ConstPtr(self.someptr1), 0) + knownclass1.make_constant_class(ConstPtr(self.myptr), 0) info1 = NotVirtualStateInfo(knownclass1) info1.position = 0 knownclass2 = OptValue(BoxPtr()) - knownclass2.make_constant_class(ConstPtr(self.someptr1), 0) + knownclass2.make_constant_class(ConstPtr(self.myptr), 0) info2 = NotVirtualStateInfo(knownclass2) info2.position = 0 - assert info1.generalization_of(info2, {}, {}) - assert info2.generalization_of(info1, {}, {}) + self.check_no_guards(info1, info2) + self.check_no_guards(info2, info1) knownclass3 = OptValue(BoxPtr()) - knownclass3.make_constant_class(ConstPtr(self.someptr2), 0) + knownclass3.make_constant_class(ConstPtr(self.myptr2), 0) info3 = NotVirtualStateInfo(knownclass3) info3.position = 0 - assert not info1.generalization_of(info3, {}, {}) - assert not info2.generalization_of(info3, {}, {}) - assert not info3.generalization_of(info2, {}, {}) - assert not info3.generalization_of(info1, {}, {}) + self.check_invalid(info1, info3) + self.check_invalid(info2, info3) + self.check_invalid(info3, info2) + self.check_invalid(info3, info1) def test_circular_generalization(self): @@ -140,29 +204,157 @@ VirtualStateInfo(ConstInt(42), [7])): info.position = 0 info.fieldstate = [info] - assert 
info.generalization_of(info, {}, {}) + self.check_no_guards(info, info) -class BaseTestGenerateGuards(BaseTest): - def guards(self, info1, info2, box, expected): - info1.position = info2.position = 0 - guards = [] - info1.generate_guards(info2, box, self.cpu, guards, {}) - self.compare(guards, expected, [box]) + def test_generate_guards_nonvirtual_all_combinations(self): + # set up infos + unknown_val = OptValue(self.nodebox) + unknownnull_val = OptValue(BoxPtr(self.nullptr)) + unknown_info = NotVirtualStateInfo(unknown_val) - def compare(self, guards, expected, inputargs): - loop = self.parse(expected) - boxmap = {} - assert len(loop.inputargs) == len(inputargs) - for a, b in zip(loop.inputargs, inputargs): - boxmap[a] = b - for op in loop.operations: - if op.is_guard(): - op.setdescr(None) - assert equaloplists(guards, loop.operations, False, - boxmap) + nonnull_val = OptValue(self.nodebox) + nonnull_val.make_nonnull(None) + nonnull_info = NotVirtualStateInfo(nonnull_val) + + knownclass_val = OptValue(self.nodebox) + classbox = self.cpu.ts.cls_of_box(self.nodebox) + knownclass_val.make_constant_class(classbox, -1) + knownclass_info = NotVirtualStateInfo(knownclass_val) + knownclass2_val = OptValue(self.nodebox2) + classbox = self.cpu.ts.cls_of_box(self.nodebox2) + knownclass2_val.make_constant_class(classbox, -1) + knownclass2_info = NotVirtualStateInfo(knownclass2_val) + + constant_val = OptValue(BoxInt()) + constant_val.make_constant(ConstInt(1)) + constant_info = NotVirtualStateInfo(constant_val) + constclass_val = OptValue(self.nodebox) + constclass_val.make_constant(self.nodebox.constbox()) + constclass_info = NotVirtualStateInfo(constclass_val) + constclass2_val = OptValue(self.nodebox) + constclass2_val.make_constant(self.nodebox2.constbox()) + constclass2_info = NotVirtualStateInfo(constclass2_val) + constantnull_val = OptValue(ConstPtr(self.nullptr)) + constantnull_info = NotVirtualStateInfo(constantnull_val) + + # unknown unknown + 
self.check_no_guards(unknown_info, unknown_info, unknown_val) + self.check_no_guards(unknown_info, unknown_info) + + # unknown nonnull + self.check_no_guards(unknown_info, nonnull_info, nonnull_val) + self.check_no_guards(unknown_info, nonnull_info) + + # unknown knownclass + self.check_no_guards(unknown_info, knownclass_info, knownclass_val) + self.check_no_guards(unknown_info, knownclass_info) + + # unknown constant + self.check_no_guards(unknown_info, constant_info, constant_val) + self.check_no_guards(unknown_info, constant_info) + + + # nonnull unknown + expected = """ + [p0] + guard_nonnull(p0) [] + """ + self.guards(nonnull_info, unknown_info, unknown_val, expected) + self.check_invalid(nonnull_info, unknown_info, unknownnull_val) + self.check_invalid(nonnull_info, unknown_info) + self.check_invalid(nonnull_info, unknown_info) + + # nonnull nonnull + self.check_no_guards(nonnull_info, nonnull_info, nonnull_val) + self.check_no_guards(nonnull_info, nonnull_info, nonnull_val) + + # nonnull knownclass + self.check_no_guards(nonnull_info, knownclass_info, knownclass_val) + self.check_no_guards(nonnull_info, knownclass_info) + + # nonnull constant + self.check_no_guards(nonnull_info, constant_info, constant_val) + self.check_invalid(nonnull_info, constantnull_info, constantnull_val) + self.check_no_guards(nonnull_info, constant_info) + self.check_invalid(nonnull_info, constantnull_info) + + + # knownclass unknown + expected = """ + [p0] + guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) [] + """ + self.guards(knownclass_info, unknown_info, unknown_val, expected) + self.check_invalid(knownclass_info, unknown_info, unknownnull_val) + self.check_invalid(knownclass_info, unknown_info, knownclass2_val) + self.check_invalid(knownclass_info, unknown_info) + self.check_invalid(knownclass_info, unknown_info) + self.check_invalid(knownclass_info, unknown_info) + + # knownclass nonnull + expected = """ + [p0] + guard_class(p0, ConstClass(node_vtable)) [] + 
""" + self.guards(knownclass_info, nonnull_info, knownclass_val, expected) + self.check_invalid(knownclass_info, nonnull_info, knownclass2_val) + self.check_invalid(knownclass_info, nonnull_info) + self.check_invalid(knownclass_info, nonnull_info) + + # knownclass knownclass + self.check_no_guards(knownclass_info, knownclass_info, knownclass_val) + self.check_invalid(knownclass_info, knownclass2_info, knownclass2_val) + self.check_no_guards(knownclass_info, knownclass_info) + self.check_invalid(knownclass_info, knownclass2_info) + + # knownclass constant + self.check_invalid(knownclass_info, constantnull_info, constantnull_val) + self.check_invalid(knownclass_info, constclass2_info, constclass2_val) + self.check_invalid(knownclass_info, constantnull_info) + self.check_invalid(knownclass_info, constclass2_info) + + + # constant unknown + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.guards(constant_info, unknown_info, constant_val, expected) + self.check_invalid(constant_info, unknown_info, unknownnull_val) + self.check_invalid(constant_info, unknown_info) + self.check_invalid(constant_info, unknown_info) + + # constant nonnull + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.guards(constant_info, nonnull_info, constant_val, expected) + self.check_invalid(constant_info, nonnull_info, constclass2_val) + self.check_invalid(constant_info, nonnull_info) + self.check_invalid(constant_info, nonnull_info) + + # constant knownclass + expected = """ + [i0] + guard_value(i0, 1) [] + """ + self.guards(constant_info, knownclass_info, constant_val, expected) + self.check_invalid(constant_info, knownclass_info, unknownnull_val) + self.check_invalid(constant_info, knownclass_info) + self.check_invalid(constant_info, knownclass_info) + + # constant constant + self.check_no_guards(constant_info, constant_info, constant_val) + self.check_invalid(constant_info, constantnull_info, constantnull_val) + self.check_no_guards(constant_info, constant_info) + 
self.check_invalid(constant_info, constantnull_info) + + def test_intbounds(self): - value1 = OptValue(BoxInt()) + value1 = OptValue(BoxInt(15)) value1.intbound.make_ge(IntBound(0, 10)) value1.intbound.make_le(IntBound(20, 30)) info1 = NotVirtualStateInfo(value1) @@ -174,10 +366,19 @@ i2 = int_le(i0, 30) guard_true(i2) [] """ - self.guards(info1, info2, BoxInt(15), expected) - py.test.raises(InvalidLoop, self.guards, - info1, info2, BoxInt(50), expected) + self.guards(info1, info2, value1, expected) + self.check_invalid(info1, info2, BoxInt(50)) + def test_intbounds_constant(self): + value1 = OptValue(BoxInt(15)) + value1.intbound.make_ge(IntBound(0, 10)) + value1.intbound.make_le(IntBound(20, 30)) + info1 = NotVirtualStateInfo(value1) + info2 = NotVirtualStateInfo(OptValue(ConstInt(10000))) From noreply at buildbot.pypy.org Wed Apr 23 08:32:36 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 23 Apr 2014 08:32:36 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: open up a few more 'dummy' tests Message-ID: <20140423063236.A76361D2646@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70884:6400eef5ba99 Date: 2014-04-22 23:30 -0700 http://bitbucket.org/pypy/pypy/changeset/6400eef5ba99/ Log: open up a few more 'dummy' tests diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.VOIDP +_C_OPAQUE_PTR = rffi.LONG _C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,11 +7,11 @@ extern "C" { #endif // ifdef __cplusplus - typedef void* cppyy_scope_t; + typedef long cppyy_scope_t; typedef 
cppyy_scope_t cppyy_type_t; - typedef void* cppyy_object_t; - typedef void* cppyy_method_t; - typedef long cppyy_index_t; + typedef long cppyy_object_t; + typedef long cppyy_method_t; + typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); /* name to opaque C++ scope representation -------------------------------- */ diff --git a/pypy/module/cppyy/include/cppyy.h b/pypy/module/cppyy/include/cppyy.h --- a/pypy/module/cppyy/include/cppyy.h +++ b/pypy/module/cppyy/include/cppyy.h @@ -17,7 +17,7 @@ #ifdef __cplusplus struct CPPYY_G__p2p { #else -#typedef struct +typedef struct { #endif long i; int reftype; diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -718,7 +718,6 @@ def get_method_names(self): return self.space.newlist([self.space.wrap(name) for name in self.methods]) - @jit.elidable_promote('0') def get_overload(self, name): try: return self.methods[name] @@ -731,7 +730,6 @@ def get_datamember_names(self): return self.space.newlist([self.space.wrap(name) for name in self.datamembers]) - @jit.elidable_promote('0') def get_datamember(self, name): try: return self.datamembers[name] @@ -741,7 +739,7 @@ self.datamembers[name] = new_dm return new_dm - @jit.elidable_promote('0') + @jit.elidable_promote() def dispatch(self, name, signature): overload = self.get_overload(name) sig = '(%s)' % signature diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -15,22 +16,26 @@ typedef std::map Handles_t; static Handles_t s_handles; -class Cppyy_PseudoInfo { -public: - Cppyy_PseudoInfo(int num_methods=0, const char* methods[]=0) : - m_num_methods(num_methods) { - m_methods.reserve(num_methods); - for (int i=0; i < num_methods; ++i) { - 
m_methods.push_back(methods[i]); - } - } +struct Cppyy_PseudoMethodInfo { + Cppyy_PseudoMethodInfo(const std::string& name, + const std::vector& argtypes, + const std::string& returntype) : + m_name(name), m_argtypes(argtypes), m_returntype(returntype) {} -public: - int m_num_methods; - std::vector m_methods; + std::string m_name; + std::vector m_argtypes; + std::string m_returntype; }; -typedef std::map Scopes_t; +struct Cppyy_PseudoClassInfo { + Cppyy_PseudoClassInfo() {} + Cppyy_PseudoClassInfo(const std::vector& methods) : + m_methods(methods ) {} + + std::vector m_methods; +}; + +typedef std::map Scopes_t; static Scopes_t s_scopes; struct Cppyy_InitPseudoReflectionInfo { @@ -38,8 +43,31 @@ // class example01 -- static long s_scope_id = 0; s_handles["example01"] = (cppyy_scope_t)++s_scope_id; - const char* methods[] = {"staticAddToDouble"}; - Cppyy_PseudoInfo info(1, methods); + + std::vector methods; + + // static double staticAddToDouble(double a); + std::vector argtypes; + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); + + // static int staticAddOneToInt(int a); + // static int staticAddOneToInt(int a, int b); + argtypes.clear(); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + + // static int staticAtoi(const char* str); + argtypes.clear(); + argtypes.push_back("const char*"); + methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); + + // static char* staticStrcpy(const char* strin); + methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; // -- class example01 } @@ -71,10 +99,82 @@ /* method/function dispatching -------------------------------------------- */ +template +static inline T 
cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + T result = T(); + switch ((long)method) { + case 0: // double staticAddToDouble(double) + assert(!self && nargs == 1); + result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + break; + case 1: // int staticAddOneToInt(int) + assert(!self && nargs == 1); + result = ((CPPYY_G__value*)args)[0].obj.in + 1; + break; + case 2: // int staticAddOneToInt(int, int) + assert(!self && nargs == 2); + result = ((CPPYY_G__value*)args)[0].obj.in + ((CPPYY_G__value*)args)[1].obj.in + 1; + break; + case 3: // int staticAtoi(const char* str) + assert(!self && nargs == 1); + result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + break; + } + return result; +} + +int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + return cppyy_call_T(method, self, nargs, args); +} + +long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + // char* staticStrcpy(const char* strin) + const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); + char* strout = (char*)malloc(::strlen(strin)+1); + ::strcpy(strout, strin); + return (long)strout; +} + +double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + return cppyy_call_T(method, self, nargs, args); +} + +char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + // char* staticStrcpy(const char* strin) + const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); + char* strout = (char*)malloc(::strlen(strin)+1); + ::strcpy(strout, strin); + return strout; +} + cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { return (cppyy_methptrgetter_t)0; } +/* handling of function argument buffer ----------------------------------- */ +void* cppyy_allocate_function_args(size_t nargs) { + CPPYY_G__value* args = 
(CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); + for (size_t i = 0; i < nargs; ++i) + args[i].type = 'l'; + return (void*)args; +} + + +/* handling of function argument buffer ----------------------------------- */ +void cppyy_deallocate_function_args(void* args) { + free(args); +} + +size_t cppyy_function_arg_sizeof() { + return sizeof(CPPYY_G__value); +} + +size_t cppyy_function_arg_typeoffset() { + return offsetof(CPPYY_G__value, type); +} + /* scope reflection information ------------------------------------------- */ int cppyy_is_namespace(cppyy_scope_t /* handle */) { @@ -106,7 +206,7 @@ /* method/function reflection information --------------------------------- */ int cppyy_num_methods(cppyy_scope_t handle) { - return s_scopes[handle].m_num_methods; + return s_scopes[handle].m_methods.size(); } cppyy_index_t cppyy_method_index_at(cppyy_scope_t /* scope */, int imeth) { @@ -114,31 +214,32 @@ } char* cppyy_method_name(cppyy_scope_t handle, cppyy_index_t method_index) { - return cppstring_to_cstring(s_scopes[handle].m_methods[(int)method_index]); + return cppstring_to_cstring(s_scopes[handle].m_methods[(int)method_index].m_name); } -char* cppyy_method_result_type(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { - return cppstring_to_cstring("double"); +char* cppyy_method_result_type(cppyy_scope_t handle, cppyy_index_t method_index) { + return cppstring_to_cstring(s_scopes[handle].m_methods[method_index].m_returntype); } -int cppyy_method_num_args(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { - return 1; +int cppyy_method_num_args(cppyy_scope_t handle, cppyy_index_t method_index) { + return s_scopes[handle].m_methods[method_index].m_argtypes.size(); } int cppyy_method_req_args(cppyy_scope_t handle, cppyy_index_t method_index) { return cppyy_method_num_args(handle, method_index); } -char* cppyy_method_arg_type(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */, int /* arg_index */) { - return 
cppstring_to_cstring("double"); +char* cppyy_method_arg_type(cppyy_scope_t handle, cppyy_index_t method_index, int arg_index) { + return cppstring_to_cstring(s_scopes[handle].m_methods[method_index].m_argtypes[arg_index]); } -char* cppyy_method_arg_default(cppyy_scope_t handle, cppyy_index_t method_index, int arg_index) { +char* cppyy_method_arg_default( + cppyy_scope_t /* handle */, cppyy_index_t /* method_index */, int /* arg_index */) { return cppstring_to_cstring(""); } char* cppyy_method_signature(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { - return cppstring_to_cstring("double"); + return cppstring_to_cstring(""); } int cppyy_method_is_template(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { @@ -167,6 +268,26 @@ /* misc helpers ----------------------------------------------------------- */ +long long cppyy_strtoll(const char* str) { + return strtoll(str, NULL, 0); +} + +extern "C" unsigned long long cppyy_strtoull(const char* str) { + return strtoull(str, NULL, 0); +} + void cppyy_free(void* ptr) { free(ptr); } + +cppyy_object_t cppyy_charp2stdstring(const char* str) { + void* arena = new char[sizeof(std::string)]; + new (arena) std::string(str); + return (cppyy_object_t)arena; +} + +cppyy_object_t cppyy_stdstring2stdstring(cppyy_object_t ptr) { + void* arena = new char[sizeof(std::string)]; + new (arena) std::string(*(std::string*)ptr); + return (cppyy_object_t)arena; +} diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -163,7 +163,7 @@ double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { return cppyy_call_T(method, self, nargs, args); -} +} void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { return (void*)cppyy_call_T(method, self, nargs, args); diff --git a/pypy/module/cppyy/test/conftest.py 
b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -7,8 +7,11 @@ if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex - if not item.location[0] in ['test_helper.py', 'test_cppyy.py'] or \ - (item.location[0] == 'test_cppyy.py' and not 'TestCPPYYImplementation' in item.location[2]): + if not item.location[0] in ['test_helper.py', 'test_cppyy.py']: + py.test.skip("genreflex is not installed") + import re + if item.location[0] == 'test_cppyy.py' and \ + not re.search("test0[1-3]", item.location[2]): py.test.skip("genreflex is not installed") def pytest_configure(config): From noreply at buildbot.pypy.org Wed Apr 23 08:34:30 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 23 Apr 2014 08:34:30 +0200 (CEST) Subject: [pypy-commit] pypy default: merge reflex-support branch into default: Message-ID: <20140423063430.B40FE1D2646@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r70885:06afb95580dd Date: 2014-04-22 23:33 -0700 http://bitbucket.org/pypy/pypy/changeset/06afb95580dd/ Log: merge reflex-support branch into default: execute at least some tests, even if no genreflex installed more consistent handling of NULL/None and std::string fixes for elidable improved cint backend initial cling backend diff too long, truncating to 2000 out of 4893 lines diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -12,8 +12,10 @@ '_template_byname' : 'interp_cppyy.template_byname', '_std_string_name' : 'interp_cppyy.std_string_name', '_set_class_generator' : 'interp_cppyy.set_class_generator', + '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', + '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstance' : 
'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -190,7 +190,8 @@ [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def c_get_methptr_getter(space, cppscope, index): return _c_get_methptr_getter(cppscope.handle, index) @@ -214,7 +215,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -222,7 +224,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -283,7 +286,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) @jit.elidable_promote('2') def c_is_subtype(space, derived, base): if derived == base: @@ -295,7 +299,8 @@ [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) @jit.elidable_promote('1,2,4') def c_base_offset(space, derived, base, address, direction): if derived == base: @@ -543,19 +548,3 @@ compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) -_c_assign2stdstring = rffi.llexternal( - "cppyy_assign2stdstring", - [C_OBJECT, rffi.CCHARP], lltype.Void, - 
releasegil=ts_helper, - compilation_info=backend.eci) -def c_assign2stdstring(space, cppobject, svalue): - charp = rffi.str2charp(svalue) - _c_assign2stdstring(cppobject, charp) - rffi.free_charp(charp) -_c_free_stdstring = rffi.llexternal( - "cppyy_free_stdstring", - [C_OBJECT], lltype.Void, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_free_stdstring(space, cppobject): - _c_free_stdstring(cppobject) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -6,8 +6,11 @@ from pypy.interpreter.baseobjspace import W_Root from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import libffi, rdynload +from rpython.tool.udir import udir + +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -19,21 +22,21 @@ if os.environ.get("ROOTSYS"): import commands (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: # presumably Reflex-only - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include")] + if stat != 0: + rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: - rootincpath = [incdir] + rootincpath = [incdir, py.path.local(udir)] rootlibpath = commands.getoutput("root-config --libdir").split() else: - rootincpath = [] + rootincpath = [py.path.local(udir)] rootlibpath = [] def identify(): return 'CINT' -ts_reflect = False -ts_call = False +ts_reflect = True +ts_call = True ts_memory = False ts_helper = False @@ -47,13 +50,15 @@ _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) with rffi.scoped_str2charp('libCore.so') as ll_libname: 
_coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) +with rffi.scoped_str2charp('libHist.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("cintcwrapper.cxx")], include_dirs=[incpath] + rootincpath, includes=["cintcwrapper.h"], library_dirs=rootlibpath, - libraries=["Core", "Cint"], + libraries=["Hist", "Core", "Cint"], use_cpp_linker=True, ) @@ -71,6 +76,23 @@ # CINT-specific pythonizations =============================================== +_c_charp2TString = rffi.llexternal( + "cppyy_charp2TString", + [rffi.CCHARP], C_OBJECT, + releasegil=ts_helper, + compilation_info=eci) +def c_charp2TString(space, svalue): + charp = rffi.str2charp(svalue) + result = _c_charp2TString(charp) + rffi.free_charp(charp) + return result +_c_TString2TString = rffi.llexternal( + "cppyy_TString2TString", + [C_OBJECT], C_OBJECT, + releasegil=ts_helper, + compilation_info=eci) +def c_TString2TString(space, cppobject): + return _c_TString2TString(cppobject) def _get_string_data(space, w_obj, m1, m2 = None): from pypy.module.cppyy import interp_cppyy @@ -80,10 +102,85 @@ return w_1 return obj.space.call_method(w_1, m2) +### TF1 ---------------------------------------------------------------------- +class State(object): + def __init__(self, space): + self.tfn_pyfuncs = [] + self.tfn_callbacks = [] + +_create_tf1 = rffi.llexternal( + "cppyy_create_tf1", + [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, + releasegil=False, + compilation_info=eci) + + at unwrap_spec(args_w='args_w') +def tf1_tf1(space, w_self, args_w): + """Pythonized version of TF1 constructor: + takes functions and callable objects, and allows a callback into them.""" + + from pypy.module.cppyy import interp_cppyy + tf1_class = interp_cppyy.scope_byname(space, "TF1") + + # expected signature: + # 1. 
(char* name, pyfunc, double xmin, double xmax, int npar = 0) + argc = len(args_w) + + try: + # Note: argcount is +1 for the class (== w_self) + if argc < 5 or 6 < argc: + raise TypeError("wrong number of arguments") + + # second argument must be a name + funcname = space.str_w(args_w[1]) + + # last (optional) argument is number of parameters + npar = 0 + if argc == 6: npar = space.int_w(args_w[5]) + + # third argument must be a callable python object + w_callable = args_w[2] + if not space.is_true(space.callable(w_callable)): + raise TypeError("2nd argument is not a valid python callable") + + # generate a pointer to function + from pypy.module._cffi_backend import newtype, ctypefunc, func + + c_double = newtype.new_primitive_type(space, 'double') + c_doublep = newtype.new_pointer_type(space, c_double) + + # wrap the callable as the signature needs modifying + w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) + + w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) + w_callback = func.callback(space, w_cfunc, w_ifunc, None) + funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) + + # so far, so good; leaves on issue: CINT is expecting a wrapper, but + # we need the overload that takes a function pointer, which is not in + # the dictionary, hence this helper: + newinst = _create_tf1(space.str_w(args_w[1]), funcaddr, + space.float_w(args_w[3]), space.float_w(args_w[4]), npar) + + from pypy.module.cppyy import interp_cppyy + w_instance = interp_cppyy.wrap_cppobject(space, newinst, tf1_class, + do_cast=False, python_owns=True, fresh=True) + + # tie all the life times to the TF1 instance + space.setattr(w_instance, space.wrap('_callback'), w_callback) + + return w_instance + except (OperationError, TypeError, IndexError), e: + newargs_w = args_w[1:] # drop class + + # return control back to the original, unpythonized overload + ol = tf1_class.get_overload("TF1") + return ol.call(None, newargs_w) + ### TTree 
-------------------------------------------------------------------- _ttree_Branch = rffi.llexternal( "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], rffi.LONG, + [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, releasegil=False, compilation_info=eci) @@ -202,6 +299,8 @@ # some instance klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) w_obj = klass.construct() + # 0x10000 = kDeleteObject; reset because we own the object + space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) space.call_method(w_branch, "SetObject", w_obj) space.call_method(w_branch, "GetEntry", space.wrap(entry)) space.setattr(w_self, args_w[0], w_obj) @@ -274,6 +373,9 @@ allfuncs = [ + ### TF1 + tf1_tf1, + ### TTree ttree_Branch, ttree_iter, ttree_getattr, ] @@ -288,7 +390,14 @@ # callback coming in when app-level bound classes have been created def pythonize(space, name, w_pycppclass): - if name == "TFile": + if name == "TCollection": + _method_alias(space, w_pycppclass, "append", "Add") + _method_alias(space, w_pycppclass, "__len__", "GetSize") + + elif name == "TF1": + space.setattr(w_pycppclass, space.wrap("__new__"), _pythonizations["tf1_tf1"]) + + elif name == "TFile": _method_alias(space, w_pycppclass, "__getattr__", "Get") elif name == "TObjString": @@ -310,3 +419,17 @@ elif name[0:8] == "TVectorT": # TVectorT<> template _method_alias(space, w_pycppclass, "__len__", "GetNoElements") + +# destruction callback (needs better solution, but this is for CINT +# only and should not appear outside of ROOT-specific uses) +from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL + + at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) +def _Py_cppyy_recursive_remove(space, cppobject): + from pypy.module.cppyy.interp_cppyy import memory_regulator + from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT + + obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) + if obj is 
not None: + memory_regulator.unregister(obj) + obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -0,0 +1,69 @@ +import py, os + +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib import libffi, rdynload + +__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] + +pkgpath = py.path.local(__file__).dirpath().join(os.pardir) +srcpath = pkgpath.join("src") +incpath = pkgpath.join("include") + +import commands +(config_stat, incdir) = commands.getstatusoutput("root-config --incdir") + +if os.environ.get("ROOTSYS"): + if config_stat != 0: # presumably Reflex-only + rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] + else: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() +else: + if config_stat == 0: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() + else: + rootincpath = [] + rootlibpath = [] + +def identify(): + return 'Cling' + +ts_reflect = False +ts_call = 'auto' +ts_memory = 'auto' +ts_helper = 'auto' + +std_string_name = 'std::basic_string' + +eci = ExternalCompilationInfo( + separate_module_files=[srcpath.join("clingcwrapper.cxx")], + include_dirs=[incpath] + rootincpath, + includes=["clingcwrapper.h"], + library_dirs=rootlibpath, + libraries=["Cling"], + compile_extra=["-fno-strict-aliasing"], + use_cpp_linker=True, +) + +_c_load_dictionary = rffi.llexternal( + "cppyy_load_dictionary", + [rffi.CCHARP], rdynload.DLLHANDLE, + releasegil=False, + compilation_info=eci) + +def c_load_dictionary(name): + pch = _c_load_dictionary(name) 
+ return pch + + +# Cling-specific pythonizations +def register_pythonizations(space): + "NOT_RPYTHON" + pass + +def pythonize(space, name, w_pycppclass): + pass diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -214,8 +214,6 @@ 'charp2stdstring' : ([c_ccharp], c_object), 'stdstring2stdstring' : ([c_object], c_object), - 'assign2stdstring' : ([c_object, c_ccharp], c_void), - 'free_stdstring' : ([c_object], c_void), } def load_reflection_library(space): @@ -504,11 +502,6 @@ return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) def c_stdstring2stdstring(space, cppobject): return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(l=cppobject)])) -def c_assign2stdstring(space, cppobject, svalue): - args = [_Arg(l=cppobject), _Arg(s=svalue)] - call_capi(space, 'assign2stdstring', args) -def c_free_stdstring(space, cppobject): - call_capi(space, 'free_stdstring', [_Arg(l=cppobject)]) # loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) def register_pythonizations(space): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -7,7 +7,7 @@ from rpython.rlib import jit_libffi, rfloat from pypy.module._rawffi.interp_rawffi import unpack_simple_shape -from pypy.module._rawffi.array import W_Array +from pypy.module._rawffi.array import W_Array, W_ArrayInstance from pypy.module.cppyy import helper, capi, ffitypes @@ -47,21 +47,35 @@ return rawobject return capi.C_NULL_OBJECT +def is_nullpointer_specialcase(space, w_obj): + # 0, None, and nullptr may serve as "NULL", check for any of them + + # integer 0 + try: + return space.int_w(w_obj) == 0 + except Exception: + pass + # None or nullptr + from pypy.module.cppyy import interp_cppyy + return 
space.is_true(space.is_(w_obj, space.w_None)) or \ + space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space))) + def get_rawbuffer(space, w_obj): + # raw buffer try: buf = space.buffer_w(w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass - # special case: allow integer 0 as NULL + # array type try: - buf = space.int_w(w_obj) - if buf == 0: - return rffi.cast(rffi.VOIDP, 0) + arr = space.interp_w(W_ArrayInstance, w_obj, can_be_None=True) + if arr: + return rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) except Exception: pass - # special case: allow None as NULL - if space.is_true(space.is_(w_obj, space.w_None)): + # pre-defined NULL + if is_nullpointer_specialcase(space, w_obj): return rffi.cast(rffi.VOIDP, 0) raise TypeError("not an addressable buffer") @@ -139,8 +153,6 @@ self.size = array_size def from_memory(self, space, w_obj, w_pycppclass, offset): - if hasattr(space, "fake"): - raise NotImplementedError # read access, so no copy needed address_value = self._get_raw_address(space, w_obj, offset) address = rffi.cast(rffi.ULONG, address_value) @@ -261,8 +273,7 @@ self.name = name def convert_argument(self, space, w_obj, address, call_local): - raise OperationError(space.w_TypeError, - space.wrap('no converter available for type "%s"' % self.name)) + self._is_abstract(space) class BoolConverter(ffitypes.typeid(bool), TypeConverter): @@ -372,7 +383,12 @@ try: obj = get_rawbuffer(space, w_obj) except TypeError: - obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) + try: + # TODO: accept a 'capsule' rather than naked int + # (do accept int(0), though) + obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + except Exception: + obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj def convert_argument(self, space, w_obj, address, call_local): @@ -385,6 +401,24 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = self._unwrap_object(space, w_obj) + def from_memory(self, space, w_obj, w_pycppclass, offset): + # 
returned as a long value for the address (INTPTR_T is not proper + # per se, but rffi does not come with a PTRDIFF_T) + address = self._get_raw_address(space, w_obj, offset) + ptrval = rffi.cast(rffi.ULONG, rffi.cast(rffi.VOIDPP, address)[0]) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + return arr.fromaddress(space, ptrval, sys.maxint) + + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + if is_nullpointer_specialcase(space, w_value): + address[0] = rffi.cast(rffi.VOIDP, 0) + else: + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) + class VoidPtrPtrConverter(TypeConverter): _immutable_fields_ = ['uses_local'] @@ -412,7 +446,7 @@ _immutable_fields_ = ['uses_local'] uses_local = True -class InstancePtrConverter(TypeConverter): +class InstanceRefConverter(TypeConverter): _immutable_fields_ = ['libffitype', 'cppclass'] libffitype = jit_libffi.types.pointer @@ -444,17 +478,7 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_obj)) - def from_memory(self, space, w_obj, w_pycppclass, offset): - address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy - return interp_cppyy.wrap_cppobject(space, address, self.cppclass, - do_cast=False, is_ref=True) - - def to_memory(self, space, w_obj, w_value, offset): - address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) - address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) - -class InstanceConverter(InstancePtrConverter): +class InstanceConverter(InstanceRefConverter): def convert_argument_libffi(self, space, w_obj, address, call_local): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -468,6 +492,28 @@ def to_memory(self, 
space, w_obj, w_value, offset): self._is_abstract(space) + +class InstancePtrConverter(InstanceRefConverter): + + def _unwrap_object(self, space, w_obj): + try: + return InstanceRefConverter._unwrap_object(self, space, w_obj) + except OperationError, e: + # if not instance, allow certain special cases + if is_nullpointer_specialcase(space, w_obj): + return capi.C_NULL_OBJECT + raise e + + def from_memory(self, space, w_obj, w_pycppclass, offset): + address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.wrap_cppobject(space, address, self.cppclass, + do_cast=False, is_ref=True) + + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) + class InstancePtrPtrConverter(InstancePtrConverter): _immutable_fields_ = ['uses_local'] @@ -487,12 +533,6 @@ from pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible - def from_memory(self, space, w_obj, w_pycppclass, offset): - self._is_abstract(space) - - def to_memory(self, space, w_obj, w_value, offset): - self._is_abstract(space) - def finalize_call(self, space, w_obj, call_local): from pypy.module.cppyy.interp_cppyy import W_CPPInstance assert isinstance(w_obj, W_CPPInstance) @@ -501,7 +541,6 @@ class StdStringConverter(InstanceConverter): - _immutable_fields_ = ['cppclass'] def __init__(self, space, extra): from pypy.module.cppyy import interp_cppyy @@ -509,24 +548,25 @@ InstanceConverter.__init__(self, space, cppclass) def _unwrap_object(self, space, w_obj): - try: + from pypy.module.cppyy.interp_cppyy import W_CPPInstance + if isinstance(w_obj, W_CPPInstance): + arg = InstanceConverter._unwrap_object(self, space, w_obj) + return capi.c_stdstring2stdstring(space, arg) + else: return capi.c_charp2stdstring(space, space.str_w(w_obj)) - except Exception, e: 
- arg = InstanceConverter._unwrap_object(self, space, w_obj) - result = capi.c_stdstring2stdstring(space, arg) - return result def to_memory(self, space, w_obj, w_value, offset): try: address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - capi.c_assign2stdstring(space, address, space.str_w(w_value)) - return + assign = self.cppclass.get_overload("__assign__") + from pypy.module.cppyy import interp_cppyy + assign.call( + interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False), [w_value]) except Exception: - pass - return InstanceConverter.to_memory(self, space, w_obj, w_value, offset) + InstanceConverter.to_memory(self, space, w_obj, w_value, offset) def free_argument(self, space, arg, call_local): - capi.c_free_stdstring(space, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) + capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) class StdStringRefConverter(InstancePtrConverter): _immutable_fields_ = ['cppclass'] @@ -570,6 +610,7 @@ def free_argument(self, space, arg, call_local): if hasattr(space, "fake"): raise NotImplementedError + space.getbuiltinmodule("cpyext") from pypy.module.cpyext.pyobject import Py_DecRef, PyObject Py_DecRef(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0])) @@ -627,8 +668,10 @@ # type check for the benefit of the annotator from pypy.module.cppyy.interp_cppyy import W_CPPClass cppclass = space.interp_w(W_CPPClass, cppclass, can_be_None=False) - if compound == "*" or compound == "&": + if compound == "*": return InstancePtrConverter(space, cppclass) + elif compound == "&": + return InstanceRefConverter(space, cppclass) elif compound == "**": return InstancePtrPtrConverter(space, cppclass) elif compound == "": @@ -654,7 +697,7 @@ _converters["void**"] = VoidPtrPtrConverter _converters["void*&"] = VoidPtrRefConverter -# special cases (note: CINT backend requires the simple name 'string') +# special cases (note: 'string' aliases added 
below) _converters["std::basic_string"] = StdStringConverter _converters["const std::basic_string&"] = StdStringConverter # TODO: shouldn't copy _converters["std::basic_string&"] = StdStringRefConverter @@ -776,3 +819,27 @@ for c_type, alias in aliases: _converters[alias] = _converters[c_type] _add_aliased_converters() + +# ROOT-specific converters (TODO: this is a general use case and should grow +# an API; putting it here is done only to circumvent circular imports) +if capi.identify() == "CINT": + + class TStringConverter(InstanceConverter): + def __init__(self, space, extra): + from pypy.module.cppyy import interp_cppyy + cppclass = interp_cppyy.scope_byname(space, "TString") + InstanceConverter.__init__(self, space, cppclass) + + def _unwrap_object(self, space, w_obj): + from pypy.module.cppyy import interp_cppyy + if isinstance(w_obj, interp_cppyy.W_CPPInstance): + arg = InstanceConverter._unwrap_object(self, space, w_obj) + return capi.backend.c_TString2TString(space, arg) + else: + return capi.backend.c_charp2TString(space, space.str_w(w_obj)) + + def free_argument(self, space, arg, call_local): + capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) + + _converters["TString"] = TStringConverter + _converters["const TString&"] = TStringConverter diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -53,17 +53,12 @@ if hasattr(space, "fake"): raise NotImplementedError lresult = capi.c_call_l(space, cppmethod, cppthis, num_args, args) - address = rffi.cast(rffi.ULONG, lresult) + ptrval = rffi.cast(rffi.ULONG, lresult) arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap(self.typecode))) - if address == 0: - # TODO: fix this hack; fromaddress() will allocate memory if address - # is null and there seems to be no way around it (ll_buffer can not - # be touched directly) - nullarr = arr.fromaddress(space, 
address, 0) - assert isinstance(nullarr, W_ArrayInstance) - nullarr.free(space) - return nullarr - return arr.fromaddress(space, address, sys.maxint) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + return arr.fromaddress(space, ptrval, sys.maxint) class VoidExecutor(FunctionExecutor): @@ -144,7 +139,7 @@ from pypy.module.cppyy import interp_cppyy newthis = capi.c_constructor(space, cppmethod, cpptype, num_args, args) assert lltype.typeOf(newthis) == capi.C_OBJECT - return space.wrap(newthis) + return space.wrap(rffi.cast(rffi.LONG, newthis)) # really want ptrdiff_t here class InstancePtrExecutor(FunctionExecutor): @@ -160,7 +155,8 @@ from pypy.module.cppyy import interp_cppyy long_result = capi.c_call_l(space, cppmethod, cppthis, num_args, args) ptr_result = rffi.cast(capi.C_OBJECT, long_result) - return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) + pyres = interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) + return pyres def execute_libffi(self, space, cif_descr, funcaddr, buffer): jit_libffi.jit_ffi_call(cif_descr, funcaddr, buffer) @@ -189,7 +185,7 @@ long_result = capi.c_call_o(space, cppmethod, cppthis, num_args, args, self.cppclass) ptr_result = rffi.cast(capi.C_OBJECT, long_result) return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass, - do_cast=False, python_owns=True) + do_cast=False, python_owns=True, fresh=True) def execute_libffi(self, space, cif_descr, funcaddr, buffer): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -206,6 +202,13 @@ from pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible +class StdStringRefExecutor(InstancePtrExecutor): + + def __init__(self, space, cppclass): + from pypy.module.cppyy import interp_cppyy + cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) + InstancePtrExecutor.__init__(self, space, cppclass) + class PyObjectExecutor(PtrTypeExecutor): @@ -295,12 
+298,12 @@ _executors["void*"] = PtrTypeExecutor _executors["const char*"] = CStringExecutor -# special cases +# special cases (note: 'string' aliases added below) _executors["constructor"] = ConstructorExecutor _executors["std::basic_string"] = StdStringExecutor -_executors["const std::basic_string&"] = StdStringExecutor -_executors["std::basic_string&"] = StdStringExecutor # TODO: shouldn't copy +_executors["const std::basic_string&"] = StdStringRefExecutor +_executors["std::basic_string&"] = StdStringRefExecutor _executors["PyObject*"] = PyObjectExecutor @@ -363,7 +366,11 @@ "NOT_RPYTHON" aliases = ( ("const char*", "char*"), + ("std::basic_string", "string"), + ("const std::basic_string&", "const string&"), + ("std::basic_string&", "string&"), + ("PyObject*", "_object*"), ) diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -89,11 +89,11 @@ cppyy_index_t cppyy_get_global_operator( cppyy_scope_t scope, cppyy_scope_t lc, cppyy_scope_t rc, const char* op); - /* method properties ----------------------------------------------------- */ + /* method properties ------------------------------------------------------ */ int cppyy_is_constructor(cppyy_type_t type, cppyy_index_t idx); int cppyy_is_staticmethod(cppyy_type_t type, cppyy_index_t idx); - /* data member reflection information ------------------------------------ */ + /* data member reflection information ------------------------------------- */ int cppyy_num_datamembers(cppyy_scope_t scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); @@ -101,7 +101,7 @@ int cppyy_datamember_index(cppyy_scope_t scope, const char* name); - /* data member properties ------------------------------------------------ */ + /* data member properties ------------------------------------------------- */ int 
cppyy_is_publicdata(cppyy_type_t type, int datamember_index); int cppyy_is_staticdata(cppyy_type_t type, int datamember_index); @@ -112,8 +112,6 @@ cppyy_object_t cppyy_charp2stdstring(const char* str); cppyy_object_t cppyy_stdstring2stdstring(cppyy_object_t ptr); - void cppyy_assign2stdstring(cppyy_object_t ptr, const char* str); - void cppyy_free_stdstring(cppyy_object_t ptr); #ifdef __cplusplus } diff --git a/pypy/module/cppyy/include/cintcwrapper.h b/pypy/module/cppyy/include/cintcwrapper.h --- a/pypy/module/cppyy/include/cintcwrapper.h +++ b/pypy/module/cppyy/include/cintcwrapper.h @@ -11,12 +11,18 @@ void* cppyy_load_dictionary(const char* lib_name); /* pythonization helpers */ + cppyy_object_t cppyy_create_tf1(const char* funcname, unsigned long address, + double xmin, double xmax, int npar); + cppyy_object_t cppyy_ttree_Branch( void* vtree, const char* branchname, const char* classname, void* addobj, int bufsize, int splitlevel); long long cppyy_ttree_GetEntry(void* vtree, long long entry); + cppyy_object_t cppyy_charp2TString(const char* str); + cppyy_object_t cppyy_TString2TString(cppyy_object_t ptr); + #ifdef __cplusplus } #endif // ifdef __cplusplus diff --git a/pypy/module/cppyy/include/clingcwrapper.h b/pypy/module/cppyy/include/clingcwrapper.h new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/include/clingcwrapper.h @@ -0,0 +1,37 @@ +#ifndef CPPYY_CLINGCWRAPPER +#define CPPYY_CLINGCWRAPPER + +#include "capi.h" + +#ifdef __cplusplus +extern "C" { +#endif // ifdef __cplusplus + + /* misc helpers */ + void* cppyy_load_dictionary(const char* lib_name); + +#ifdef __cplusplus +} +#endif // ifdef __cplusplus + +// TODO: pick up from llvm-config --cxxflags +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifndef __STDC_CONSTANT_MACROS +#define __STDC_CONSTANT_MACROS +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif + +// Wrapper callback: except this 
to become available from Cling directly +typedef void (*CPPYY_Cling_Wrapper_t)(void*, int, void**, void*); + +#endif // ifndef CPPYY_CLINGCWRAPPER diff --git a/pypy/module/cppyy/include/cppyy.h b/pypy/module/cppyy/include/cppyy.h --- a/pypy/module/cppyy/include/cppyy.h +++ b/pypy/module/cppyy/include/cppyy.h @@ -17,7 +17,7 @@ #ifdef __cplusplus struct CPPYY_G__p2p { #else -#typedef struct +typedef struct { #endif long i; int reftype; diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -40,9 +40,28 @@ def __init__(self, space): self.cppscope_cache = { "void" : W_CPPClass(space, "void", capi.C_NULL_TYPE) } + self.w_nullptr = None self.cpptemplate_cache = {} self.cppclass_registry = {} self.w_clgen_callback = None + self.w_fngen_callback = None + +def get_nullptr(space): + if hasattr(space, "fake"): + raise NotImplementedError + state = space.fromcache(State) + if state.w_nullptr is None: + from pypy.module._rawffi.interp_rawffi import unpack_simple_shape + from pypy.module._rawffi.array import W_Array, W_ArrayInstance + arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + # TODO: fix this hack; fromaddress() will allocate memory if address + # is null and there seems to be no way around it (ll_buffer can not + # be touched directly) + nullarr = arr.fromaddress(space, rffi.cast(rffi.ULONG, 0), 0) + assert isinstance(nullarr, W_ArrayInstance) + nullarr.free(space) + state.w_nullptr = space.wrap(nullarr) + return state.w_nullptr @unwrap_spec(name=str) def resolve_name(space, name): @@ -101,6 +120,11 @@ state = space.fromcache(State) state.w_clgen_callback = w_callback + at unwrap_spec(w_callback=W_Root) +def set_function_generator(space, w_callback): + state = space.fromcache(State) + state.w_fngen_callback = w_callback + def register_class(space, w_pycppclass): w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) 
cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) @@ -108,7 +132,7 @@ # class allows simple aliasing of methods) capi.pythonize(space, cppclass.name, w_pycppclass) state = space.fromcache(State) - state.cppclass_registry[cppclass.handle] = w_pycppclass + state.cppclass_registry[rffi.cast(rffi.LONG, cppclass.handle)] = w_pycppclass class W_CPPLibrary(W_Root): @@ -694,7 +718,6 @@ def get_method_names(self): return self.space.newlist([self.space.wrap(name) for name in self.methods]) - @jit.elidable_promote('0') def get_overload(self, name): try: return self.methods[name] @@ -707,7 +730,6 @@ def get_datamember_names(self): return self.space.newlist([self.space.wrap(name) for name in self.datamembers]) - @jit.elidable_promote('0') def get_datamember(self, name): try: return self.datamembers[name] @@ -717,7 +739,7 @@ self.datamembers[name] = new_dm return new_dm - @jit.elidable_promote('0') + @jit.elidable_promote() def dispatch(self, name, signature): overload = self.get_overload(name) sig = '(%s)' % signature @@ -1130,19 +1152,23 @@ def get_pythonized_cppclass(space, handle): state = space.fromcache(State) try: - w_pycppclass = state.cppclass_registry[handle] + w_pycppclass = state.cppclass_registry[rffi.cast(rffi.LONG, handle)] except KeyError: final_name = capi.c_scoped_final_name(space, handle) # the callback will cache the class by calling register_class w_pycppclass = space.call_function(state.w_clgen_callback, space.wrap(final_name)) return w_pycppclass +def get_interface_func(space, w_callable, npar): + state = space.fromcache(State) + return space.call_function(state.w_fngen_callback, w_callable, space.wrap(npar)) + def wrap_cppobject(space, rawobject, cppclass, do_cast=True, python_owns=False, is_ref=False, fresh=False): rawobject = rffi.cast(capi.C_OBJECT, rawobject) - # cast to actual cast if requested and possible - w_pycppclass = space.w_None + # cast to actual if requested and possible + w_pycppclass = None if do_cast and rawobject: 
actual = capi.c_actual_class(space, cppclass, rawobject) if actual != cppclass.handle: @@ -1158,7 +1184,7 @@ # the variables are re-assigned yet) pass - if space.is_w(w_pycppclass, space.w_None): + if w_pycppclass is None: w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) # try to recycle existing object if this one is not newly created @@ -1174,16 +1200,30 @@ memory_regulator.register(cppinstance) return w_cppinstance - at unwrap_spec(w_cppinstance=W_CPPInstance) -def addressof(space, w_cppinstance): - """Takes a bound C++ instance, returns the raw address.""" - address = rffi.cast(rffi.LONG, w_cppinstance.get_rawobject()) +def _addressof(space, w_obj): + try: + # attempt to extract address from array + return rffi.cast(rffi.INTPTR_T, converter.get_rawbuffer(space, w_obj)) + except TypeError: + pass + # attempt to get address of C++ instance + return rffi.cast(rffi.INTPTR_T, converter.get_rawobject(space, w_obj)) + + at unwrap_spec(w_obj=W_Root) +def addressof(space, w_obj): + """Takes a bound C++ instance or array, returns the raw address.""" + address = _addressof(space, w_obj) return space.wrap(address) - at unwrap_spec(address=int, owns=bool) -def bind_object(space, address, w_pycppclass, owns=False): + at unwrap_spec(owns=bool, cast=bool) +def bind_object(space, w_obj, w_pycppclass, owns=False, cast=False): """Takes an address and a bound C++ class proxy, returns a bound instance.""" - rawobject = rffi.cast(capi.C_OBJECT, address) + try: + # attempt address from array or C++ instance + rawobject = rffi.cast(capi.C_OBJECT, _addressof(space, w_obj)) + except Exception: + # accept integer value as address + rawobject = rffi.cast(capi.C_OBJECT, space.uint_w(w_obj)) w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) if not w_cppclass: w_cppclass = scope_byname(space, space.str_w(w_pycppclass)) @@ -1191,4 +1231,4 @@ raise OperationError(space.w_TypeError, space.wrap("no such class: %s" % space.str_w(w_pycppclass))) cppclass = 
space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) - return wrap_cppobject(space, rawobject, cppclass, do_cast=False, python_owns=owns) + return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, python_owns=owns) diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -55,6 +55,19 @@ def clgen_callback(name): return get_pycppclass(name) +def fngen_callback(func, npar): # todo, some kind of arg transform spec + if npar == 0: + def wrapper(a0, a1): + la0 = [a0[0], a0[1], a0[2], a0[3]] + return func(la0) + return wrapper + else: + def wrapper(a0, a1): + la0 = [a0[0], a0[1], a0[2], a0[3]] + la1 = [a1[i] for i in range(npar)] + return func(la0, la1) + return wrapper + def make_static_function(func_name, cppol): def function(*args): @@ -416,6 +429,9 @@ # class generator callback cppyy._set_class_generator(clgen_callback) + # function generator callback + cppyy._set_function_generator(fngen_callback) + # user interface objects (note the two-step of not calling scope_byname here: # creation of global functions may cause the creation of classes in the global # namespace, so gbl must exist at that point to cache them) @@ -431,6 +447,9 @@ # be the same issue for all typedef'd builtin types setattr(gbl, 'unsigned int', int) + # install nullptr as a unique reference + setattr(gbl, 'nullptr', cppyy._get_nullptr()) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -8,6 +8,7 @@ #include "TApplication.h" #include "TInterpreter.h" +#include "TVirtualMutex.h" #include "Getline.h" #include "TBaseClass.h" @@ -24,6 +25,8 @@ // for pythonization #include "TTree.h" #include "TBranch.h" +#include "TF1.h" +#include "TString.h" #include "Api.h" @@ -34,15 +37,15 @@ #include #include +// for 
recursive_remove callback +#include "pypy_macros.h" + /* ROOT/CINT internals --------------------------------------------------- */ extern long G__store_struct_offset; extern "C" void G__LockCriticalSection(); extern "C" void G__UnlockCriticalSection(); -#define G__SETMEMFUNCENV (long)0x7fff0035 -#define G__NOP (long)0x7fff00ff - namespace { class Cppyy_OpenedTClass : public TDictionary { @@ -57,6 +60,16 @@ TList* fAllPubMethod; //all public methods (including from base classes) }; +// memory regulation (cppyy_recursive_remove is generated as a cpyext capi call) +extern "C" void _Py_cppyy_recursive_remove(void*); + +class Cppyy_MemoryRegulator : public TObject { +public: + virtual void RecursiveRemove(TObject* object) { + _Py_cppyy_recursive_remove((void*)object); + } +}; + } // unnamed namespace @@ -82,6 +95,8 @@ /* initialization of the ROOT system (debatable ... ) --------------------- */ namespace { +static Cppyy_MemoryRegulator s_memreg; + class TCppyyApplication : public TApplication { public: TCppyyApplication(const char* acn, Int_t* argc, char** argv, Bool_t do_load = kTRUE) @@ -114,10 +129,13 @@ // enable auto-loader gInterpreter->EnableAutoLoading(); + + // enable memory regulation + gROOT->GetListOfCleanups()->Add(&s_memreg); } }; -static const char* appname = "pypy-cppyy"; +static const char* appname = "PyPyROOT"; class ApplicationStarter { public: @@ -126,11 +144,10 @@ assert(g_classrefs.size() == (ClassRefs_t::size_type)GLOBAL_HANDLE); g_classref_indices[""] = (ClassRefs_t::size_type)GLOBAL_HANDLE; g_classrefs.push_back(TClassRef("")); - g_classref_indices["std"] = g_classrefs.size(); - g_classrefs.push_back(TClassRef("")); // CINT ignores std - g_classref_indices["::std"] = g_classrefs.size(); - g_classrefs.push_back(TClassRef("")); // id. 
- + // CINT ignores std/::std, so point them to the global namespace + g_classref_indices["std"] = (ClassRefs_t::size_type)GLOBAL_HANDLE; + g_classref_indices["::std"] = (ClassRefs_t::size_type)GLOBAL_HANDLE; + // an offset for the interpreted methods g_interpreted.push_back(G__MethodInfo()); @@ -182,6 +199,7 @@ TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) return (TFunction*)cr->GetListOfMethods()->At(idx); + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return (TFunction*)idx; } @@ -220,21 +238,25 @@ /* name to opaque C++ scope representation -------------------------------- */ int cppyy_num_scopes(cppyy_scope_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { /* not supported as CINT does not store classes hierarchically */ return 0; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return gClassTable->Classes(); } char* cppyy_scope_name(cppyy_scope_t handle, int iscope) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { /* not supported as CINT does not store classes hierarchically */ assert(!"scope name lookup not supported on inner scopes"); return 0; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); std::string name = gClassTable->At(iscope); if (name.find("::") == std::string::npos) return cppstring_to_cstring(name); @@ -242,6 +264,7 @@ } char* cppyy_resolve_name(const char* cppitem_name) { + R__LOCKGUARD2(gCINTMutex); std::string tname = cppitem_name; // global namespace? 
@@ -260,7 +283,7 @@ if (ti.Property() & G__BIT_ISENUM) return cppstring_to_cstring("unsigned int"); - // actual typedef resolution; add back array declartion portion, if needed + // actual typedef resolution; add back array declaration portion, if needed std::string rt = ti.TrueName(); // builtin STL types have fake typedefs :/ @@ -274,6 +297,8 @@ } cppyy_scope_t cppyy_get_scope(const char* scope_name) { + R__LOCKGUARD2(gCINTMutex); + // CINT still has trouble with std:: sometimes ... if (strncmp(scope_name, "std::", 5) == 0) scope_name = &scope_name[5]; @@ -303,6 +328,8 @@ } cppyy_type_t cppyy_get_template(const char* template_name) { + R__LOCKGUARD2(gCINTMutex); + ClassRefIndices_t::iterator icr = g_classref_indices.find(template_name); if (icr != g_classref_indices.end()) return (cppyy_type_t)icr->second; @@ -322,6 +349,7 @@ } cppyy_type_t cppyy_actual_class(cppyy_type_t klass, cppyy_object_t obj) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(klass); TClass* clActual = cr->GetActualClass( (void*)obj ); if (clActual && clActual != cr.GetClass()) { @@ -334,6 +362,7 @@ /* memory management ------------------------------------------------------ */ cppyy_object_t cppyy_allocate(cppyy_type_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); return (cppyy_object_t)malloc(cr->Size()); } @@ -343,6 +372,7 @@ } void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); cr->Destructor((void*)self, true); } @@ -352,6 +382,8 @@ static inline G__value cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + R__LOCKGUARD2(gCINTMutex); + G__param* libp = (G__param*)((char*)args - offsetof(G__param, para)); assert(libp->paran == nargs); fixup_args(libp); @@ -378,7 +410,6 @@ G__settemplevel(1); long index = (long)&method; - G__CurrentCall(G__SETMEMFUNCENV, 0, &index); // TODO: access to store_struct_offset won't work on Windows 
long store_struct_offset = G__store_struct_offset; @@ -392,7 +423,6 @@ if (G__get_return(0) > G__RETURN_NORMAL) G__security_recover(0); // 0 ensures silence - G__CurrentCall(G__NOP, 0, 0); G__settemplevel(-1); G__UnlockCriticalSection(); @@ -449,6 +479,7 @@ } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + R__LOCKGUARD2(gCINTMutex); G__value result = cppyy_call_T(method, self, nargs, args); G__pop_tempobject_nodel(); if (result.ref && *(long*)result.ref) { @@ -460,6 +491,7 @@ } cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { + R__LOCKGUARD2(gCINTMutex); cppyy_object_t self = (cppyy_object_t)NULL; if ((InterpretedFuncs_t::size_type)method >= g_interpreted.size()) { G__setgvp((long)G__PVOID); @@ -476,9 +508,10 @@ cppyy_object_t cppyy_call_o(cppyy_type_t method, cppyy_object_t self, int nargs, void* args, cppyy_type_t /*result_type*/ ) { + R__LOCKGUARD2(gCINTMutex); G__value result = cppyy_call_T(method, self, nargs, args); G__pop_tempobject_nodel(); - return G__int(result); + return (cppyy_object_t)G__int(result); } cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /*handle*/, cppyy_index_t /*idx*/) { @@ -512,15 +545,17 @@ /* scope reflection information ------------------------------------------- */ int cppyy_is_namespace(cppyy_scope_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetClassInfo()) return cr->Property() & G__BIT_ISNAMESPACE; - if (strcmp(cr.GetClassName(), "") == 0) + if (handle == (cppyy_scope_t)GLOBAL_HANDLE) return true; return false; } int cppyy_is_enum(const char* type_name) { + R__LOCKGUARD2(gCINTMutex); G__TypeInfo ti(type_name); return (ti.Property() & G__BIT_ISENUM); } @@ -528,6 +563,7 @@ /* type/class reflection information -------------------------------------- */ char* cppyy_final_name(cppyy_type_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = 
type_from_handle(handle); if (cr.GetClass() && cr->GetClassInfo()) { std::string true_name = G__TypeInfo(cr->GetName()).TrueName(); @@ -540,6 +576,7 @@ } char* cppyy_scoped_final_name(cppyy_type_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetClassInfo()) { std::string true_name = G__TypeInfo(cr->GetName()).TrueName(); @@ -555,6 +592,7 @@ } int cppyy_num_bases(cppyy_type_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetListOfBases() != 0) return cr->GetListOfBases()->GetSize(); @@ -562,12 +600,14 @@ } char* cppyy_base_name(cppyy_type_t handle, int base_index) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TBaseClass* b = (TBaseClass*)cr->GetListOfBases()->At(base_index); return type_cppstring_to_cstring(b->GetName()); } int cppyy_is_subtype(cppyy_type_t derived_handle, cppyy_type_t base_handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& derived_type = type_from_handle(derived_handle); TClassRef& base_type = type_from_handle(base_handle); return derived_type->GetBaseClass(base_type) != 0; @@ -575,6 +615,8 @@ size_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int /* direction */) { + R__LOCKGUARD2(gCINTMutex); + // WARNING: CINT can not handle actual dynamic casts! 
TClassRef& derived_type = type_from_handle(derived_handle); TClassRef& base_type = type_from_handle(base_handle); @@ -606,10 +648,11 @@ /* method/function reflection information --------------------------------- */ int cppyy_num_methods(cppyy_scope_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetListOfMethods()) return cr->GetListOfMethods()->GetSize(); - else if (strcmp(cr.GetClassName(), "") == 0) { + else if (handle == (cppyy_scope_t)GLOBAL_HANDLE) { if (g_globalfuncs.empty()) { TCollection* funcs = gROOT->GetListOfGlobalFunctions(kTRUE); g_globalfuncs.reserve(funcs->GetSize()); @@ -628,13 +671,17 @@ } cppyy_index_t cppyy_method_index_at(cppyy_scope_t handle, int imeth) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) return (cppyy_index_t)imeth; + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return (cppyy_index_t)&g_globalfuncs[imeth]; } cppyy_index_t* cppyy_method_indices_from_name(cppyy_scope_t handle, const char* name) { + R__LOCKGUARD2(gCINTMutex); + std::vector result; TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { @@ -649,14 +696,12 @@ } ++imeth; } - } - - if (result.empty()) { + } else if (handle == (cppyy_scope_t)GLOBAL_HANDLE) { TCollection* funcs = gROOT->GetListOfGlobalFunctions(kTRUE); TFunction* func = 0; TIter ifunc(funcs); while ((func = (TFunction*)ifunc.Next())) { - if (strcmp(func->GetName(), name) == 0) { + if (strcmp(name, func->GetName()) == 0) { g_globalfuncs.push_back(*func); result.push_back((cppyy_index_t)func); } @@ -666,7 +711,7 @@ if (result.empty()) return (cppyy_index_t*)0; - cppyy_index_t* llresult = (cppyy_index_t*)malloc(sizeof(cppyy_index_t)*result.size()+1); + cppyy_index_t* llresult = (cppyy_index_t*)malloc(sizeof(cppyy_index_t)*(result.size()+1)); for (int i = 0; i < (int)result.size(); ++i) llresult[i] = result[i]; llresult[result.size()] = -1; return llresult; @@ -674,6 +719,7 @@ char* 
cppyy_method_name(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TFunction* f = type_get_method(handle, idx); std::string name = f->GetName(); TClassRef& cr = type_from_handle(handle); @@ -685,6 +731,7 @@ } char* cppyy_method_result_type(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cppyy_is_constructor(handle, idx)) return cppstring_to_cstring("constructor"); @@ -693,16 +740,19 @@ } int cppyy_method_num_args(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TFunction* f = type_get_method(handle, idx); return f->GetNargs(); } int cppyy_method_req_args(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TFunction* f = type_get_method(handle, idx); return f->GetNargs() - f->GetNargsOpt(); } char* cppyy_method_arg_type(cppyy_scope_t handle, cppyy_index_t idx, int arg_index) { + R__LOCKGUARD2(gCINTMutex); TFunction* f = type_get_method(handle, idx); TMethodArg* arg = (TMethodArg*)f->GetListOfMethodArgs()->At(arg_index); return type_cppstring_to_cstring(arg->GetFullTypeName()); @@ -714,6 +764,7 @@ } char* cppyy_method_signature(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TFunction* f = type_get_method(handle, idx); std::ostringstream sig; @@ -733,6 +784,7 @@ int cppyy_method_is_template(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TFunction* f = type_get_method(handle, idx); std::string name = f->GetName(); @@ -746,6 +798,7 @@ char* cppyy_method_template_arg_name( cppyy_scope_t handle, cppyy_index_t idx, cppyy_index_t /*iarg*/) { + R__LOCKGUARD2(gCINTMutex); // TODO: return only the name for the requested arg TClassRef& cr = type_from_handle(handle); TFunction* f = type_get_method(handle, idx); @@ -756,6 +809,8 @@ cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t 
idx) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); TFunction* f = type_get_method(handle, idx); if (cr && cr.GetClass() && !cr->IsLoaded()) { @@ -780,10 +835,12 @@ } cppyy_index_t cppyy_get_global_operator(cppyy_scope_t scope, cppyy_scope_t lc, cppyy_scope_t rc, const char* op) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& lccr = type_from_handle(lc); TClassRef& rccr = type_from_handle(rc); - if (!lccr.GetClass() || !rccr.GetClass() || scope != GLOBAL_HANDLE) + if (!lccr.GetClass() || !rccr.GetClass() || scope != (cppyy_scope_t)GLOBAL_HANDLE) return (cppyy_index_t)-1; // (void*)-1 is in kernel space, so invalid as a method handle std::string lcname = lccr->GetName(); @@ -811,12 +868,14 @@ /* method properties ----------------------------------------------------- */ int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TMethod* m = (TMethod*)cr->GetListOfMethods()->At(idx); return strcmp(m->GetName(), ((G__ClassInfo*)cr->GetClassInfo())->Name()) == 0; } int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TMethod* m = (TMethod*)cr->GetListOfMethods()->At(idx); return m->Property() & G__BIT_ISSTATIC; @@ -825,10 +884,12 @@ /* data member reflection information ------------------------------------- */ int cppyy_num_datamembers(cppyy_scope_t handle) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetListOfDataMembers()) return cr->GetListOfDataMembers()->GetSize(); - else if (strcmp(cr.GetClassName(), "") == 0) { + else if (handle == (cppyy_scope_t)GLOBAL_HANDLE) { TCollection* vars = gROOT->GetListOfGlobals(kTRUE); if (g_globalvars.size() != (GlobalVars_t::size_type)vars->GetSize()) { g_globalvars.clear(); @@ -847,16 +908,21 @@ } char* cppyy_datamember_name(cppyy_scope_t handle, int datamember_index) { + 
R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return cppstring_to_cstring(m->GetName()); } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; return cppstring_to_cstring(gbl.GetName()); } char* cppyy_datamember_type(cppyy_scope_t handle, int datamember_index) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); @@ -870,21 +936,26 @@ } return cppstring_to_cstring(fullType); } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; return cppstring_to_cstring(gbl.GetFullTypeName()); } size_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return (size_t)m->GetOffsetCint(); } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; return (size_t)gbl.GetAddress(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { // called from updates; add a hard reset as the code itself caches in @@ -908,32 +979,38 @@ } ++idm; } + } else if (handle == (cppyy_type_t)GLOBAL_HANDLE) { + TGlobal* gbl = (TGlobal*)gROOT->GetListOfGlobals(kTRUE)->FindObject(name); + if (!gbl) + return -1; + int idx = g_globalvars.size(); + g_globalvars.push_back(*gbl); + return idx; } - TGlobal* gbl = (TGlobal*)gROOT->GetListOfGlobals(kTRUE)->FindObject(name); - if (!gbl) - return -1; - int idx = g_globalvars.size(); - g_globalvars.push_back(*gbl); - return idx; + return -1; } /* data member properties 
------------------------------------------------ */ int cppyy_is_publicdata(cppyy_scope_t handle, int datamember_index) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return m->Property() & G__BIT_ISPUBLIC; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return 1; // global data is always public } int cppyy_is_staticdata(cppyy_scope_t handle, int datamember_index) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return m->Property() & G__BIT_ISSTATIC; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return 1; // global data is always static } @@ -959,16 +1036,9 @@ return (cppyy_object_t)new std::string(*(std::string*)ptr); } -void cppyy_assign2stdstring(cppyy_object_t ptr, const char* str) { - *((std::string*)ptr) = str; -} - -void cppyy_free_stdstring(cppyy_object_t ptr) { - delete (std::string*)ptr; -} - void* cppyy_load_dictionary(const char* lib_name) { + R__LOCKGUARD2(gCINTMutex); if (0 <= gSystem->Load(lib_name)) return (void*)1; return (void*)0; @@ -976,6 +1046,13 @@ /* pythonization helpers -------------------------------------------------- */ +typedef double (*tfn_callback)(double*, double*); + +cppyy_object_t cppyy_create_tf1(const char* funcname, unsigned long address, + double xmin, double xmax, int npar) { + return (cppyy_object_t)new TF1(funcname, (tfn_callback)address, xmin, xmax, npar); +} + cppyy_object_t cppyy_ttree_Branch(void* vtree, const char* branchname, const char* classname, void* addobj, int bufsize, int splitlevel) { // this little song-and-dance is to by-pass the handwritten Branch methods @@ -987,3 +1064,11 @@ long long cppyy_ttree_GetEntry(void* vtree, long long entry) { return (long long)((TTree*)vtree)->GetEntry((Long64_t)entry); } + +cppyy_object_t cppyy_charp2TString(const 
char* str) { + return (cppyy_object_t)new TString(str); +} + +cppyy_object_t cppyy_TString2TString(cppyy_object_t ptr) { + return (cppyy_object_t)new TString(*(TString*)ptr); +} diff --git a/pypy/module/cppyy/src/clingcwrapper.cxx b/pypy/module/cppyy/src/clingcwrapper.cxx new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/src/clingcwrapper.cxx @@ -0,0 +1,1810 @@ +#include "cppyy.h" +#include "clingcwrapper.h" + +/************************************************************************* + * Copyright (C) 1995-2014, the ROOT team. * + * LICENSE: LGPLv2.1; see http://root.cern.ch/drupal/content/license * + * CONTRIBUTORS: see http://root.cern.ch/drupal/content/contributors * + *************************************************************************/ + +#include + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclBase.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/PrettyPrinter.h" +#include "clang/AST/Type.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Sema/Sema.h" + +#include "cling/Interpreter/DynamicLibraryManager.h" +#include "cling/Interpreter/Interpreter.h" +#include "cling/Interpreter/LookupHelper.h" +#include "cling/Interpreter/StoredValueRef.h" +#include "cling/MetaProcessor/MetaProcessor.h" + +#include "llvm/ADT/SmallVector.h" +#include "llvm/ExecutionEngine/GenericValue.h" +#include "llvm/Support/raw_ostream.h" + +#include +#include +#include +#include +#include + +#include +#include +#include + +using namespace clang; + + +/* cling initialization --------------------------------------------------- */ +namespace { + +cling::Interpreter* gCppyy_Cling; +cling::MetaProcessor* gCppyy_MetaProcessor; + +struct Cppyy_InitCling { // TODO: check whether ROOT/meta's TCling is linked in + Cppyy_InitCling() { + std::vector cling_args_storage; + cling_args_storage.push_back("cling4cppyy"); + + // TODO: get this from env + cling_args_storage.push_back("-I/home/wlavrijsen/rootdev/root/etc"); + + 
std::vector interp_args; + for (std::vector::const_iterator iarg = cling_args_storage.begin(); + iarg != cling_args_storage.end(); ++iarg) + interp_args.push_back(iarg->c_str()); + + // TODO: get this from env + const char* llvm_resource_dir = "/home/wlavrijsen/rootdev/root/etc/cling"; + gCppyy_Cling = new cling::Interpreter( + interp_args.size(), &(interp_args[0]), llvm_resource_dir); + + // fInterpreter->installLazyFunctionCreator(llvmLazyFunctionCreator); + + { + // R__LOCKGUARD(gInterpreterMutex); + gCppyy_Cling->AddIncludePath("/home/wlavrijsen/rootdev/root/etc/cling"); + gCppyy_Cling->AddIncludePath("."); + } + + // don't check whether modules' files exist. + gCppyy_Cling->getCI()->getPreprocessorOpts().DisablePCHValidation = true; + + // Use a stream that doesn't close its file descriptor. + static llvm::raw_fd_ostream fMPOuts (STDOUT_FILENO, /* ShouldClose */ false); + gCppyy_MetaProcessor = new cling::MetaProcessor(*gCppyy_Cling, fMPOuts); + + gCppyy_Cling->enableDynamicLookup(); + } +} _init; + +typedef std::map NamedHandles_t; +static NamedHandles_t s_named; + +struct SimpleScope { + std::vector m_methods; + std::vector m_data; +}; + +typedef std::map Scopes_t; +static Scopes_t s_scopes; + +typedef std::map Wrappers_t; +static Wrappers_t s_wrappers; + +} // unnamed namespace + + +/* local helpers --------------------------------------------------------- */ +static inline void print_error(const std::string& where, const std::string& what) { + std::cerr << where << ": " << what << std::endl; +} + +static inline char* cppstring_to_cstring(const std::string& name) { + char* name_char = (char*)malloc(name.size() + 1); + strcpy(name_char, name.c_str()); + return name_char; +} + +static inline SimpleScope* scope_from_handle(cppyy_type_t handle) { + return s_scopes[(cppyy_scope_t)handle]; +} + +static inline std::string qualtype_to_string(const QualType& qt, const ASTContext& atx) { + std::string result; + + PrintingPolicy policy(atx.getPrintingPolicy()); + 
policy.SuppressTagKeyword = true; // no class or struct keyword + policy.SuppressScope = true; // force scope from a clang::ElaboratedType + policy.AnonymousTagLocations = false; // no file name + line number for anonymous types + // The scope suppression is required for getting rid of the anonymous part of the name + // of a class defined in an anonymous namespace. + + qt.getAsStringInternal(result, policy); + return result; +} + +static inline std::vector build_args(int nargs, void* args) { + std::vector arguments; + arguments.reserve(nargs); + for (int i = 0; i < nargs; ++i) { + char tc = ((CPPYY_G__value*)args)[i].type; + if (tc != 'a' && tc != 'o') + arguments.push_back(&((CPPYY_G__value*)args)[i]); + else + arguments.push_back((void*)(*(long*)&((CPPYY_G__value*)args)[i])); + } + return arguments; +} + + +/* name to opaque C++ scope representation -------------------------------- */ +int cppyy_num_scopes(cppyy_scope_t handle) { + return 0; +} + +char* cppyy_resolve_name(const char* cppitem_name) { + std::cout << " RESOLVING: " << cppitem_name << std::endl; + return cppstring_to_cstring(cppitem_name); +} + +cppyy_scope_t cppyy_get_scope(const char* scope_name) { + const cling::LookupHelper& lh = gCppyy_Cling->getLookupHelper(); + const Type* type = 0; + const Decl* decl = lh.findScope(scope_name, &type, /* intantiateTemplate= */ true); + if (!decl) { + //std::string buf = TClassEdit::InsertStd(name); + //decl = lh.findScope(buf, &type, /* intantiateTemplate= */ true); + } + if (!decl && type) { + const TagType* tagtype = type->getAs(); + if (tagtype) { + decl = tagtype->getDecl(); + } + } + + std::cout << "FOR: " << scope_name << " RECEIVED: " << type << " AND: " << decl << std::endl; + if (decl) { + DeclContext* dc = llvm::cast(const_cast(decl)); + SimpleScope* s = new SimpleScope; + for (DeclContext::decl_iterator idecl = dc->decls_begin(); *idecl; ++idecl) { + if (FunctionDecl* m = llvm::dyn_cast_or_null(*idecl)) + s->m_methods.push_back(m); + else if 
(FieldDecl* d = llvm::dyn_cast_or_null(*idecl)) + s->m_data.push_back(d); + } + s_scopes[(cppyy_scope_t)decl] = s; + } + + return (cppyy_scope_t)decl; // lookup failure return 0 (== error) +} + + +/* method/function dispatching -------------------------------------------- */ + +// TODO: expect the below to live in libCling.so +static CPPYY_Cling_Wrapper_t make_wrapper(const FunctionDecl* fdecl); +static void exec_with_valref_return(void* address, cling::StoredValueRef* ret, const FunctionDecl*); +static long long sv_to_long_long(const cling::StoredValueRef& svref); +// -- TODO: expect the above to live in libCling.so + + +template +static inline T cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + if (s_wrappers.find(method) == s_wrappers.end()) { + make_wrapper((FunctionDecl*)method); + } + cling::StoredValueRef ret; + // std::vector arguments = build_args(nargs, args); + // CPPYY_Cling_Wrapper_t cb = (CPPYY_Cling_Wrapper_t)method; + exec_with_valref_return((void*)self, &ret, (FunctionDecl*)method); + // (*cb)((void*)self, nargs, const_cast(arguments.data()), ret); + return static_cast(sv_to_long_long(ret)); +} + + + +int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + return cppyy_call_T(method, self, nargs, args); +} + + + +cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { + return (cppyy_methptrgetter_t)0; +} + + +/* handling of function argument buffer ----------------------------------- */ +void* cppyy_allocate_function_args(size_t nargs) { + CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); + for (size_t i = 0; i < nargs; ++i) + args[i].type = 'l'; + return (void*)args; +} + +void cppyy_deallocate_function_args(void* args) { + free(args); +} + +size_t cppyy_function_arg_sizeof() { + return sizeof(CPPYY_G__value); +} + +size_t cppyy_function_arg_typeoffset() { + return offsetof(CPPYY_G__value, type); +} + 
+ +/* scope reflection information ------------------------------------------- */ +int cppyy_is_namespace(cppyy_scope_t /* handle */) { + return 0; +} + +int cppyy_is_enum(const char* /* type_name */) { + return 0; +} + + +/* class reflection information ------------------------------------------- */ +char* cppyy_final_name(cppyy_type_t handle) { + for (NamedHandles_t::iterator isp = s_named.begin(); isp != s_named.end(); ++isp) { + if (isp->second == (cppyy_scope_t)handle) + return cppstring_to_cstring(isp->first); + } + return cppstring_to_cstring(""); +} + +char* cppyy_scoped_final_name(cppyy_type_t handle) { + return cppyy_final_name(handle); +} + +int cppyy_has_complex_hierarchy(cppyy_type_t /* handle */) { + return 1; +} + + +/* method/function reflection information --------------------------------- */ +int cppyy_num_methods(cppyy_scope_t handle) { + SimpleScope* s = scope_from_handle(handle); + if (!s) return 0; + return s->m_methods.size(); +} From noreply at buildbot.pypy.org Wed Apr 23 08:53:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 23 Apr 2014 08:53:20 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify Message-ID: <20140423065320.3FD071C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70886:4cedca9c5099 Date: 2014-04-23 02:47 -0400 http://bitbucket.org/pypy/pypy/changeset/4cedca9c5099/ Log: simplify diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -6,7 +6,7 @@ from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import jit_libffi, rfloat -from pypy.module._rawffi.interp_rawffi import unpack_simple_shape +from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance from pypy.module.cppyy import helper, capi, ffitypes @@ -132,7 +132,7 @@ def __getattr__(self, name): if name.startswith('array_'): typecode = 
name[len('array_'):] - arr = self.space.interp_w(W_Array, unpack_simple_shape(self.space, self.space.wrap(typecode))) + arr = self.space.interp_w(W_Array, letter2tp(self.space, typecode)) setattr(self, name, arr) return arr raise AttributeError(name) @@ -409,7 +409,7 @@ if ptrval == 0: from pypy.module.cppyy import interp_cppyy return interp_cppyy.get_nullptr(space) - arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + arr = space.interp_w(W_Array, letter2tp(space, 'P')) return arr.fromaddress(space, ptrval, sys.maxint) def to_memory(self, space, w_obj, w_value, offset): From noreply at buildbot.pypy.org Wed Apr 23 09:43:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 23 Apr 2014 09:43:04 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add description Message-ID: <20140423074304.D99561C06C3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1178:3f334984ef30 Date: 2014-04-23 09:43 +0200 http://bitbucket.org/pypy/stmgc/changeset/3f334984ef30/ Log: Add description diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -151,7 +151,10 @@ /* This lock is acquired when that segment calls synchronize_object_now. On the rare event of a page_privatize(), the latter will acquire all the locks in all segments. Otherwise, for the common case, - it's cheap. */ + it's cheap. (The set of all 'privatization_lock' in all segments + works like one single read-write lock, with page_privatize() acquiring + the write lock; but this variant is more efficient for the case of + many reads / rare writes.) 
*/ uint8_t privatization_lock; /* In case of abort, we restore the 'shadowstack' field and the From noreply at buildbot.pypy.org Wed Apr 23 10:17:21 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 23 Apr 2014 10:17:21 +0200 (CEST) Subject: [pypy-commit] pypy default: move dump_storage to tests and don't use it during normal run (it's mostly Message-ID: <20140423081721.43E361D2B36@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70887:c95bd1d4e622 Date: 2014-04-23 10:16 +0200 http://bitbucket.org/pypy/pypy/changeset/c95bd1d4e622/ Log: move dump_storage to tests and don't use it during normal run (it's mostly useless and very verbose) diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -385,7 +385,6 @@ self._add_pending_fields(pending_setfields) storage.rd_consts = self.memo.consts - dump_storage(storage, liveboxes) return liveboxes[:] def _number_virtuals(self, liveboxes, optimizer, num_env_virtuals): @@ -1457,50 +1456,3 @@ def int_add_const(self, base, offset): return base + offset - -# ____________________________________________________________ - -def dump_storage(storage, liveboxes): - "For profiling only." 
- debug_start("jit-resume") - if have_debug_prints(): - debug_print('Log storage', compute_unique_id(storage)) - frameinfo = storage.rd_frame_info_list - while frameinfo is not None: - try: - jitcodename = frameinfo.jitcode.name - except AttributeError: - jitcodename = str(compute_unique_id(frameinfo.jitcode)) - debug_print('\tjitcode/pc', jitcodename, - frameinfo.pc, - 'at', compute_unique_id(frameinfo)) - frameinfo = frameinfo.prev - numb = storage.rd_numb - while numb: - debug_print('\tnumb', str([untag(numb.nums[i]) - for i in range(len(numb.nums))]), - 'at', compute_unique_id(numb)) - numb = numb.prev - for const in storage.rd_consts: - debug_print('\tconst', const.repr_rpython()) - for box in liveboxes: - if box is None: - debug_print('\tbox', 'None') - else: - debug_print('\tbox', box.repr_rpython()) - if storage.rd_virtuals is not None: - for virtual in storage.rd_virtuals: - if virtual is None: - debug_print('\t\t', 'None') - else: - virtual.debug_prints() - if storage.rd_pendingfields: - debug_print('\tpending setfields') - for i in range(len(storage.rd_pendingfields)): - lldescr = storage.rd_pendingfields[i].lldescr - num = storage.rd_pendingfields[i].num - fieldnum = storage.rd_pendingfields[i].fieldnum - itemindex = storage.rd_pendingfields[i].itemindex - debug_print("\t\t", str(lldescr), str(untag(num)), str(untag(fieldnum)), itemindex) - - debug_stop("jit-resume") diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -31,7 +31,55 @@ except KeyError: value = self.values[box] = OptValue(box) return value - + + +# ____________________________________________________________ + +def dump_storage(storage, liveboxes): + "For profiling only." 
+ debug_start("jit-resume") + if have_debug_prints(): + debug_print('Log storage', compute_unique_id(storage)) + frameinfo = storage.rd_frame_info_list + while frameinfo is not None: + try: + jitcodename = frameinfo.jitcode.name + except AttributeError: + jitcodename = str(compute_unique_id(frameinfo.jitcode)) + debug_print('\tjitcode/pc', jitcodename, + frameinfo.pc, + 'at', compute_unique_id(frameinfo)) + frameinfo = frameinfo.prev + numb = storage.rd_numb + while numb: + debug_print('\tnumb', str([untag(numb.nums[i]) + for i in range(len(numb.nums))]), + 'at', compute_unique_id(numb)) + numb = numb.prev + for const in storage.rd_consts: + debug_print('\tconst', const.repr_rpython()) + for box in liveboxes: + if box is None: + debug_print('\tbox', 'None') + else: + debug_print('\tbox', box.repr_rpython()) + if storage.rd_virtuals is not None: + for virtual in storage.rd_virtuals: + if virtual is None: + debug_print('\t\t', 'None') + else: + virtual.debug_prints() + if storage.rd_pendingfields: + debug_print('\tpending setfields') + for i in range(len(storage.rd_pendingfields)): + lldescr = storage.rd_pendingfields[i].lldescr + num = storage.rd_pendingfields[i].num + fieldnum = storage.rd_pendingfields[i].fieldnum + itemindex = storage.rd_pendingfields[i].itemindex + debug_print("\t\t", str(lldescr), str(untag(num)), str(untag(fieldnum)), itemindex) + + debug_stop("jit-resume") + def test_tag(): assert tag(3, 1) == rffi.r_short(3<<2|1) From noreply at buildbot.pypy.org Wed Apr 23 14:54:29 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 23 Apr 2014 14:54:29 +0200 (CEST) Subject: [pypy-commit] stmgc default: add function to hint another thread to commit soon. used in contention.c to Message-ID: <20140423125429.9A6461C3512@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1179:c79d45878460 Date: 2014-04-23 14:56 +0200 http://bitbucket.org/pypy/stmgc/changeset/c79d45878460/ Log: add function to hint another thread to commit soon. 
used in contention.c to advise committing transactions that cause others to abort. diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -44,6 +44,7 @@ visit((object_t **)&n->next); } +void stmcb_commit_soon() {} nodeptr_t global_chained_list; diff --git a/c7/demo/demo_largemalloc.c b/c7/demo/demo_largemalloc.c --- a/c7/demo/demo_largemalloc.c +++ b/c7/demo/demo_largemalloc.c @@ -23,6 +23,8 @@ abort(); } +void stmcb_commit_soon() {} + /************************************************************/ #define ARENA_SIZE (1024*1024*1024) diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -79,6 +79,8 @@ assert(n->next == *last_next); } +void stmcb_commit_soon() {} + int get_rand(int max) { if (max == 0) diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c --- a/c7/demo/demo_simple.c +++ b/c7/demo/demo_simple.c @@ -39,6 +39,8 @@ visit((object_t **)&n->next); } +void stmcb_commit_soon() {} + static sem_t done; diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -164,7 +164,8 @@ change_timing_state(wait_category); - /* XXX should also tell other_pseg "please commit soon" */ + /* tell the other to commit ASAP */ + signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("pausing...\n")); cond_signal(C_AT_SAFE_POINT); @@ -180,6 +181,9 @@ } else if (!contmgr.abort_other) { + /* tell the other to commit ASAP, since it causes aborts */ + signal_other_to_commit_soon(contmgr.other_pseg); + dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; abort_with_mutex(); @@ -256,6 +260,10 @@ abort_data_structures_from_segment_num(other_segment_num); } dprintf(("killed other thread\n")); + + /* we should commit soon, we caused an abort */ + //signal_other_to_commit_soon(get_priv_segment(STM_SEGMENT->segment_num)); + stmcb_commit_soon(); } } diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- 
a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -4,7 +4,8 @@ static void inevitable_contention_management(uint8_t other_segment_num); static inline bool is_abort(uintptr_t nursery_end) { - return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE); + return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE + && nursery_end != NSE_SIGCOMMITSOON); } static inline bool is_aborting_now(uint8_t other_segment_num) { diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,6 +1,7 @@ /* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ #define NSE_SIGPAUSE STM_TIME_WAIT_OTHER +#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON static uint32_t highest_overflow_number; diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -2,6 +2,10 @@ #include #include +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + /* Each segment can be in one of three possible states, described by the segment variable 'safe_point': @@ -260,6 +264,16 @@ static bool _safe_points_requested = false; #endif +static void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg) +{ + assert(_has_mutex()); + /* never overwrite abort signals or safepoint requests + (too messy to deal with) */ + if (!is_abort(other_pseg->pub.nursery_end) + && !pause_signalled) + other_pseg->pub.nursery_end = NSE_SIGCOMMITSOON; +} + static void signal_everybody_to_pause_running(void) { assert(_safe_points_requested == false); @@ -323,7 +337,20 @@ if (STM_SEGMENT->nursery_end == NURSERY_END) break; /* no safe point requested */ + if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) { + if (previous_state == -1) { + previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); + } + + stmcb_commit_soon(); + if (!pause_signalled) { + STM_SEGMENT->nursery_end = NURSERY_END; + break; + } + STM_SEGMENT->nursery_end = NSE_SIGPAUSE; + } assert(STM_SEGMENT->nursery_end == 
NSE_SIGPAUSE); + assert(pause_signalled); /* If we are requested to enter a safe-point, we cannot proceed now. Wait until the safe-point request is removed for us. */ diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -51,6 +51,7 @@ "wait write read", "wait inevitable", "wait other", + "sync commit soon", "bookkeeping", "minor gc", "major gc", diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -66,6 +66,7 @@ STM_TIME_WAIT_WRITE_READ, STM_TIME_WAIT_INEVITABLE, STM_TIME_WAIT_OTHER, + STM_TIME_SYNC_COMMIT_SOON, STM_TIME_BOOKKEEPING, STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, @@ -209,9 +210,13 @@ The "size rounded up" must be a multiple of 8 and at least 16. "Tracing" an object means enumerating all GC references in it, by invoking the callback passed as argument. + stmcb_commit_soon() is called when it is advised to commit + the transaction as soon as possible in order to avoid conflicts + or improve performance in general. */ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +extern void stmcb_commit_soon(void); /* Allocate an object of the given size, which must be a multiple diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -272,6 +272,10 @@ } } +void stmcb_commit_soon() +{ +} + ''', sources=source_files, define_macros=[('STM_TESTS', '1'), ('STM_LARGEMALLOC_TEST', '1'), From noreply at buildbot.pypy.org Wed Apr 23 15:53:48 2014 From: noreply at buildbot.pypy.org (dalcinl) Date: Wed, 23 Apr 2014 15:53:48 +0200 (CEST) Subject: [pypy-commit] cffi default: Issue 153: Generate same C code for CPython 2 and 3 Message-ID: <20140423135348.A57D91D2411@cobra.cs.uni-duesseldorf.de> Author: Lisandro Dalcin Branch: Changeset: r1503:bcedf12c6e01 Date: 2014-04-23 16:10 +0300 http://bitbucket.org/cffi/cffi/changeset/bcedf12c6e01/ Log: Issue 153: Generate same C code for CPython 2 and 3 - CPython 
2 and 3: make ffi.ferify() generate identical C code - CPython 3: fix refcount management in case of errors at import diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -96,36 +96,47 @@ # # standard init. modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname) + prnt(' if (lib == NULL)') + prnt(' 
return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! @@ -894,25 +905,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) From noreply at buildbot.pypy.org Wed Apr 23 17:01:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 23 Apr 2014 17:01:29 +0200 (CEST) Subject: [pypy-commit] stmgc marker: in-progress: record markers corresponding to old_modified_objects Message-ID: <20140423150129.B9CD21D28AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1180:3cdcd273d6d3 Date: 2014-04-23 17:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/3cdcd273d6d3/ Log: in-progress: record markers corresponding to old_modified_objects diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -86,6 +86,13 @@ Add it to the list 'modified_old_objects'. 
*/ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); + /* Add the current marker, recording where we wrote to this object */ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->modified_old_objects_markers = + list_append2(STM_PSEGMENT->modified_old_objects_markers, + marker[0], marker[1]); + /* We need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only one page in total. */ @@ -223,12 +230,17 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->modified_old_objects_markers)); assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); +#ifndef NDEBUG + /* this should not be used when objects_pointing_to_nursery == NULL */ + STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L; +#endif check_nursery_at_transaction_start(); } @@ -458,6 +470,7 @@ })); list_clear(STM_PSEGMENT->modified_old_objects); + list_clear(STM_PSEGMENT->modified_old_objects_markers); } static void _finish_transaction(int attribute_to) @@ -596,6 +609,7 @@ })); list_clear(pseg->modified_old_objects); + list_clear(pseg->modified_old_objects_markers); } static void abort_data_structures_from_segment_num(int segment_num) diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -78,9 +78,17 @@ /* List of old objects (older than the current transaction) that the current transaction attempts to modify. This is used to track the STM status: they are old objects that where written to and - that need to be copied to other segments upon commit. */ + that need to be copied to other segments upon commit. 
Note that + every object takes three list items: the object, and two words for + the location marker. */ struct list_s *modified_old_objects; + /* For each entry in 'modified_old_objects', we have two entries + in the following list, which give the marker at the time we added + the entry to modified_old_objects. */ + struct list_s *modified_old_objects_markers; + uintptr_t modified_old_objects_markers_num_old; + /* List of out-of-nursery objects that may contain pointers to nursery objects. This is used to track the GC status: they are all objects outside the nursery on which an stm_write() occurred diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -418,6 +418,19 @@ } } +static void mark_visit_from_markers(void) +{ + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + char *base = get_segment_base(j); + struct list_s *lst = get_priv_segment(j)->modified_old_objects_markers; + uintptr_t i; + for (i = list_count(lst); i > 0; i -= 2) { + mark_visit_object((object_t *)list_item(lst, i - 1), base); + } + } +} + static void clean_up_segment_lists(void) { long i; @@ -520,6 +533,7 @@ /* marking */ LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); + mark_visit_from_markers(); mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -33,6 +33,18 @@ #define LIST_APPEND(lst, e) ((lst) = list_append((lst), (uintptr_t)(e))) +static inline struct list_s *list_append2(struct list_s *lst, + uintptr_t item0, uintptr_t item1) +{ + uintptr_t index = lst->count; + lst->count += 2; + if (UNLIKELY(index >= lst->last_allocated)) + lst = _list_grow(lst, index + 1); + lst->items[index + 0] = item0; + lst->items[index + 1] = item1; + return lst; +} + static inline void list_clear(struct list_s *lst) { @@ -66,6 +78,11 @@ lst->items[index] = newitem; } +static inline uintptr_t *list_ptr_to_item(struct list_s *lst, uintptr_t index) +{ + return 
&lst->items[index]; +} + #define LIST_FOREACH_R(lst, TYPE, CODE) \ do { \ struct list_s *_lst = (lst); \ diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -11,38 +11,53 @@ const char *marker); +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) +{ + struct stm_shadowentry_s *current = tl->shadowstack - 1; + struct stm_shadowentry_s *base = tl->shadowstack_base; + /* stop walking just before shadowstack_base, which contains + STM_STACK_MARKER_OLD which shouldn't be expanded */ + while (--current > base) { + if (((uintptr_t)current->ss) & 1) { + /* found the odd marker */ + marker[0] = (uintptr_t)current[0].ss; + marker[1] = (uintptr_t)current[1].ss; + return; + } + } + marker[0] = 0; + marker[1] = 0; +} + +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker) +{ + if (marker[0] == 0) + return; /* no marker entry found */ + if (outmarker[0] != 0) + return; /* already collected an entry */ + if (stmcb_expand_marker != NULL) { + stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], + outmarker, _STM_MARKER_LEN); + } +} + static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) { - if (pseg->marker_self[0] != 0) - return; /* already collected an entry */ - - if (stmcb_expand_marker != NULL) { - stm_thread_local_t *tl = pseg->pub.running_thread; - struct stm_shadowentry_s *current = tl->shadowstack - 1; - struct stm_shadowentry_s *base = tl->shadowstack_base; - /* stop walking just before shadowstack_base, which contains - STM_STACK_MARKER_OLD which shouldn't be expanded */ - while (--current > base) { - uintptr_t x = (uintptr_t)current->ss; - if (x & 1) { - /* the stack entry is an odd number */ - stmcb_expand_marker(pseg->pub.segment_base, x, current[1].ss, - pseg->marker_self, _STM_MARKER_LEN); - - if (pseg->marker_self[0] != 0) - break; - } - } - } + uintptr_t marker[2]; + marker_fetch(pseg->pub.running_thread, marker); + marker_expand(marker, 
pseg->pub.segment_base, pseg->marker_self); } char *_stm_expand_marker(void) { - struct stm_priv_segment_info_s *pseg = - get_priv_segment(STM_SEGMENT->segment_num); - pseg->marker_self[0] = 0; - marker_fetch_expand(pseg); - return pseg->marker_self; + /* for tests only! */ + static char _result[_STM_MARKER_LEN]; + uintptr_t marker[2]; + _result[0] = 0; + marker_fetch(STM_SEGMENT->running_thread, marker); + marker_expand(marker, STM_SEGMENT->segment_base, _result); + return _result; } static void marker_copy(stm_thread_local_t *tl, diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -1,4 +1,7 @@ +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker); static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg); static void marker_copy(stm_thread_local_t *tl, struct stm_priv_segment_info_s *pseg, diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -232,6 +232,18 @@ _collect_now(item)); } +static void collect_roots_from_markers(uintptr_t num_old) +{ + /* visit the marker objects */ + struct list_s *mlst = STM_PSEGMENT->modified_old_objects_markers; + STM_PSEGMENT->modified_old_objects_markers_num_old = list_count(mlst); + uintptr_t i, total = list_count(mlst); + assert((total & 1) == 0); + for (i = num_old + 1; i < total; i += 2) { + minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); + } +} + static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { /* reset the nursery by zeroing it */ @@ -296,6 +308,7 @@ /* All the objects we move out of the nursery become "overflow" objects. We use the list 'objects_pointing_to_nursery' to hold the ones we didn't trace so far. 
*/ + uintptr_t num_old; if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); @@ -305,7 +318,12 @@ into objects_pointing_to_nursery, but instead we use the following shortcut */ collect_modified_old_objects(); + num_old = 0; } + else + num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; + + collect_roots_from_markers(num_old); collect_roots_in_nursery(); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -78,6 +78,7 @@ pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); + pr->modified_old_objects_markers = list_create(); pr->young_weakrefs = list_create(); pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); @@ -115,6 +116,7 @@ assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); + list_free(pr->modified_old_objects_markers); list_free(pr->young_weakrefs); list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -159,8 +159,6 @@ @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") def expand_marker(base, number, ptr, outbuf, outbufsize): seen.append(number) - if ptr == ffi.NULL: - return s = '%d %r\x00' % (number, ptr) assert len(s) <= outbufsize outbuf[0:len(s)] = s @@ -174,8 +172,8 @@ self.push_root(ffi.cast("object_t *", 29)) self.push_root(ffi.cast("object_t *", ffi.NULL)) raw = lib._stm_expand_marker() - assert ffi.string(raw) == '27 %r' % (p,) - assert seen == [29, 27] + assert ffi.string(raw).startswith('29 ') + assert seen == [29] def test_double_abort_markers_cb(self): @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") @@ -190,6 +188,10 @@ self.push_root(ffi.cast("object_t *", 19)) self.push_root(ffi.cast("object_t *", 
ffi.NULL)) stm_set_char(p, 'A') + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) # self.switch(1) self.start_transaction() From noreply at buildbot.pypy.org Wed Apr 23 17:15:04 2014 From: noreply at buildbot.pypy.org (dalcinl) Date: Wed, 23 Apr 2014 17:15:04 +0200 (CEST) Subject: [pypy-commit] cffi default: CPython: Initialize all slots of PyMethodDef table Message-ID: <20140423151504.B026F1C02F2@cobra.cs.uni-duesseldorf.de> Author: Lisandro Dalcin Branch: Changeset: r1504:cc11d18fb59b Date: 2014-04-23 17:35 +0300 http://bitbucket.org/cffi/cffi/changeset/cc11d18fb59b/ Log: CPython: Initialize all slots of PyMethodDef table - Silent GCC -Wmissing-field-initializers diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -89,8 +89,8 @@ # by generate_cpy_function_method(). prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # @@ -405,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -492,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: From noreply at buildbot.pypy.org Wed Apr 23 19:06:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 23 Apr 2014 19:06:49 +0200 (CEST) Subject: 
[pypy-commit] extradoc extradoc: Check in the current state Message-ID: <20140423170649.B5E7A1D2950@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5197:b94e6d82d560 Date: 2014-04-23 19:06 +0200 http://bitbucket.org/pypy/extradoc/changeset/b94e6d82d560/ Log: Check in the current state diff --git a/talk/icooolps2014/overview.txt b/talk/icooolps2014/overview.txt new file mode 100644 --- /dev/null +++ b/talk/icooolps2014/overview.txt @@ -0,0 +1,71 @@ +Position paper outline + +Introduction +============ + + +Issue +----- + +- efficiently supporting multi-CPU usage on dynamic languages that were designed with GIL semantics in mind +(supporting (large) atomic blocks for synchronization) + + +Our Position +------------ + +Current solutions like STM, HTM, and fine-grained locking are slow, hard +to implement correctly, and don't fit the specific problems of dynamic +language. STM is the best way forward but has bad performance, so we +fix that. + + +Discussion +========== + +dynamic language VM problems: + +- high allocation rate (short lived objects) +- (don't know anything about the program that runs until it actually runs: arbitrary atomic block size) + +GIL: + +- nice semantics +- easy support of atomic blocks +- no parallelism + +fine-grained locking: + +- support of atomic blocks? +- hard to get right (deadlocks, performance, lock-granularity) +- very hard to get right for a large language +- hard to retro-fit, as all existing code assumes GIL semantics +- (there are some semantic differences, right? 
not given perfect lock-placement, but well) +( http://www.jython.org/jythonbook/en/1.0/Concurrency.html ) + +multiprocessing: + +- often needs major restructuring of programs (explicit data exchange) +- sometimes communication overhead is too large +- shared memory is a problem, copies of memory are too expensive + +HTM: + +- false-sharing on cache-line level +- limited capacity (caches, undocumented) +- random aborts (haswell) +- generally: transaction-length limited (no atomic blocks) + +STM: + +- overhead (100-1000%) (barrier reference resolution, kills performance on low #cpu) +(FastLane: low overhead, not much gain) +- unlimited transaction length (easy atomic blocks) + + +Potential alternative approach +============================== + +possible solution: +- use virtual memory paging to somehow lower the STM overhead +- tight integration with GC and jit? From noreply at buildbot.pypy.org Wed Apr 23 19:35:12 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 23 Apr 2014 19:35:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Speed up zlib a bit by using memcpy in place a loop Message-ID: <20140423173512.979C51C06C3@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70888:c65c33511bc7 Date: 2014-04-23 10:34 -0700 http://bitbucket.org/pypy/pypy/changeset/c65c33511bc7/ Log: Speed up zlib a bit by using memcpy in place a loop diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -3,7 +3,9 @@ from rpython.rlib import rgc from rpython.rlib.rstring import StringBuilder +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw from rpython.rtyper.tool import rffi_platform from rpython.translator.platform import platform as compiler, CompilationError from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -347,8 +349,7 @@ """ # Prepare the input buffer 
for the stream with lltype.scoped_alloc(rffi.CCHARP.TO, len(data)) as inbuf: - for i in xrange(len(data)): - inbuf[i] = data[i] + copy_string_to_raw(llstr(data), inbuf, 0, len(data)) stream.c_next_in = rffi.cast(Bytefp, inbuf) rffi.setintfield(stream, 'c_avail_in', len(data)) From noreply at buildbot.pypy.org Wed Apr 23 20:08:36 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 23 Apr 2014 20:08:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove an unused library from rlib Message-ID: <20140423180836.DF1301C06C3@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70889:7c640eb29429 Date: 2014-04-23 11:08 -0700 http://bitbucket.org/pypy/pypy/changeset/7c640eb29429/ Log: Remove an unused library from rlib diff --git a/rpython/rlib/bitmanipulation.py b/rpython/rlib/bitmanipulation.py deleted file mode 100644 --- a/rpython/rlib/bitmanipulation.py +++ /dev/null @@ -1,32 +0,0 @@ -from rpython.rlib import unroll - - -class BitSplitter(dict): - def __getitem__(self, lengths): - if isinstance(lengths, int): - lengths = (lengths, ) - if lengths in self: - return dict.__getitem__(self, lengths) - unrolling_lenghts = unroll.unrolling_iterable(lengths) - def splitbits(integer): - result = () - sum = 0 - for length in unrolling_lenghts: - sum += length - n = integer & ((1<= 0 - result += (n, ) - integer = integer >> length - assert sum <= 32 - return result - splitbits.func_name += "_" + "_".join([str(i) for i in lengths]) - self[lengths] = splitbits - return splitbits - - def _freeze_(self): - # as this class is not in __builtin__, we need to explicitly tell - # the flow space that the object is frozen and the accesses can - # be constant-folded. 
- return True - -splitter = BitSplitter() diff --git a/rpython/rlib/test/test_bitmanipulation.py b/rpython/rlib/test/test_bitmanipulation.py deleted file mode 100644 --- a/rpython/rlib/test/test_bitmanipulation.py +++ /dev/null @@ -1,15 +0,0 @@ -from rpython.rlib.bitmanipulation import splitter - - -def test_simple_splitbits(): - assert ((1, ) * 4) == splitter[8,8,8,8](0x01010101) - assert ((255, ) * 4) == splitter[8,8,8,8](0xFfFfFfFf) - -def test_fancy_splitbits(): - assert (4,3,2,1) == splitter[8,8,8,8](0x01020304) - assert (1,3,7,15) == splitter[1,2,3,4](0xFfFfFfFf) - -def test_format_splitbits(): - x = 0xAA - assert (x & 3, ) == splitter[2](x) - From noreply at buildbot.pypy.org Wed Apr 23 21:18:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 23 Apr 2014 21:18:55 +0200 (CEST) Subject: [pypy-commit] pypy default: fix ufunc reduce with single axis tuple (issue1718) Message-ID: <20140423191855.B08231D2BE5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70890:d91034c74551 Date: 2014-04-23 15:17 -0400 http://bitbucket.org/pypy/pypy/changeset/d91034c74551/ Log: fix ufunc reduce with single axis tuple (issue1718) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -136,6 +136,11 @@ def newcomplex(self, r, i): return ComplexObject(r, i) + def getitem(self, obj, index): + assert isinstance(obj, ListObject) + assert isinstance(index, IntObject) + return obj.items[index.intval] + def listview(self, obj, number=-1): assert isinstance(obj, ListObject) if number != -1: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1506,6 +1506,9 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.max() == 5.7 + assert a.max().shape == () + assert 
a.max(axis=(0,)) == 5.7 + assert a.max(axis=(0,)).shape == () assert a.max(keepdims=True) == 5.7 assert a.max(keepdims=True).shape == (1,) b = array([]) @@ -1521,6 +1524,9 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.min() == -3.0 + assert a.min().shape == () + assert a.min(axis=(0,)) == -3.0 + assert a.min(axis=(0,)).shape == () assert a.min(keepdims=True) == -3.0 assert a.min(keepdims=True).shape == (1,) b = array([]) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -772,6 +772,7 @@ a = zeros((2, 2)) + 1 assert (add.reduce(a, axis=1) == [2, 2]).all() + assert (add.reduce(a, axis=(1,)) == [2, 2]).all() exc = raises(ValueError, add.reduce, a, axis=2) assert exc.value[0] == "'axis' entry is out of bounds" diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -178,6 +178,8 @@ if space.is_none(w_axis): axis = maxint else: + if space.isinstance_w(w_axis, space.w_tuple) and space.len_w(w_axis) == 1: + w_axis = space.getitem(w_axis, space.wrap(0)) axis = space.int_w(w_axis) if axis < -shapelen or axis >= shapelen: raise oefmt(space.w_ValueError, "'axis' entry is out of bounds") From noreply at buildbot.pypy.org Wed Apr 23 21:42:28 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 23 Apr 2014 21:42:28 +0200 (CEST) Subject: [pypy-commit] pypy default: copy release announcement from release-2.3.x branch Message-ID: <20140423194228.165071C02F2@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70891:eca7b4daa067 Date: 2014-04-23 22:41 +0300 http://bitbucket.org/pypy/pypy/changeset/eca7b4daa067/ Log: copy release announcement from release-2.3.x branch diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null 
+++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,88 @@ +======================================= +PyPy 2.3 - XXXX TODO +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. + +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. 
There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. + You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. + +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. 
+ +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski From noreply at buildbot.pypy.org Wed Apr 23 21:50:47 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 23 Apr 2014 21:50:47 +0200 (CEST) Subject: [pypy-commit] pypy default: fixes to elidable; now warning free Message-ID: <20140423195047.527AF1C06C3@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r70892:a637c43f17fa Date: 2014-04-23 12:45 -0700 http://bitbucket.org/pypy/pypy/changeset/a637c43f17fa/ Log: fixes to elidable; now warning free diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -216,11 +216,20 @@ 'stdstring2stdstring' : ([c_object], c_object), } + # size/offset are backend-specific but fixed after load + self.c_sizeof_farg = 0 + self.c_offset_farg = 0 + + def load_reflection_library(space): state = space.fromcache(State) if state.library is None: from pypy.module._cffi_backend.libraryobj import W_Library state.library = W_Library(space, reflection_library, rdynload.RTLD_LOCAL | rdynload.RTLD_LAZY) + if state.library: + # fix constants + state.c_sizeof_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) + state.c_offset_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) return state.library def verify_backend(space): @@ -340,12 +349,12 @@ return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) def c_deallocate_function_args(space, cargs): call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) - at jit.elidable def c_function_arg_sizeof(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) - at jit.elidable + state = space.fromcache(State) + return state.c_sizeof_farg def c_function_arg_typeoffset(space): - return _cdata_to_size_t(space, call_capi(space, 
'function_arg_typeoffset', [])) + state = space.fromcache(State) + return state.c_offset_farg # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): @@ -365,13 +374,12 @@ def c_base_name(space, cppclass, base_index): args = [_Arg(l=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) - at jit.elidable_promote('2') def c_is_subtype(space, derived, base): + jit.promote(base) if derived == base: return bool(1) return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) - at jit.elidable_promote('1,2,4') def _c_base_offset(space, derived_h, base_h, address, direction): args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] return _cdata_to_size_t(space, call_capi(space, 'base_offset', args)) diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -604,12 +604,10 @@ def get_returntype(self): return self.space.wrap(self.converter.name) - @jit.elidable_promote() def _get_offset(self, cppinstance): if cppinstance: assert lltype.typeOf(cppinstance.cppclass.handle) == lltype.typeOf(self.scope.handle) - offset = self.offset + capi.c_base_offset(self.space, - cppinstance.cppclass, self.scope, cppinstance.get_rawobject(), 1) + offset = self.offset + cppinstance.cppclass.get_base_offset(cppinstance, self.scope) else: offset = self.offset return offset @@ -739,7 +737,6 @@ self.datamembers[name] = new_dm return new_dm - @jit.elidable_promote() def dispatch(self, name, signature): overload = self.get_overload(name) sig = '(%s)' % signature @@ -908,6 +905,10 @@ def find_datamember(self, name): raise self.missing_attribute_error(name) + def get_base_offset(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + return 0 + def get_cppthis(self, cppinstance, calling_scope): assert self == 
cppinstance.cppclass return cppinstance.get_rawobject() @@ -939,10 +940,15 @@ class W_ComplexCPPClass(W_CPPClass): - def get_cppthis(self, cppinstance, calling_scope): + def get_base_offset(self, cppinstance, calling_scope): assert self == cppinstance.cppclass offset = capi.c_base_offset(self.space, self, calling_scope, cppinstance.get_rawobject(), 1) + return offset + + def get_cppthis(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + offset = self.get_base_offset(cppinstance, calling_scope) return capi.direct_ptradd(cppinstance.get_rawobject(), offset) W_ComplexCPPClass.typedef = TypeDef( From noreply at buildbot.pypy.org Wed Apr 23 21:50:48 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 23 Apr 2014 21:50:48 +0200 (CEST) Subject: [pypy-commit] pypy default: merge default into branch Message-ID: <20140423195048.8FE4E1C06C3@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r70893:822c0b8d758b Date: 2014-04-23 12:46 -0700 http://bitbucket.org/pypy/pypy/changeset/822c0b8d758b/ Log: merge default into branch diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,88 @@ +======================================= +PyPy 2.3 - XXXX TODO +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! 
The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. + +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. 
+ You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. + +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. + +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -6,7 +6,7 @@ from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import jit_libffi, rfloat -from pypy.module._rawffi.interp_rawffi import unpack_simple_shape +from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance from pypy.module.cppyy import helper, capi, ffitypes @@ -132,7 +132,7 @@ def __getattr__(self, name): if name.startswith('array_'): typecode = name[len('array_'):] - arr = self.space.interp_w(W_Array, unpack_simple_shape(self.space, self.space.wrap(typecode))) + arr = self.space.interp_w(W_Array, letter2tp(self.space, typecode)) setattr(self, name, arr) return arr raise AttributeError(name) @@ -409,7 +409,7 @@ if ptrval == 0: from pypy.module.cppyy import interp_cppyy return interp_cppyy.get_nullptr(space) - arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + arr = space.interp_w(W_Array, letter2tp(space, 'P')) return arr.fromaddress(space, ptrval, sys.maxint) def to_memory(self, space, w_obj, w_value, offset): diff --git 
a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -136,6 +136,11 @@ def newcomplex(self, r, i): return ComplexObject(r, i) + def getitem(self, obj, index): + assert isinstance(obj, ListObject) + assert isinstance(index, IntObject) + return obj.items[index.intval] + def listview(self, obj, number=-1): assert isinstance(obj, ListObject) if number != -1: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1506,6 +1506,9 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.max() == 5.7 + assert a.max().shape == () + assert a.max(axis=(0,)) == 5.7 + assert a.max(axis=(0,)).shape == () assert a.max(keepdims=True) == 5.7 assert a.max(keepdims=True).shape == (1,) b = array([]) @@ -1521,6 +1524,9 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.min() == -3.0 + assert a.min().shape == () + assert a.min(axis=(0,)) == -3.0 + assert a.min(axis=(0,)).shape == () assert a.min(keepdims=True) == -3.0 assert a.min(keepdims=True).shape == (1,) b = array([]) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -772,6 +772,7 @@ a = zeros((2, 2)) + 1 assert (add.reduce(a, axis=1) == [2, 2]).all() + assert (add.reduce(a, axis=(1,)) == [2, 2]).all() exc = raises(ValueError, add.reduce, a, axis=2) assert exc.value[0] == "'axis' entry is out of bounds" diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -178,6 +178,8 @@ if space.is_none(w_axis): axis = maxint else: + if space.isinstance_w(w_axis, space.w_tuple) and 
space.len_w(w_axis) == 1: + w_axis = space.getitem(w_axis, space.wrap(0)) axis = space.int_w(w_axis) if axis < -shapelen or axis >= shapelen: raise oefmt(space.w_ValueError, "'axis' entry is out of bounds") diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -385,7 +385,6 @@ self._add_pending_fields(pending_setfields) storage.rd_consts = self.memo.consts - dump_storage(storage, liveboxes) return liveboxes[:] def _number_virtuals(self, liveboxes, optimizer, num_env_virtuals): @@ -1457,50 +1456,3 @@ def int_add_const(self, base, offset): return base + offset - -# ____________________________________________________________ - -def dump_storage(storage, liveboxes): - "For profiling only." - debug_start("jit-resume") - if have_debug_prints(): - debug_print('Log storage', compute_unique_id(storage)) - frameinfo = storage.rd_frame_info_list - while frameinfo is not None: - try: - jitcodename = frameinfo.jitcode.name - except AttributeError: - jitcodename = str(compute_unique_id(frameinfo.jitcode)) - debug_print('\tjitcode/pc', jitcodename, - frameinfo.pc, - 'at', compute_unique_id(frameinfo)) - frameinfo = frameinfo.prev - numb = storage.rd_numb - while numb: - debug_print('\tnumb', str([untag(numb.nums[i]) - for i in range(len(numb.nums))]), - 'at', compute_unique_id(numb)) - numb = numb.prev - for const in storage.rd_consts: - debug_print('\tconst', const.repr_rpython()) - for box in liveboxes: - if box is None: - debug_print('\tbox', 'None') - else: - debug_print('\tbox', box.repr_rpython()) - if storage.rd_virtuals is not None: - for virtual in storage.rd_virtuals: - if virtual is None: - debug_print('\t\t', 'None') - else: - virtual.debug_prints() - if storage.rd_pendingfields: - debug_print('\tpending setfields') - for i in range(len(storage.rd_pendingfields)): - lldescr = storage.rd_pendingfields[i].lldescr - num = storage.rd_pendingfields[i].num - fieldnum 
= storage.rd_pendingfields[i].fieldnum - itemindex = storage.rd_pendingfields[i].itemindex - debug_print("\t\t", str(lldescr), str(untag(num)), str(untag(fieldnum)), itemindex) - - debug_stop("jit-resume") diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -31,7 +31,55 @@ except KeyError: value = self.values[box] = OptValue(box) return value - + + +# ____________________________________________________________ + +def dump_storage(storage, liveboxes): + "For profiling only." + debug_start("jit-resume") + if have_debug_prints(): + debug_print('Log storage', compute_unique_id(storage)) + frameinfo = storage.rd_frame_info_list + while frameinfo is not None: + try: + jitcodename = frameinfo.jitcode.name + except AttributeError: + jitcodename = str(compute_unique_id(frameinfo.jitcode)) + debug_print('\tjitcode/pc', jitcodename, + frameinfo.pc, + 'at', compute_unique_id(frameinfo)) + frameinfo = frameinfo.prev + numb = storage.rd_numb + while numb: + debug_print('\tnumb', str([untag(numb.nums[i]) + for i in range(len(numb.nums))]), + 'at', compute_unique_id(numb)) + numb = numb.prev + for const in storage.rd_consts: + debug_print('\tconst', const.repr_rpython()) + for box in liveboxes: + if box is None: + debug_print('\tbox', 'None') + else: + debug_print('\tbox', box.repr_rpython()) + if storage.rd_virtuals is not None: + for virtual in storage.rd_virtuals: + if virtual is None: + debug_print('\t\t', 'None') + else: + virtual.debug_prints() + if storage.rd_pendingfields: + debug_print('\tpending setfields') + for i in range(len(storage.rd_pendingfields)): + lldescr = storage.rd_pendingfields[i].lldescr + num = storage.rd_pendingfields[i].num + fieldnum = storage.rd_pendingfields[i].fieldnum + itemindex = storage.rd_pendingfields[i].itemindex + debug_print("\t\t", str(lldescr), str(untag(num)), str(untag(fieldnum)), 
itemindex) + + debug_stop("jit-resume") + def test_tag(): assert tag(3, 1) == rffi.r_short(3<<2|1) diff --git a/rpython/rlib/bitmanipulation.py b/rpython/rlib/bitmanipulation.py deleted file mode 100644 --- a/rpython/rlib/bitmanipulation.py +++ /dev/null @@ -1,32 +0,0 @@ -from rpython.rlib import unroll - - -class BitSplitter(dict): - def __getitem__(self, lengths): - if isinstance(lengths, int): - lengths = (lengths, ) - if lengths in self: - return dict.__getitem__(self, lengths) - unrolling_lenghts = unroll.unrolling_iterable(lengths) - def splitbits(integer): - result = () - sum = 0 - for length in unrolling_lenghts: - sum += length - n = integer & ((1<= 0 - result += (n, ) - integer = integer >> length - assert sum <= 32 - return result - splitbits.func_name += "_" + "_".join([str(i) for i in lengths]) - self[lengths] = splitbits - return splitbits - - def _freeze_(self): - # as this class is not in __builtin__, we need to explicitly tell - # the flow space that the object is frozen and the accesses can - # be constant-folded. 
- return True - -splitter = BitSplitter() diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -3,7 +3,9 @@ from rpython.rlib import rgc from rpython.rlib.rstring import StringBuilder +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw from rpython.rtyper.tool import rffi_platform from rpython.translator.platform import platform as compiler, CompilationError from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -347,8 +349,7 @@ """ # Prepare the input buffer for the stream with lltype.scoped_alloc(rffi.CCHARP.TO, len(data)) as inbuf: - for i in xrange(len(data)): - inbuf[i] = data[i] + copy_string_to_raw(llstr(data), inbuf, 0, len(data)) stream.c_next_in = rffi.cast(Bytefp, inbuf) rffi.setintfield(stream, 'c_avail_in', len(data)) diff --git a/rpython/rlib/test/test_bitmanipulation.py b/rpython/rlib/test/test_bitmanipulation.py deleted file mode 100644 --- a/rpython/rlib/test/test_bitmanipulation.py +++ /dev/null @@ -1,15 +0,0 @@ -from rpython.rlib.bitmanipulation import splitter - - -def test_simple_splitbits(): - assert ((1, ) * 4) == splitter[8,8,8,8](0x01010101) - assert ((255, ) * 4) == splitter[8,8,8,8](0xFfFfFfFf) - -def test_fancy_splitbits(): - assert (4,3,2,1) == splitter[8,8,8,8](0x01020304) - assert (1,3,7,15) == splitter[1,2,3,4](0xFfFfFfFf) - -def test_format_splitbits(): - x = 0xAA - assert (x & 3, ) == splitter[2](x) - From noreply at buildbot.pypy.org Wed Apr 23 21:50:49 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 23 Apr 2014 21:50:49 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20140423195049.CAFCE1C06C3@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70894:72e0d4578688 Date: 2014-04-23 12:48 -0700 http://bitbucket.org/pypy/pypy/changeset/72e0d4578688/ Log: 
merge default into branch diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,88 @@ +======================================= +PyPy 2.3 - XXXX TODO +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. + +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. 
Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. + You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. + +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. 
+ +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -216,11 +216,20 @@ 'stdstring2stdstring' : ([c_object], c_object), } + # size/offset are backend-specific but fixed after load + self.c_sizeof_farg = 0 + self.c_offset_farg = 0 + + def load_reflection_library(space): state = space.fromcache(State) if state.library is None: from pypy.module._cffi_backend.libraryobj import W_Library state.library = W_Library(space, reflection_library, rdynload.RTLD_LOCAL | rdynload.RTLD_LAZY) + if state.library: + # fix constants + state.c_sizeof_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) + state.c_offset_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) return state.library def verify_backend(space): @@ -340,12 +349,12 @@ return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) def c_deallocate_function_args(space, cargs): call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) - at jit.elidable def c_function_arg_sizeof(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) - at jit.elidable + state = space.fromcache(State) + return state.c_sizeof_farg def c_function_arg_typeoffset(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) + state = space.fromcache(State) + return state.c_offset_farg # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): @@ -365,13 +374,12 @@ def c_base_name(space, cppclass, base_index): args = [_Arg(l=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) - at jit.elidable_promote('2') def c_is_subtype(space, derived, base): + 
jit.promote(base) if derived == base: return bool(1) return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) - at jit.elidable_promote('1,2,4') def _c_base_offset(space, derived_h, base_h, address, direction): args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] return _cdata_to_size_t(space, call_capi(space, 'base_offset', args)) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -6,7 +6,7 @@ from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import jit_libffi, rfloat -from pypy.module._rawffi.interp_rawffi import unpack_simple_shape +from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance from pypy.module.cppyy import helper, capi, ffitypes @@ -132,7 +132,7 @@ def __getattr__(self, name): if name.startswith('array_'): typecode = name[len('array_'):] - arr = self.space.interp_w(W_Array, unpack_simple_shape(self.space, self.space.wrap(typecode))) + arr = self.space.interp_w(W_Array, letter2tp(self.space, typecode)) setattr(self, name, arr) return arr raise AttributeError(name) @@ -409,7 +409,7 @@ if ptrval == 0: from pypy.module.cppyy import interp_cppyy return interp_cppyy.get_nullptr(space) - arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + arr = space.interp_w(W_Array, letter2tp(space, 'P')) return arr.fromaddress(space, ptrval, sys.maxint) def to_memory(self, space, w_obj, w_value, offset): diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -604,12 +604,10 @@ def get_returntype(self): return self.space.wrap(self.converter.name) - @jit.elidable_promote() def _get_offset(self, cppinstance): if cppinstance: assert lltype.typeOf(cppinstance.cppclass.handle) == 
lltype.typeOf(self.scope.handle) - offset = self.offset + capi.c_base_offset(self.space, - cppinstance.cppclass, self.scope, cppinstance.get_rawobject(), 1) + offset = self.offset + cppinstance.cppclass.get_base_offset(cppinstance, self.scope) else: offset = self.offset return offset @@ -739,7 +737,6 @@ self.datamembers[name] = new_dm return new_dm - @jit.elidable_promote() def dispatch(self, name, signature): overload = self.get_overload(name) sig = '(%s)' % signature @@ -908,6 +905,10 @@ def find_datamember(self, name): raise self.missing_attribute_error(name) + def get_base_offset(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + return 0 + def get_cppthis(self, cppinstance, calling_scope): assert self == cppinstance.cppclass return cppinstance.get_rawobject() @@ -939,10 +940,15 @@ class W_ComplexCPPClass(W_CPPClass): - def get_cppthis(self, cppinstance, calling_scope): + def get_base_offset(self, cppinstance, calling_scope): assert self == cppinstance.cppclass offset = capi.c_base_offset(self.space, self, calling_scope, cppinstance.get_rawobject(), 1) + return offset + + def get_cppthis(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + offset = self.get_base_offset(cppinstance, calling_scope) return capi.direct_ptradd(cppinstance.get_rawobject(), offset) W_ComplexCPPClass.typedef = TypeDef( diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -136,6 +136,11 @@ def newcomplex(self, r, i): return ComplexObject(r, i) + def getitem(self, obj, index): + assert isinstance(obj, ListObject) + assert isinstance(index, IntObject) + return obj.items[index.intval] + def listview(self, obj, number=-1): assert isinstance(obj, ListObject) if number != -1: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ 
b/pypy/module/micronumpy/test/test_ndarray.py @@ -1506,6 +1506,9 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.max() == 5.7 + assert a.max().shape == () + assert a.max(axis=(0,)) == 5.7 + assert a.max(axis=(0,)).shape == () assert a.max(keepdims=True) == 5.7 assert a.max(keepdims=True).shape == (1,) b = array([]) @@ -1521,6 +1524,9 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.min() == -3.0 + assert a.min().shape == () + assert a.min(axis=(0,)) == -3.0 + assert a.min(axis=(0,)).shape == () assert a.min(keepdims=True) == -3.0 assert a.min(keepdims=True).shape == (1,) b = array([]) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -772,6 +772,7 @@ a = zeros((2, 2)) + 1 assert (add.reduce(a, axis=1) == [2, 2]).all() + assert (add.reduce(a, axis=(1,)) == [2, 2]).all() exc = raises(ValueError, add.reduce, a, axis=2) assert exc.value[0] == "'axis' entry is out of bounds" diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -178,6 +178,8 @@ if space.is_none(w_axis): axis = maxint else: + if space.isinstance_w(w_axis, space.w_tuple) and space.len_w(w_axis) == 1: + w_axis = space.getitem(w_axis, space.wrap(0)) axis = space.int_w(w_axis) if axis < -shapelen or axis >= shapelen: raise oefmt(space.w_ValueError, "'axis' entry is out of bounds") diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -385,7 +385,6 @@ self._add_pending_fields(pending_setfields) storage.rd_consts = self.memo.consts - dump_storage(storage, liveboxes) return liveboxes[:] def _number_virtuals(self, liveboxes, optimizer, num_env_virtuals): @@ -1457,50 +1456,3 @@ def 
int_add_const(self, base, offset): return base + offset - -# ____________________________________________________________ - -def dump_storage(storage, liveboxes): - "For profiling only." - debug_start("jit-resume") - if have_debug_prints(): - debug_print('Log storage', compute_unique_id(storage)) - frameinfo = storage.rd_frame_info_list - while frameinfo is not None: - try: - jitcodename = frameinfo.jitcode.name - except AttributeError: - jitcodename = str(compute_unique_id(frameinfo.jitcode)) - debug_print('\tjitcode/pc', jitcodename, - frameinfo.pc, - 'at', compute_unique_id(frameinfo)) - frameinfo = frameinfo.prev - numb = storage.rd_numb - while numb: - debug_print('\tnumb', str([untag(numb.nums[i]) - for i in range(len(numb.nums))]), - 'at', compute_unique_id(numb)) - numb = numb.prev - for const in storage.rd_consts: - debug_print('\tconst', const.repr_rpython()) - for box in liveboxes: - if box is None: - debug_print('\tbox', 'None') - else: - debug_print('\tbox', box.repr_rpython()) - if storage.rd_virtuals is not None: - for virtual in storage.rd_virtuals: - if virtual is None: - debug_print('\t\t', 'None') - else: - virtual.debug_prints() - if storage.rd_pendingfields: - debug_print('\tpending setfields') - for i in range(len(storage.rd_pendingfields)): - lldescr = storage.rd_pendingfields[i].lldescr - num = storage.rd_pendingfields[i].num - fieldnum = storage.rd_pendingfields[i].fieldnum - itemindex = storage.rd_pendingfields[i].itemindex - debug_print("\t\t", str(lldescr), str(untag(num)), str(untag(fieldnum)), itemindex) - - debug_stop("jit-resume") diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -31,7 +31,55 @@ except KeyError: value = self.values[box] = OptValue(box) return value - + + +# ____________________________________________________________ + +def dump_storage(storage, liveboxes): + "For 
profiling only." + debug_start("jit-resume") + if have_debug_prints(): + debug_print('Log storage', compute_unique_id(storage)) + frameinfo = storage.rd_frame_info_list + while frameinfo is not None: + try: + jitcodename = frameinfo.jitcode.name + except AttributeError: + jitcodename = str(compute_unique_id(frameinfo.jitcode)) + debug_print('\tjitcode/pc', jitcodename, + frameinfo.pc, + 'at', compute_unique_id(frameinfo)) + frameinfo = frameinfo.prev + numb = storage.rd_numb + while numb: + debug_print('\tnumb', str([untag(numb.nums[i]) + for i in range(len(numb.nums))]), + 'at', compute_unique_id(numb)) + numb = numb.prev + for const in storage.rd_consts: + debug_print('\tconst', const.repr_rpython()) + for box in liveboxes: + if box is None: + debug_print('\tbox', 'None') + else: + debug_print('\tbox', box.repr_rpython()) + if storage.rd_virtuals is not None: + for virtual in storage.rd_virtuals: + if virtual is None: + debug_print('\t\t', 'None') + else: + virtual.debug_prints() + if storage.rd_pendingfields: + debug_print('\tpending setfields') + for i in range(len(storage.rd_pendingfields)): + lldescr = storage.rd_pendingfields[i].lldescr + num = storage.rd_pendingfields[i].num + fieldnum = storage.rd_pendingfields[i].fieldnum + itemindex = storage.rd_pendingfields[i].itemindex + debug_print("\t\t", str(lldescr), str(untag(num)), str(untag(fieldnum)), itemindex) + + debug_stop("jit-resume") + def test_tag(): assert tag(3, 1) == rffi.r_short(3<<2|1) diff --git a/rpython/rlib/bitmanipulation.py b/rpython/rlib/bitmanipulation.py deleted file mode 100644 --- a/rpython/rlib/bitmanipulation.py +++ /dev/null @@ -1,32 +0,0 @@ -from rpython.rlib import unroll - - -class BitSplitter(dict): - def __getitem__(self, lengths): - if isinstance(lengths, int): - lengths = (lengths, ) - if lengths in self: - return dict.__getitem__(self, lengths) - unrolling_lenghts = unroll.unrolling_iterable(lengths) - def splitbits(integer): - result = () - sum = 0 - for length in 
unrolling_lenghts: - sum += length - n = integer & ((1<= 0 - result += (n, ) - integer = integer >> length - assert sum <= 32 - return result - splitbits.func_name += "_" + "_".join([str(i) for i in lengths]) - self[lengths] = splitbits - return splitbits - - def _freeze_(self): - # as this class is not in __builtin__, we need to explicitly tell - # the flow space that the object is frozen and the accesses can - # be constant-folded. - return True - -splitter = BitSplitter() diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -3,7 +3,9 @@ from rpython.rlib import rgc from rpython.rlib.rstring import StringBuilder +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw from rpython.rtyper.tool import rffi_platform from rpython.translator.platform import platform as compiler, CompilationError from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -347,8 +349,7 @@ """ # Prepare the input buffer for the stream with lltype.scoped_alloc(rffi.CCHARP.TO, len(data)) as inbuf: - for i in xrange(len(data)): - inbuf[i] = data[i] + copy_string_to_raw(llstr(data), inbuf, 0, len(data)) stream.c_next_in = rffi.cast(Bytefp, inbuf) rffi.setintfield(stream, 'c_avail_in', len(data)) diff --git a/rpython/rlib/test/test_bitmanipulation.py b/rpython/rlib/test/test_bitmanipulation.py deleted file mode 100644 --- a/rpython/rlib/test/test_bitmanipulation.py +++ /dev/null @@ -1,15 +0,0 @@ -from rpython.rlib.bitmanipulation import splitter - - -def test_simple_splitbits(): - assert ((1, ) * 4) == splitter[8,8,8,8](0x01010101) - assert ((255, ) * 4) == splitter[8,8,8,8](0xFfFfFfFf) - -def test_fancy_splitbits(): - assert (4,3,2,1) == splitter[8,8,8,8](0x01020304) - assert (1,3,7,15) == splitter[1,2,3,4](0xFfFfFfFf) - -def test_format_splitbits(): - x = 0xAA - assert (x & 3, ) == splitter[2](x) - diff --git 
a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -340,12 +340,8 @@ def test_connect_with_timeout_fail(): s = RSocket() s.settimeout(0.1) - if sys.platform == 'win32': - addr = '169.254.169.254' - else: - addr = '240.240.240.240' with py.test.raises(SocketTimeout): - s.connect(INETAddress(addr, 12345)) + s.connect(INETAddress('10.255.255.10', 12345)) s.close() def test_connect_with_timeout_succeed(): From noreply at buildbot.pypy.org Wed Apr 23 22:10:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 23 Apr 2014 22:10:21 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Oups, forgot to update the value here too Message-ID: <20140423201021.6D6251C099D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r490:ca5bf8248220 Date: 2014-04-23 22:10 +0200 http://bitbucket.org/pypy/pypy.org/changeset/ca5bf8248220/ Log: Oups, forgot to update the value here too diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,7 +9,7 @@ diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,7 +9,7 @@ From noreply at buildbot.pypy.org Wed Apr 23 22:20:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 23 Apr 2014 22:20:39 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20140423202039.176981C099D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r491:fe20b1d536e4 Date: 2014-04-23 22:20 +0200 http://bitbucket.org/pypy/pypy.org/changeset/fe20b1d536e4/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $50742 of $105000 (48.3%) + $50852 of $105000 (48.4%)
      diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,13 +9,13 @@ - $47796 of $60000 (79.6%) + $48121 of $60000 (80.2%)
      diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,15 +9,15 @@ + 2nd call: - 2nd call: $0 of $80000 (0.0%) -
      (does not update automatically) + $2097 of $80000 (2.6%)
      From noreply at buildbot.pypy.org Wed Apr 23 23:42:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 23 Apr 2014 23:42:28 +0200 (CEST) Subject: [pypy-commit] pypy default: create rsre_core.BufMatchContext, use to avoid copying Message-ID: <20140423214228.1BF021C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70895:42dcc26ee1d1 Date: 2014-04-23 16:11 -0400 http://bitbucket.org/pypy/pypy/changeset/42dcc26ee1d1/ Log: create rsre_core.BufMatchContext, use to avoid copying diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -34,8 +34,8 @@ def slice_w(space, ctx, start, end, w_default): if 0 <= start <= end: - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrap(ctx._string[start:end]) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,7 +98,7 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a StrMatchContext or a UnicodeMatchContext for searching + """Make a BufMatchContext or a UnicodeMatchContext for searching in the given w_string object.""" space = self.space if pos < 0: @@ -114,12 +114,14 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - str = space.bufferstr_w(w_string) - if pos > len(str): - pos = len(str) - if endpos > len(str): - endpos = len(str) - return rsre_core.StrMatchContext(self.code, str, + buf = space.buffer_w(w_string) + size = buf.getlength() + assert size >= 0 + if pos > size: + pos = size + if endpos > size: + endpos = size + return rsre_core.BufMatchContext(self.code, buf, pos, endpos, self.flags) def getmatch(self, ctx, found): @@ -477,8 +479,8 @@ def fget_string(self, space): ctx = self.ctx - if 
isinstance(ctx, rsre_core.StrMatchContext): - return space.wrap(ctx._string) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.as_str()) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/rpython/rlib/rsre/rsre_core.py b/rpython/rlib/rsre/rsre_core.py --- a/rpython/rlib/rsre/rsre_core.py +++ b/rpython/rlib/rsre/rsre_core.py @@ -62,7 +62,8 @@ # Install a copy of the function under the name '_spec_funcname' in each # concrete subclass specialized_methods = [] - for prefix, concreteclass in [('str', StrMatchContext), + for prefix, concreteclass in [('buf', BufMatchContext), + ('str', StrMatchContext), ('uni', UnicodeMatchContext)]: newfunc = func_with_new_name(func, prefix + specname) assert not hasattr(concreteclass, specname) @@ -170,6 +171,27 @@ def fresh_copy(self, start): raise NotImplementedError +class BufMatchContext(AbstractMatchContext): + """Concrete subclass for matching in a buffer.""" + + _immutable_fields_ = ["_buffer"] + + def __init__(self, pattern, buf, match_start, end, flags): + AbstractMatchContext.__init__(self, pattern, match_start, end, flags) + self._buffer = buf + + def str(self, index): + check_nonneg(index) + return ord(self._buffer.getitem(index)) + + def lowstr(self, index): + c = self.str(index) + return rsre_char.getlower(c, self.flags) + + def fresh_copy(self, start): + return BufMatchContext(self.pattern, self._buffer, start, + self.end, self.flags) + class StrMatchContext(AbstractMatchContext): """Concrete subclass for matching in a plain string.""" diff --git a/rpython/rlib/rsre/rsre_jit.py b/rpython/rlib/rsre/rsre_jit.py --- a/rpython/rlib/rsre/rsre_jit.py +++ b/rpython/rlib/rsre/rsre_jit.py @@ -33,9 +33,11 @@ setattr(AbstractMatchContext, 'jitdriver_' + name, jitdriver) def install_jitdriver_spec(name, **kwds): + from rpython.rlib.rsre.rsre_core import BufMatchContext from rpython.rlib.rsre.rsre_core import StrMatchContext from 
rpython.rlib.rsre.rsre_core import UnicodeMatchContext - for prefix, concreteclass in [('Str', StrMatchContext), + for prefix, concreteclass in [('Buf', BufMatchContext), + ('Str', StrMatchContext), ('Uni', UnicodeMatchContext)]: jitdriver = RSreJitDriver(prefix + name, **kwds) setattr(concreteclass, 'jitdriver_' + name, jitdriver) From noreply at buildbot.pypy.org Thu Apr 24 00:05:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 00:05:32 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140423220532.814B01C099D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70896:134a1a7ab792 Date: 2014-04-23 18:04 -0400 http://bitbucket.org/pypy/pypy/changeset/134a1a7ab792/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,7 +5,7 @@ .. this is a revision shortly after release-2.3.x .. startrev: ba569fe1efdb - - .. branch: small-unroll-improvements Improve optimiziation of small allocation-heavy loops in the JIT + +.. 
branch: reflex-support From noreply at buildbot.pypy.org Thu Apr 24 00:37:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 00:37:07 +0200 (CEST) Subject: [pypy-commit] pypy default: try another ip to test timeout Message-ID: <20140423223707.9B4031C099D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70897:1b90f8057ef4 Date: 2014-04-23 18:36 -0400 http://bitbucket.org/pypy/pypy/changeset/1b90f8057ef4/ Log: try another ip to test timeout diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -341,7 +341,7 @@ s = RSocket() s.settimeout(0.1) with py.test.raises(SocketTimeout): - s.connect(INETAddress('10.255.255.10', 12345)) + s.connect(INETAddress('172.30.172.30', 12345)) s.close() def test_connect_with_timeout_succeed(): From noreply at buildbot.pypy.org Thu Apr 24 01:03:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 01:03:27 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: comments Message-ID: <20140423230327.BA8F11C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70898:8f8a5ff1a59a Date: 2014-04-23 19:02 -0400 http://bitbucket.org/pypy/pypy/changeset/8f8a5ff1a59a/ Log: comments diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1364,6 +1364,7 @@ self.wrap('cannot convert negative integer ' 'to unsigned int')) + # XXX define these flags correctly, possibly put elsewhere? BUF_SIMPLE = 0 BUF_FULL_RO = 1 BUF_CONTIG = 2 @@ -1401,6 +1402,7 @@ raise oefmt(self.w_TypeError, "expected a character buffer object") + # XXX rename these/replace with code more like CPython getargs for buffers def bufferstr_w(self, w_obj): # Directly returns an interp-level str. 
Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): From noreply at buildbot.pypy.org Thu Apr 24 01:06:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 01:06:56 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: merge default Message-ID: <20140423230656.4BE381C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70899:82aedd1ea884 Date: 2014-04-23 19:05 -0400 http://bitbucket.org/pypy/pypy/changeset/82aedd1ea884/ Log: merge default diff too long, truncating to 2000 out of 5612 lines diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,88 @@ +======================================= +PyPy 2.3 - XXXX TODO +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. + +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. 
It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. + You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. 
+ +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. + +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,7 +5,7 @@ .. this is a revision shortly after release-2.3.x .. startrev: ba569fe1efdb - - .. branch: small-unroll-improvements Improve optimiziation of small allocation-heavy loops in the JIT + +.. branch: reflex-support diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -34,8 +34,8 @@ def slice_w(space, ctx, start, end, w_default): if 0 <= start <= end: - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrap(ctx._string[start:end]) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,7 +98,7 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a StrMatchContext or a UnicodeMatchContext for searching + """Make a BufMatchContext or a UnicodeMatchContext for searching in the given w_string object.""" space = self.space if pos < 0: @@ -114,12 +114,14 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - str = space.bufferstr_w(w_string) - if pos > len(str): - pos = len(str) - if endpos > 
len(str): - endpos = len(str) - return rsre_core.StrMatchContext(self.code, str, + buf = space.buffer_w(w_string) + size = buf.getlength() + assert size >= 0 + if pos > size: + pos = size + if endpos > size: + endpos = size + return rsre_core.BufMatchContext(self.code, buf, pos, endpos, self.flags) def getmatch(self, ctx, found): @@ -477,8 +479,8 @@ def fget_string(self, space): ctx = self.ctx - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrap(ctx._string) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.as_str()) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -12,8 +12,10 @@ '_template_byname' : 'interp_cppyy.template_byname', '_std_string_name' : 'interp_cppyy.std_string_name', '_set_class_generator' : 'interp_cppyy.set_class_generator', + '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', + '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstance' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -190,7 +190,8 @@ [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def c_get_methptr_getter(space, cppscope, index): return _c_get_methptr_getter(cppscope.handle, index) @@ -214,7 +215,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def 
c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -222,7 +224,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -283,7 +286,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) @jit.elidable_promote('2') def c_is_subtype(space, derived, base): if derived == base: @@ -295,7 +299,8 @@ [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) @jit.elidable_promote('1,2,4') def c_base_offset(space, derived, base, address, direction): if derived == base: @@ -543,19 +548,3 @@ compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) -_c_assign2stdstring = rffi.llexternal( - "cppyy_assign2stdstring", - [C_OBJECT, rffi.CCHARP], lltype.Void, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_assign2stdstring(space, cppobject, svalue): - charp = rffi.str2charp(svalue) - _c_assign2stdstring(cppobject, charp) - rffi.free_charp(charp) -_c_free_stdstring = rffi.llexternal( - "cppyy_free_stdstring", - [C_OBJECT], lltype.Void, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_free_stdstring(space, cppobject): - _c_free_stdstring(cppobject) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -6,8 +6,11 @@ from pypy.interpreter.baseobjspace import W_Root from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi +from 
rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import libffi, rdynload +from rpython.tool.udir import udir + +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -19,21 +22,21 @@ if os.environ.get("ROOTSYS"): import commands (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: # presumably Reflex-only - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include")] + if stat != 0: + rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: - rootincpath = [incdir] + rootincpath = [incdir, py.path.local(udir)] rootlibpath = commands.getoutput("root-config --libdir").split() else: - rootincpath = [] + rootincpath = [py.path.local(udir)] rootlibpath = [] def identify(): return 'CINT' -ts_reflect = False -ts_call = False +ts_reflect = True +ts_call = True ts_memory = False ts_helper = False @@ -47,13 +50,15 @@ _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) with rffi.scoped_str2charp('libCore.so') as ll_libname: _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) +with rffi.scoped_str2charp('libHist.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("cintcwrapper.cxx")], include_dirs=[incpath] + rootincpath, includes=["cintcwrapper.h"], library_dirs=rootlibpath, - libraries=["Core", "Cint"], + libraries=["Hist", "Core", "Cint"], use_cpp_linker=True, ) @@ -71,6 +76,23 @@ # CINT-specific pythonizations =============================================== +_c_charp2TString = rffi.llexternal( + "cppyy_charp2TString", + [rffi.CCHARP], C_OBJECT, + releasegil=ts_helper, + compilation_info=eci) +def c_charp2TString(space, svalue): + charp = 
rffi.str2charp(svalue) + result = _c_charp2TString(charp) + rffi.free_charp(charp) + return result +_c_TString2TString = rffi.llexternal( + "cppyy_TString2TString", + [C_OBJECT], C_OBJECT, + releasegil=ts_helper, + compilation_info=eci) +def c_TString2TString(space, cppobject): + return _c_TString2TString(cppobject) def _get_string_data(space, w_obj, m1, m2 = None): from pypy.module.cppyy import interp_cppyy @@ -80,10 +102,85 @@ return w_1 return obj.space.call_method(w_1, m2) +### TF1 ---------------------------------------------------------------------- +class State(object): + def __init__(self, space): + self.tfn_pyfuncs = [] + self.tfn_callbacks = [] + +_create_tf1 = rffi.llexternal( + "cppyy_create_tf1", + [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, + releasegil=False, + compilation_info=eci) + + at unwrap_spec(args_w='args_w') +def tf1_tf1(space, w_self, args_w): + """Pythonized version of TF1 constructor: + takes functions and callable objects, and allows a callback into them.""" + + from pypy.module.cppyy import interp_cppyy + tf1_class = interp_cppyy.scope_byname(space, "TF1") + + # expected signature: + # 1. 
(char* name, pyfunc, double xmin, double xmax, int npar = 0) + argc = len(args_w) + + try: + # Note: argcount is +1 for the class (== w_self) + if argc < 5 or 6 < argc: + raise TypeError("wrong number of arguments") + + # second argument must be a name + funcname = space.str_w(args_w[1]) + + # last (optional) argument is number of parameters + npar = 0 + if argc == 6: npar = space.int_w(args_w[5]) + + # third argument must be a callable python object + w_callable = args_w[2] + if not space.is_true(space.callable(w_callable)): + raise TypeError("2nd argument is not a valid python callable") + + # generate a pointer to function + from pypy.module._cffi_backend import newtype, ctypefunc, func + + c_double = newtype.new_primitive_type(space, 'double') + c_doublep = newtype.new_pointer_type(space, c_double) + + # wrap the callable as the signature needs modifying + w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) + + w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) + w_callback = func.callback(space, w_cfunc, w_ifunc, None) + funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) + + # so far, so good; leaves on issue: CINT is expecting a wrapper, but + # we need the overload that takes a function pointer, which is not in + # the dictionary, hence this helper: + newinst = _create_tf1(space.str_w(args_w[1]), funcaddr, + space.float_w(args_w[3]), space.float_w(args_w[4]), npar) + + from pypy.module.cppyy import interp_cppyy + w_instance = interp_cppyy.wrap_cppobject(space, newinst, tf1_class, + do_cast=False, python_owns=True, fresh=True) + + # tie all the life times to the TF1 instance + space.setattr(w_instance, space.wrap('_callback'), w_callback) + + return w_instance + except (OperationError, TypeError, IndexError), e: + newargs_w = args_w[1:] # drop class + + # return control back to the original, unpythonized overload + ol = tf1_class.get_overload("TF1") + return ol.call(None, newargs_w) + ### TTree 
-------------------------------------------------------------------- _ttree_Branch = rffi.llexternal( "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], rffi.LONG, + [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, releasegil=False, compilation_info=eci) @@ -202,6 +299,8 @@ # some instance klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) w_obj = klass.construct() + # 0x10000 = kDeleteObject; reset because we own the object + space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) space.call_method(w_branch, "SetObject", w_obj) space.call_method(w_branch, "GetEntry", space.wrap(entry)) space.setattr(w_self, args_w[0], w_obj) @@ -274,6 +373,9 @@ allfuncs = [ + ### TF1 + tf1_tf1, + ### TTree ttree_Branch, ttree_iter, ttree_getattr, ] @@ -288,7 +390,14 @@ # callback coming in when app-level bound classes have been created def pythonize(space, name, w_pycppclass): - if name == "TFile": + if name == "TCollection": + _method_alias(space, w_pycppclass, "append", "Add") + _method_alias(space, w_pycppclass, "__len__", "GetSize") + + elif name == "TF1": + space.setattr(w_pycppclass, space.wrap("__new__"), _pythonizations["tf1_tf1"]) + + elif name == "TFile": _method_alias(space, w_pycppclass, "__getattr__", "Get") elif name == "TObjString": @@ -310,3 +419,17 @@ elif name[0:8] == "TVectorT": # TVectorT<> template _method_alias(space, w_pycppclass, "__len__", "GetNoElements") + +# destruction callback (needs better solution, but this is for CINT +# only and should not appear outside of ROOT-specific uses) +from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL + + at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) +def _Py_cppyy_recursive_remove(space, cppobject): + from pypy.module.cppyy.interp_cppyy import memory_regulator + from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT + + obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) + if obj is 
not None: + memory_regulator.unregister(obj) + obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -0,0 +1,69 @@ +import py, os + +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib import libffi, rdynload + +__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] + +pkgpath = py.path.local(__file__).dirpath().join(os.pardir) +srcpath = pkgpath.join("src") +incpath = pkgpath.join("include") + +import commands +(config_stat, incdir) = commands.getstatusoutput("root-config --incdir") + +if os.environ.get("ROOTSYS"): + if config_stat != 0: # presumably Reflex-only + rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] + else: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() +else: + if config_stat == 0: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() + else: + rootincpath = [] + rootlibpath = [] + +def identify(): + return 'Cling' + +ts_reflect = False +ts_call = 'auto' +ts_memory = 'auto' +ts_helper = 'auto' + +std_string_name = 'std::basic_string' + +eci = ExternalCompilationInfo( + separate_module_files=[srcpath.join("clingcwrapper.cxx")], + include_dirs=[incpath] + rootincpath, + includes=["clingcwrapper.h"], + library_dirs=rootlibpath, + libraries=["Cling"], + compile_extra=["-fno-strict-aliasing"], + use_cpp_linker=True, +) + +_c_load_dictionary = rffi.llexternal( + "cppyy_load_dictionary", + [rffi.CCHARP], rdynload.DLLHANDLE, + releasegil=False, + compilation_info=eci) + +def c_load_dictionary(name): + pch = _c_load_dictionary(name) 
+ return pch + + +# Cling-specific pythonizations +def register_pythonizations(space): + "NOT_RPYTHON" + pass + +def pythonize(space, name, w_pycppclass): + pass diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -214,15 +214,22 @@ 'charp2stdstring' : ([c_ccharp], c_object), 'stdstring2stdstring' : ([c_object], c_object), - 'assign2stdstring' : ([c_object, c_ccharp], c_void), - 'free_stdstring' : ([c_object], c_void), } + # size/offset are backend-specific but fixed after load + self.c_sizeof_farg = 0 + self.c_offset_farg = 0 + + def load_reflection_library(space): state = space.fromcache(State) if state.library is None: from pypy.module._cffi_backend.libraryobj import W_Library state.library = W_Library(space, reflection_library, rdynload.RTLD_LOCAL | rdynload.RTLD_LAZY) + if state.library: + # fix constants + state.c_sizeof_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) + state.c_offset_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) return state.library def verify_backend(space): @@ -342,12 +349,12 @@ return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) def c_deallocate_function_args(space, cargs): call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) - at jit.elidable def c_function_arg_sizeof(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) - at jit.elidable + state = space.fromcache(State) + return state.c_sizeof_farg def c_function_arg_typeoffset(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) + state = space.fromcache(State) + return state.c_offset_farg # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): @@ -367,13 +374,12 @@ def c_base_name(space, cppclass, base_index): args = 
[_Arg(l=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) - at jit.elidable_promote('2') def c_is_subtype(space, derived, base): + jit.promote(base) if derived == base: return bool(1) return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) - at jit.elidable_promote('1,2,4') def _c_base_offset(space, derived_h, base_h, address, direction): args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] return _cdata_to_size_t(space, call_capi(space, 'base_offset', args)) @@ -504,11 +510,6 @@ return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) def c_stdstring2stdstring(space, cppobject): return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(l=cppobject)])) -def c_assign2stdstring(space, cppobject, svalue): - args = [_Arg(l=cppobject), _Arg(s=svalue)] - call_capi(space, 'assign2stdstring', args) -def c_free_stdstring(space, cppobject): - call_capi(space, 'free_stdstring', [_Arg(l=cppobject)]) # loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) def register_pythonizations(space): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -6,8 +6,8 @@ from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import jit_libffi, rfloat -from pypy.module._rawffi.interp_rawffi import unpack_simple_shape -from pypy.module._rawffi.array import W_Array +from pypy.module._rawffi.interp_rawffi import letter2tp +from pypy.module._rawffi.array import W_Array, W_ArrayInstance from pypy.module.cppyy import helper, capi, ffitypes @@ -47,21 +47,35 @@ return rawobject return capi.C_NULL_OBJECT +def is_nullpointer_specialcase(space, w_obj): + # 0, None, and nullptr may serve as "NULL", check for any of them + + # integer 0 + try: + return space.int_w(w_obj) == 0 + except Exception: 
+ pass + # None or nullptr + from pypy.module.cppyy import interp_cppyy + return space.is_true(space.is_(w_obj, space.w_None)) or \ + space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space))) + def get_rawbuffer(space, w_obj): + # raw buffer try: buf = space.buffer_w(w_obj, space.BUF_SIMPLE) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass - # special case: allow integer 0 as NULL + # array type try: - buf = space.int_w(w_obj) - if buf == 0: - return rffi.cast(rffi.VOIDP, 0) + arr = space.interp_w(W_ArrayInstance, w_obj, can_be_None=True) + if arr: + return rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) except Exception: pass - # special case: allow None as NULL - if space.is_true(space.is_(w_obj, space.w_None)): + # pre-defined NULL + if is_nullpointer_specialcase(space, w_obj): return rffi.cast(rffi.VOIDP, 0) raise TypeError("not an addressable buffer") @@ -118,7 +132,7 @@ def __getattr__(self, name): if name.startswith('array_'): typecode = name[len('array_'):] - arr = self.space.interp_w(W_Array, unpack_simple_shape(self.space, self.space.wrap(typecode))) + arr = self.space.interp_w(W_Array, letter2tp(self.space, typecode)) setattr(self, name, arr) return arr raise AttributeError(name) @@ -139,8 +153,6 @@ self.size = array_size def from_memory(self, space, w_obj, w_pycppclass, offset): - if hasattr(space, "fake"): - raise NotImplementedError # read access, so no copy needed address_value = self._get_raw_address(space, w_obj, offset) address = rffi.cast(rffi.ULONG, address_value) @@ -261,8 +273,7 @@ self.name = name def convert_argument(self, space, w_obj, address, call_local): - raise OperationError(space.w_TypeError, - space.wrap('no converter available for type "%s"' % self.name)) + self._is_abstract(space) class BoolConverter(ffitypes.typeid(bool), TypeConverter): @@ -372,7 +383,12 @@ try: obj = get_rawbuffer(space, w_obj) except TypeError: - obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) + try: + # TODO: 
accept a 'capsule' rather than naked int + # (do accept int(0), though) + obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + except Exception: + obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj def convert_argument(self, space, w_obj, address, call_local): @@ -385,6 +401,24 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = self._unwrap_object(space, w_obj) + def from_memory(self, space, w_obj, w_pycppclass, offset): + # returned as a long value for the address (INTPTR_T is not proper + # per se, but rffi does not come with a PTRDIFF_T) + address = self._get_raw_address(space, w_obj, offset) + ptrval = rffi.cast(rffi.ULONG, rffi.cast(rffi.VOIDPP, address)[0]) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + arr = space.interp_w(W_Array, letter2tp(space, 'P')) + return arr.fromaddress(space, ptrval, sys.maxint) + + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + if is_nullpointer_specialcase(space, w_value): + address[0] = rffi.cast(rffi.VOIDP, 0) + else: + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) + class VoidPtrPtrConverter(TypeConverter): _immutable_fields_ = ['uses_local'] @@ -412,7 +446,7 @@ _immutable_fields_ = ['uses_local'] uses_local = True -class InstancePtrConverter(TypeConverter): +class InstanceRefConverter(TypeConverter): _immutable_fields_ = ['libffitype', 'cppclass'] libffitype = jit_libffi.types.pointer @@ -444,17 +478,7 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_obj)) - def from_memory(self, space, w_obj, w_pycppclass, offset): - address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy - return interp_cppyy.wrap_cppobject(space, address, self.cppclass, - do_cast=False, is_ref=True) - - def to_memory(self, space, w_obj, w_value, offset): - address 
= rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) - address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) - -class InstanceConverter(InstancePtrConverter): +class InstanceConverter(InstanceRefConverter): def convert_argument_libffi(self, space, w_obj, address, call_local): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -468,6 +492,28 @@ def to_memory(self, space, w_obj, w_value, offset): self._is_abstract(space) + +class InstancePtrConverter(InstanceRefConverter): + + def _unwrap_object(self, space, w_obj): + try: + return InstanceRefConverter._unwrap_object(self, space, w_obj) + except OperationError, e: + # if not instance, allow certain special cases + if is_nullpointer_specialcase(space, w_obj): + return capi.C_NULL_OBJECT + raise e + + def from_memory(self, space, w_obj, w_pycppclass, offset): + address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.wrap_cppobject(space, address, self.cppclass, + do_cast=False, is_ref=True) + + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) + class InstancePtrPtrConverter(InstancePtrConverter): _immutable_fields_ = ['uses_local'] @@ -487,12 +533,6 @@ from pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible - def from_memory(self, space, w_obj, w_pycppclass, offset): - self._is_abstract(space) - - def to_memory(self, space, w_obj, w_value, offset): - self._is_abstract(space) - def finalize_call(self, space, w_obj, call_local): from pypy.module.cppyy.interp_cppyy import W_CPPInstance assert isinstance(w_obj, W_CPPInstance) @@ -501,7 +541,6 @@ class StdStringConverter(InstanceConverter): - _immutable_fields_ = ['cppclass'] def __init__(self, space, extra): from pypy.module.cppyy import 
interp_cppyy @@ -509,24 +548,25 @@ InstanceConverter.__init__(self, space, cppclass) def _unwrap_object(self, space, w_obj): - try: + from pypy.module.cppyy.interp_cppyy import W_CPPInstance + if isinstance(w_obj, W_CPPInstance): + arg = InstanceConverter._unwrap_object(self, space, w_obj) + return capi.c_stdstring2stdstring(space, arg) + else: return capi.c_charp2stdstring(space, space.str_w(w_obj)) - except Exception, e: - arg = InstanceConverter._unwrap_object(self, space, w_obj) - result = capi.c_stdstring2stdstring(space, arg) - return result def to_memory(self, space, w_obj, w_value, offset): try: address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - capi.c_assign2stdstring(space, address, space.str_w(w_value)) - return + assign = self.cppclass.get_overload("__assign__") + from pypy.module.cppyy import interp_cppyy + assign.call( + interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False), [w_value]) except Exception: - pass - return InstanceConverter.to_memory(self, space, w_obj, w_value, offset) + InstanceConverter.to_memory(self, space, w_obj, w_value, offset) def free_argument(self, space, arg, call_local): - capi.c_free_stdstring(space, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) + capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) class StdStringRefConverter(InstancePtrConverter): _immutable_fields_ = ['cppclass'] @@ -570,6 +610,7 @@ def free_argument(self, space, arg, call_local): if hasattr(space, "fake"): raise NotImplementedError + space.getbuiltinmodule("cpyext") from pypy.module.cpyext.pyobject import Py_DecRef, PyObject Py_DecRef(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0])) @@ -627,8 +668,10 @@ # type check for the benefit of the annotator from pypy.module.cppyy.interp_cppyy import W_CPPClass cppclass = space.interp_w(W_CPPClass, cppclass, can_be_None=False) - if compound == "*" or compound == "&": + if compound == "*": return 
InstancePtrConverter(space, cppclass) + elif compound == "&": + return InstanceRefConverter(space, cppclass) elif compound == "**": return InstancePtrPtrConverter(space, cppclass) elif compound == "": @@ -654,7 +697,7 @@ _converters["void**"] = VoidPtrPtrConverter _converters["void*&"] = VoidPtrRefConverter -# special cases (note: CINT backend requires the simple name 'string') +# special cases (note: 'string' aliases added below) _converters["std::basic_string"] = StdStringConverter _converters["const std::basic_string&"] = StdStringConverter # TODO: shouldn't copy _converters["std::basic_string&"] = StdStringRefConverter @@ -776,3 +819,27 @@ for c_type, alias in aliases: _converters[alias] = _converters[c_type] _add_aliased_converters() + +# ROOT-specific converters (TODO: this is a general use case and should grow +# an API; putting it here is done only to circumvent circular imports) +if capi.identify() == "CINT": + + class TStringConverter(InstanceConverter): + def __init__(self, space, extra): + from pypy.module.cppyy import interp_cppyy + cppclass = interp_cppyy.scope_byname(space, "TString") + InstanceConverter.__init__(self, space, cppclass) + + def _unwrap_object(self, space, w_obj): + from pypy.module.cppyy import interp_cppyy + if isinstance(w_obj, interp_cppyy.W_CPPInstance): + arg = InstanceConverter._unwrap_object(self, space, w_obj) + return capi.backend.c_TString2TString(space, arg) + else: + return capi.backend.c_charp2TString(space, space.str_w(w_obj)) + + def free_argument(self, space, arg, call_local): + capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) + + _converters["TString"] = TStringConverter + _converters["const TString&"] = TStringConverter diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -53,17 +53,12 @@ if hasattr(space, "fake"): raise NotImplementedError lresult = capi.c_call_l(space, 
cppmethod, cppthis, num_args, args) - address = rffi.cast(rffi.ULONG, lresult) + ptrval = rffi.cast(rffi.ULONG, lresult) arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap(self.typecode))) - if address == 0: - # TODO: fix this hack; fromaddress() will allocate memory if address - # is null and there seems to be no way around it (ll_buffer can not - # be touched directly) - nullarr = arr.fromaddress(space, address, 0) - assert isinstance(nullarr, W_ArrayInstance) - nullarr.free(space) - return nullarr - return arr.fromaddress(space, address, sys.maxint) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + return arr.fromaddress(space, ptrval, sys.maxint) class VoidExecutor(FunctionExecutor): @@ -144,7 +139,7 @@ from pypy.module.cppyy import interp_cppyy newthis = capi.c_constructor(space, cppmethod, cpptype, num_args, args) assert lltype.typeOf(newthis) == capi.C_OBJECT - return space.wrap(newthis) + return space.wrap(rffi.cast(rffi.LONG, newthis)) # really want ptrdiff_t here class InstancePtrExecutor(FunctionExecutor): @@ -160,7 +155,8 @@ from pypy.module.cppyy import interp_cppyy long_result = capi.c_call_l(space, cppmethod, cppthis, num_args, args) ptr_result = rffi.cast(capi.C_OBJECT, long_result) - return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) + pyres = interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) + return pyres def execute_libffi(self, space, cif_descr, funcaddr, buffer): jit_libffi.jit_ffi_call(cif_descr, funcaddr, buffer) @@ -189,7 +185,7 @@ long_result = capi.c_call_o(space, cppmethod, cppthis, num_args, args, self.cppclass) ptr_result = rffi.cast(capi.C_OBJECT, long_result) return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass, - do_cast=False, python_owns=True) + do_cast=False, python_owns=True, fresh=True) def execute_libffi(self, space, cif_descr, funcaddr, buffer): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ 
-206,6 +202,13 @@ from pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible +class StdStringRefExecutor(InstancePtrExecutor): + + def __init__(self, space, cppclass): + from pypy.module.cppyy import interp_cppyy + cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) + InstancePtrExecutor.__init__(self, space, cppclass) + class PyObjectExecutor(PtrTypeExecutor): @@ -295,12 +298,12 @@ _executors["void*"] = PtrTypeExecutor _executors["const char*"] = CStringExecutor -# special cases +# special cases (note: 'string' aliases added below) _executors["constructor"] = ConstructorExecutor _executors["std::basic_string"] = StdStringExecutor -_executors["const std::basic_string&"] = StdStringExecutor -_executors["std::basic_string&"] = StdStringExecutor # TODO: shouldn't copy +_executors["const std::basic_string&"] = StdStringRefExecutor +_executors["std::basic_string&"] = StdStringRefExecutor _executors["PyObject*"] = PyObjectExecutor @@ -363,7 +366,11 @@ "NOT_RPYTHON" aliases = ( ("const char*", "char*"), + ("std::basic_string", "string"), + ("const std::basic_string&", "const string&"), + ("std::basic_string&", "string&"), + ("PyObject*", "_object*"), ) diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -89,11 +89,11 @@ cppyy_index_t cppyy_get_global_operator( cppyy_scope_t scope, cppyy_scope_t lc, cppyy_scope_t rc, const char* op); - /* method properties ----------------------------------------------------- */ + /* method properties ------------------------------------------------------ */ int cppyy_is_constructor(cppyy_type_t type, cppyy_index_t idx); int cppyy_is_staticmethod(cppyy_type_t type, cppyy_index_t idx); - /* data member reflection information ------------------------------------ */ + /* data member reflection information ------------------------------------- */ int cppyy_num_datamembers(cppyy_scope_t 
scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); @@ -101,7 +101,7 @@ int cppyy_datamember_index(cppyy_scope_t scope, const char* name); - /* data member properties ------------------------------------------------ */ + /* data member properties ------------------------------------------------- */ int cppyy_is_publicdata(cppyy_type_t type, int datamember_index); int cppyy_is_staticdata(cppyy_type_t type, int datamember_index); @@ -112,8 +112,6 @@ cppyy_object_t cppyy_charp2stdstring(const char* str); cppyy_object_t cppyy_stdstring2stdstring(cppyy_object_t ptr); - void cppyy_assign2stdstring(cppyy_object_t ptr, const char* str); - void cppyy_free_stdstring(cppyy_object_t ptr); #ifdef __cplusplus } diff --git a/pypy/module/cppyy/include/cintcwrapper.h b/pypy/module/cppyy/include/cintcwrapper.h --- a/pypy/module/cppyy/include/cintcwrapper.h +++ b/pypy/module/cppyy/include/cintcwrapper.h @@ -11,12 +11,18 @@ void* cppyy_load_dictionary(const char* lib_name); /* pythonization helpers */ + cppyy_object_t cppyy_create_tf1(const char* funcname, unsigned long address, + double xmin, double xmax, int npar); + cppyy_object_t cppyy_ttree_Branch( void* vtree, const char* branchname, const char* classname, void* addobj, int bufsize, int splitlevel); long long cppyy_ttree_GetEntry(void* vtree, long long entry); + cppyy_object_t cppyy_charp2TString(const char* str); + cppyy_object_t cppyy_TString2TString(cppyy_object_t ptr); + #ifdef __cplusplus } #endif // ifdef __cplusplus diff --git a/pypy/module/cppyy/include/clingcwrapper.h b/pypy/module/cppyy/include/clingcwrapper.h new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/include/clingcwrapper.h @@ -0,0 +1,37 @@ +#ifndef CPPYY_CLINGCWRAPPER +#define CPPYY_CLINGCWRAPPER + +#include "capi.h" + +#ifdef __cplusplus +extern "C" { +#endif // ifdef __cplusplus + + /* misc helpers */ + void* cppyy_load_dictionary(const char* 
lib_name); + +#ifdef __cplusplus +} +#endif // ifdef __cplusplus + +// TODO: pick up from llvm-config --cxxflags +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifndef __STDC_CONSTANT_MACROS +#define __STDC_CONSTANT_MACROS +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif + +// Wrapper callback: except this to become available from Cling directly +typedef void (*CPPYY_Cling_Wrapper_t)(void*, int, void**, void*); + +#endif // ifndef CPPYY_CLINGCWRAPPER diff --git a/pypy/module/cppyy/include/cppyy.h b/pypy/module/cppyy/include/cppyy.h --- a/pypy/module/cppyy/include/cppyy.h +++ b/pypy/module/cppyy/include/cppyy.h @@ -17,7 +17,7 @@ #ifdef __cplusplus struct CPPYY_G__p2p { #else -#typedef struct +typedef struct { #endif long i; int reftype; diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -40,9 +40,28 @@ def __init__(self, space): self.cppscope_cache = { "void" : W_CPPClass(space, "void", capi.C_NULL_TYPE) } + self.w_nullptr = None self.cpptemplate_cache = {} self.cppclass_registry = {} self.w_clgen_callback = None + self.w_fngen_callback = None + +def get_nullptr(space): + if hasattr(space, "fake"): + raise NotImplementedError + state = space.fromcache(State) + if state.w_nullptr is None: + from pypy.module._rawffi.interp_rawffi import unpack_simple_shape + from pypy.module._rawffi.array import W_Array, W_ArrayInstance + arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + # TODO: fix this hack; fromaddress() will allocate memory if address + # is null and there seems to be no way around it (ll_buffer can not + # be touched directly) + nullarr = arr.fromaddress(space, rffi.cast(rffi.ULONG, 0), 0) + assert isinstance(nullarr, W_ArrayInstance) + nullarr.free(space) + state.w_nullptr = space.wrap(nullarr) + return state.w_nullptr 
@unwrap_spec(name=str) def resolve_name(space, name): @@ -101,6 +120,11 @@ state = space.fromcache(State) state.w_clgen_callback = w_callback + at unwrap_spec(w_callback=W_Root) +def set_function_generator(space, w_callback): + state = space.fromcache(State) + state.w_fngen_callback = w_callback + def register_class(space, w_pycppclass): w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) @@ -108,7 +132,7 @@ # class allows simple aliasing of methods) capi.pythonize(space, cppclass.name, w_pycppclass) state = space.fromcache(State) - state.cppclass_registry[cppclass.handle] = w_pycppclass + state.cppclass_registry[rffi.cast(rffi.LONG, cppclass.handle)] = w_pycppclass class W_CPPLibrary(W_Root): @@ -580,12 +604,10 @@ def get_returntype(self): return self.space.wrap(self.converter.name) - @jit.elidable_promote() def _get_offset(self, cppinstance): if cppinstance: assert lltype.typeOf(cppinstance.cppclass.handle) == lltype.typeOf(self.scope.handle) - offset = self.offset + capi.c_base_offset(self.space, - cppinstance.cppclass, self.scope, cppinstance.get_rawobject(), 1) + offset = self.offset + cppinstance.cppclass.get_base_offset(cppinstance, self.scope) else: offset = self.offset return offset @@ -694,7 +716,6 @@ def get_method_names(self): return self.space.newlist([self.space.wrap(name) for name in self.methods]) - @jit.elidable_promote('0') def get_overload(self, name): try: return self.methods[name] @@ -707,7 +728,6 @@ def get_datamember_names(self): return self.space.newlist([self.space.wrap(name) for name in self.datamembers]) - @jit.elidable_promote('0') def get_datamember(self, name): try: return self.datamembers[name] @@ -717,7 +737,6 @@ self.datamembers[name] = new_dm return new_dm - @jit.elidable_promote('0') def dispatch(self, name, signature): overload = self.get_overload(name) sig = '(%s)' % signature @@ -886,6 +905,10 @@ def find_datamember(self, name): raise 
self.missing_attribute_error(name) + def get_base_offset(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + return 0 + def get_cppthis(self, cppinstance, calling_scope): assert self == cppinstance.cppclass return cppinstance.get_rawobject() @@ -917,10 +940,15 @@ class W_ComplexCPPClass(W_CPPClass): - def get_cppthis(self, cppinstance, calling_scope): + def get_base_offset(self, cppinstance, calling_scope): assert self == cppinstance.cppclass offset = capi.c_base_offset(self.space, self, calling_scope, cppinstance.get_rawobject(), 1) + return offset + + def get_cppthis(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + offset = self.get_base_offset(cppinstance, calling_scope) return capi.direct_ptradd(cppinstance.get_rawobject(), offset) W_ComplexCPPClass.typedef = TypeDef( @@ -1130,19 +1158,23 @@ def get_pythonized_cppclass(space, handle): state = space.fromcache(State) try: - w_pycppclass = state.cppclass_registry[handle] + w_pycppclass = state.cppclass_registry[rffi.cast(rffi.LONG, handle)] except KeyError: final_name = capi.c_scoped_final_name(space, handle) # the callback will cache the class by calling register_class w_pycppclass = space.call_function(state.w_clgen_callback, space.wrap(final_name)) return w_pycppclass +def get_interface_func(space, w_callable, npar): + state = space.fromcache(State) + return space.call_function(state.w_fngen_callback, w_callable, space.wrap(npar)) + def wrap_cppobject(space, rawobject, cppclass, do_cast=True, python_owns=False, is_ref=False, fresh=False): rawobject = rffi.cast(capi.C_OBJECT, rawobject) - # cast to actual cast if requested and possible - w_pycppclass = space.w_None + # cast to actual if requested and possible + w_pycppclass = None if do_cast and rawobject: actual = capi.c_actual_class(space, cppclass, rawobject) if actual != cppclass.handle: @@ -1158,7 +1190,7 @@ # the variables are re-assigned yet) pass - if space.is_w(w_pycppclass, space.w_None): + if 
w_pycppclass is None: w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) # try to recycle existing object if this one is not newly created @@ -1174,16 +1206,30 @@ memory_regulator.register(cppinstance) return w_cppinstance - at unwrap_spec(w_cppinstance=W_CPPInstance) -def addressof(space, w_cppinstance): - """Takes a bound C++ instance, returns the raw address.""" - address = rffi.cast(rffi.LONG, w_cppinstance.get_rawobject()) +def _addressof(space, w_obj): + try: + # attempt to extract address from array + return rffi.cast(rffi.INTPTR_T, converter.get_rawbuffer(space, w_obj)) + except TypeError: + pass + # attempt to get address of C++ instance + return rffi.cast(rffi.INTPTR_T, converter.get_rawobject(space, w_obj)) + + at unwrap_spec(w_obj=W_Root) +def addressof(space, w_obj): + """Takes a bound C++ instance or array, returns the raw address.""" + address = _addressof(space, w_obj) return space.wrap(address) - at unwrap_spec(address=int, owns=bool) -def bind_object(space, address, w_pycppclass, owns=False): + at unwrap_spec(owns=bool, cast=bool) +def bind_object(space, w_obj, w_pycppclass, owns=False, cast=False): """Takes an address and a bound C++ class proxy, returns a bound instance.""" - rawobject = rffi.cast(capi.C_OBJECT, address) + try: + # attempt address from array or C++ instance + rawobject = rffi.cast(capi.C_OBJECT, _addressof(space, w_obj)) + except Exception: + # accept integer value as address + rawobject = rffi.cast(capi.C_OBJECT, space.uint_w(w_obj)) w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) if not w_cppclass: w_cppclass = scope_byname(space, space.str_w(w_pycppclass)) @@ -1191,4 +1237,4 @@ raise OperationError(space.w_TypeError, space.wrap("no such class: %s" % space.str_w(w_pycppclass))) cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) - return wrap_cppobject(space, rawobject, cppclass, do_cast=False, python_owns=owns) + return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, 
python_owns=owns) diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -55,6 +55,19 @@ def clgen_callback(name): return get_pycppclass(name) +def fngen_callback(func, npar): # todo, some kind of arg transform spec + if npar == 0: + def wrapper(a0, a1): + la0 = [a0[0], a0[1], a0[2], a0[3]] + return func(la0) + return wrapper + else: + def wrapper(a0, a1): + la0 = [a0[0], a0[1], a0[2], a0[3]] + la1 = [a1[i] for i in range(npar)] + return func(la0, la1) + return wrapper + def make_static_function(func_name, cppol): def function(*args): @@ -416,6 +429,9 @@ # class generator callback cppyy._set_class_generator(clgen_callback) + # function generator callback + cppyy._set_function_generator(fngen_callback) + # user interface objects (note the two-step of not calling scope_byname here: # creation of global functions may cause the creation of classes in the global # namespace, so gbl must exist at that point to cache them) @@ -431,6 +447,9 @@ # be the same issue for all typedef'd builtin types setattr(gbl, 'unsigned int', int) + # install nullptr as a unique reference + setattr(gbl, 'nullptr', cppyy._get_nullptr()) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -8,6 +8,7 @@ #include "TApplication.h" #include "TInterpreter.h" +#include "TVirtualMutex.h" #include "Getline.h" #include "TBaseClass.h" @@ -24,6 +25,8 @@ // for pythonization #include "TTree.h" #include "TBranch.h" +#include "TF1.h" +#include "TString.h" #include "Api.h" @@ -34,15 +37,15 @@ #include #include +// for recursive_remove callback +#include "pypy_macros.h" + /* ROOT/CINT internals --------------------------------------------------- */ extern long G__store_struct_offset; extern "C" void G__LockCriticalSection(); extern "C" 
void G__UnlockCriticalSection(); -#define G__SETMEMFUNCENV (long)0x7fff0035 -#define G__NOP (long)0x7fff00ff - namespace { class Cppyy_OpenedTClass : public TDictionary { @@ -57,6 +60,16 @@ TList* fAllPubMethod; //all public methods (including from base classes) }; +// memory regulation (cppyy_recursive_remove is generated as a cpyext capi call) +extern "C" void _Py_cppyy_recursive_remove(void*); + +class Cppyy_MemoryRegulator : public TObject { +public: + virtual void RecursiveRemove(TObject* object) { + _Py_cppyy_recursive_remove((void*)object); + } +}; + } // unnamed namespace @@ -82,6 +95,8 @@ /* initialization of the ROOT system (debatable ... ) --------------------- */ namespace { +static Cppyy_MemoryRegulator s_memreg; + class TCppyyApplication : public TApplication { public: TCppyyApplication(const char* acn, Int_t* argc, char** argv, Bool_t do_load = kTRUE) @@ -114,10 +129,13 @@ // enable auto-loader gInterpreter->EnableAutoLoading(); + + // enable memory regulation + gROOT->GetListOfCleanups()->Add(&s_memreg); } }; -static const char* appname = "pypy-cppyy"; +static const char* appname = "PyPyROOT"; class ApplicationStarter { public: @@ -126,11 +144,10 @@ assert(g_classrefs.size() == (ClassRefs_t::size_type)GLOBAL_HANDLE); g_classref_indices[""] = (ClassRefs_t::size_type)GLOBAL_HANDLE; g_classrefs.push_back(TClassRef("")); - g_classref_indices["std"] = g_classrefs.size(); - g_classrefs.push_back(TClassRef("")); // CINT ignores std - g_classref_indices["::std"] = g_classrefs.size(); - g_classrefs.push_back(TClassRef("")); // id. 
- + // CINT ignores std/::std, so point them to the global namespace + g_classref_indices["std"] = (ClassRefs_t::size_type)GLOBAL_HANDLE; + g_classref_indices["::std"] = (ClassRefs_t::size_type)GLOBAL_HANDLE; + // an offset for the interpreted methods g_interpreted.push_back(G__MethodInfo()); @@ -182,6 +199,7 @@ TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) return (TFunction*)cr->GetListOfMethods()->At(idx); + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return (TFunction*)idx; } @@ -220,21 +238,25 @@ /* name to opaque C++ scope representation -------------------------------- */ int cppyy_num_scopes(cppyy_scope_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { /* not supported as CINT does not store classes hierarchically */ return 0; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return gClassTable->Classes(); } char* cppyy_scope_name(cppyy_scope_t handle, int iscope) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { /* not supported as CINT does not store classes hierarchically */ assert(!"scope name lookup not supported on inner scopes"); return 0; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); std::string name = gClassTable->At(iscope); if (name.find("::") == std::string::npos) return cppstring_to_cstring(name); @@ -242,6 +264,7 @@ } char* cppyy_resolve_name(const char* cppitem_name) { + R__LOCKGUARD2(gCINTMutex); std::string tname = cppitem_name; // global namespace? 
@@ -260,7 +283,7 @@ if (ti.Property() & G__BIT_ISENUM) return cppstring_to_cstring("unsigned int"); - // actual typedef resolution; add back array declartion portion, if needed + // actual typedef resolution; add back array declaration portion, if needed std::string rt = ti.TrueName(); // builtin STL types have fake typedefs :/ @@ -274,6 +297,8 @@ } cppyy_scope_t cppyy_get_scope(const char* scope_name) { + R__LOCKGUARD2(gCINTMutex); + // CINT still has trouble with std:: sometimes ... if (strncmp(scope_name, "std::", 5) == 0) scope_name = &scope_name[5]; @@ -303,6 +328,8 @@ } cppyy_type_t cppyy_get_template(const char* template_name) { + R__LOCKGUARD2(gCINTMutex); + ClassRefIndices_t::iterator icr = g_classref_indices.find(template_name); if (icr != g_classref_indices.end()) return (cppyy_type_t)icr->second; @@ -322,6 +349,7 @@ } cppyy_type_t cppyy_actual_class(cppyy_type_t klass, cppyy_object_t obj) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(klass); TClass* clActual = cr->GetActualClass( (void*)obj ); if (clActual && clActual != cr.GetClass()) { @@ -334,6 +362,7 @@ /* memory management ------------------------------------------------------ */ cppyy_object_t cppyy_allocate(cppyy_type_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); return (cppyy_object_t)malloc(cr->Size()); } @@ -343,6 +372,7 @@ } void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); cr->Destructor((void*)self, true); } @@ -352,6 +382,8 @@ static inline G__value cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + R__LOCKGUARD2(gCINTMutex); + G__param* libp = (G__param*)((char*)args - offsetof(G__param, para)); assert(libp->paran == nargs); fixup_args(libp); @@ -378,7 +410,6 @@ G__settemplevel(1); long index = (long)&method; - G__CurrentCall(G__SETMEMFUNCENV, 0, &index); // TODO: access to store_struct_offset won't work on Windows 
long store_struct_offset = G__store_struct_offset; @@ -392,7 +423,6 @@ if (G__get_return(0) > G__RETURN_NORMAL) G__security_recover(0); // 0 ensures silence - G__CurrentCall(G__NOP, 0, 0); G__settemplevel(-1); G__UnlockCriticalSection(); @@ -449,6 +479,7 @@ } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + R__LOCKGUARD2(gCINTMutex); G__value result = cppyy_call_T(method, self, nargs, args); G__pop_tempobject_nodel(); if (result.ref && *(long*)result.ref) { @@ -460,6 +491,7 @@ } cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { + R__LOCKGUARD2(gCINTMutex); cppyy_object_t self = (cppyy_object_t)NULL; if ((InterpretedFuncs_t::size_type)method >= g_interpreted.size()) { G__setgvp((long)G__PVOID); @@ -476,9 +508,10 @@ cppyy_object_t cppyy_call_o(cppyy_type_t method, cppyy_object_t self, int nargs, void* args, cppyy_type_t /*result_type*/ ) { + R__LOCKGUARD2(gCINTMutex); G__value result = cppyy_call_T(method, self, nargs, args); G__pop_tempobject_nodel(); - return G__int(result); + return (cppyy_object_t)G__int(result); } cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /*handle*/, cppyy_index_t /*idx*/) { @@ -512,15 +545,17 @@ /* scope reflection information ------------------------------------------- */ int cppyy_is_namespace(cppyy_scope_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetClassInfo()) return cr->Property() & G__BIT_ISNAMESPACE; - if (strcmp(cr.GetClassName(), "") == 0) + if (handle == (cppyy_scope_t)GLOBAL_HANDLE) return true; return false; } int cppyy_is_enum(const char* type_name) { + R__LOCKGUARD2(gCINTMutex); G__TypeInfo ti(type_name); return (ti.Property() & G__BIT_ISENUM); } @@ -528,6 +563,7 @@ /* type/class reflection information -------------------------------------- */ char* cppyy_final_name(cppyy_type_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = 
type_from_handle(handle); if (cr.GetClass() && cr->GetClassInfo()) { std::string true_name = G__TypeInfo(cr->GetName()).TrueName(); @@ -540,6 +576,7 @@ } char* cppyy_scoped_final_name(cppyy_type_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetClassInfo()) { std::string true_name = G__TypeInfo(cr->GetName()).TrueName(); @@ -555,6 +592,7 @@ } int cppyy_num_bases(cppyy_type_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetListOfBases() != 0) return cr->GetListOfBases()->GetSize(); @@ -562,12 +600,14 @@ } char* cppyy_base_name(cppyy_type_t handle, int base_index) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TBaseClass* b = (TBaseClass*)cr->GetListOfBases()->At(base_index); return type_cppstring_to_cstring(b->GetName()); } int cppyy_is_subtype(cppyy_type_t derived_handle, cppyy_type_t base_handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& derived_type = type_from_handle(derived_handle); TClassRef& base_type = type_from_handle(base_handle); return derived_type->GetBaseClass(base_type) != 0; @@ -575,6 +615,8 @@ size_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int /* direction */) { + R__LOCKGUARD2(gCINTMutex); + // WARNING: CINT can not handle actual dynamic casts! 
TClassRef& derived_type = type_from_handle(derived_handle); TClassRef& base_type = type_from_handle(base_handle); @@ -606,10 +648,11 @@ /* method/function reflection information --------------------------------- */ int cppyy_num_methods(cppyy_scope_t handle) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetListOfMethods()) return cr->GetListOfMethods()->GetSize(); - else if (strcmp(cr.GetClassName(), "") == 0) { + else if (handle == (cppyy_scope_t)GLOBAL_HANDLE) { if (g_globalfuncs.empty()) { TCollection* funcs = gROOT->GetListOfGlobalFunctions(kTRUE); g_globalfuncs.reserve(funcs->GetSize()); @@ -628,13 +671,17 @@ } cppyy_index_t cppyy_method_index_at(cppyy_scope_t handle, int imeth) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) return (cppyy_index_t)imeth; + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return (cppyy_index_t)&g_globalfuncs[imeth]; } cppyy_index_t* cppyy_method_indices_from_name(cppyy_scope_t handle, const char* name) { + R__LOCKGUARD2(gCINTMutex); + std::vector result; TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { @@ -649,14 +696,12 @@ } ++imeth; } - } - - if (result.empty()) { + } else if (handle == (cppyy_scope_t)GLOBAL_HANDLE) { TCollection* funcs = gROOT->GetListOfGlobalFunctions(kTRUE); TFunction* func = 0; TIter ifunc(funcs); while ((func = (TFunction*)ifunc.Next())) { - if (strcmp(func->GetName(), name) == 0) { + if (strcmp(name, func->GetName()) == 0) { g_globalfuncs.push_back(*func); result.push_back((cppyy_index_t)func); } @@ -666,7 +711,7 @@ if (result.empty()) return (cppyy_index_t*)0; - cppyy_index_t* llresult = (cppyy_index_t*)malloc(sizeof(cppyy_index_t)*result.size()+1); + cppyy_index_t* llresult = (cppyy_index_t*)malloc(sizeof(cppyy_index_t)*(result.size()+1)); for (int i = 0; i < (int)result.size(); ++i) llresult[i] = result[i]; llresult[result.size()] = -1; return llresult; @@ -674,6 +719,7 @@ char* 
cppyy_method_name(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TFunction* f = type_get_method(handle, idx); std::string name = f->GetName(); TClassRef& cr = type_from_handle(handle); @@ -685,6 +731,7 @@ } char* cppyy_method_result_type(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cppyy_is_constructor(handle, idx)) return cppstring_to_cstring("constructor"); @@ -693,16 +740,19 @@ } int cppyy_method_num_args(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TFunction* f = type_get_method(handle, idx); return f->GetNargs(); } int cppyy_method_req_args(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TFunction* f = type_get_method(handle, idx); return f->GetNargs() - f->GetNargsOpt(); } char* cppyy_method_arg_type(cppyy_scope_t handle, cppyy_index_t idx, int arg_index) { + R__LOCKGUARD2(gCINTMutex); TFunction* f = type_get_method(handle, idx); TMethodArg* arg = (TMethodArg*)f->GetListOfMethodArgs()->At(arg_index); return type_cppstring_to_cstring(arg->GetFullTypeName()); @@ -714,6 +764,7 @@ } char* cppyy_method_signature(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TFunction* f = type_get_method(handle, idx); std::ostringstream sig; @@ -733,6 +784,7 @@ int cppyy_method_is_template(cppyy_scope_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TFunction* f = type_get_method(handle, idx); std::string name = f->GetName(); @@ -746,6 +798,7 @@ char* cppyy_method_template_arg_name( cppyy_scope_t handle, cppyy_index_t idx, cppyy_index_t /*iarg*/) { + R__LOCKGUARD2(gCINTMutex); // TODO: return only the name for the requested arg TClassRef& cr = type_from_handle(handle); TFunction* f = type_get_method(handle, idx); @@ -756,6 +809,8 @@ cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t 
idx) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); TFunction* f = type_get_method(handle, idx); if (cr && cr.GetClass() && !cr->IsLoaded()) { @@ -780,10 +835,12 @@ } cppyy_index_t cppyy_get_global_operator(cppyy_scope_t scope, cppyy_scope_t lc, cppyy_scope_t rc, const char* op) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& lccr = type_from_handle(lc); TClassRef& rccr = type_from_handle(rc); - if (!lccr.GetClass() || !rccr.GetClass() || scope != GLOBAL_HANDLE) + if (!lccr.GetClass() || !rccr.GetClass() || scope != (cppyy_scope_t)GLOBAL_HANDLE) return (cppyy_index_t)-1; // (void*)-1 is in kernel space, so invalid as a method handle std::string lcname = lccr->GetName(); @@ -811,12 +868,14 @@ /* method properties ----------------------------------------------------- */ int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TMethod* m = (TMethod*)cr->GetListOfMethods()->At(idx); return strcmp(m->GetName(), ((G__ClassInfo*)cr->GetClassInfo())->Name()) == 0; } int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t idx) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); TMethod* m = (TMethod*)cr->GetListOfMethods()->At(idx); return m->Property() & G__BIT_ISSTATIC; @@ -825,10 +884,12 @@ /* data member reflection information ------------------------------------- */ int cppyy_num_datamembers(cppyy_scope_t handle) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); if (cr.GetClass() && cr->GetListOfDataMembers()) return cr->GetListOfDataMembers()->GetSize(); - else if (strcmp(cr.GetClassName(), "") == 0) { + else if (handle == (cppyy_scope_t)GLOBAL_HANDLE) { TCollection* vars = gROOT->GetListOfGlobals(kTRUE); if (g_globalvars.size() != (GlobalVars_t::size_type)vars->GetSize()) { g_globalvars.clear(); @@ -847,16 +908,21 @@ } char* cppyy_datamember_name(cppyy_scope_t handle, int datamember_index) { + 
R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return cppstring_to_cstring(m->GetName()); } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; return cppstring_to_cstring(gbl.GetName()); } char* cppyy_datamember_type(cppyy_scope_t handle, int datamember_index) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); @@ -870,21 +936,26 @@ } return cppstring_to_cstring(fullType); } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; return cppstring_to_cstring(gbl.GetFullTypeName()); } size_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return (size_t)m->GetOffsetCint(); } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; return (size_t)gbl.GetAddress(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { + R__LOCKGUARD2(gCINTMutex); + TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { // called from updates; add a hard reset as the code itself caches in @@ -908,32 +979,38 @@ } ++idm; } + } else if (handle == (cppyy_type_t)GLOBAL_HANDLE) { + TGlobal* gbl = (TGlobal*)gROOT->GetListOfGlobals(kTRUE)->FindObject(name); + if (!gbl) + return -1; + int idx = g_globalvars.size(); + g_globalvars.push_back(*gbl); + return idx; } - TGlobal* gbl = (TGlobal*)gROOT->GetListOfGlobals(kTRUE)->FindObject(name); - if (!gbl) - return -1; - int idx = g_globalvars.size(); - g_globalvars.push_back(*gbl); - return idx; + return -1; } /* data member properties 
------------------------------------------------ */ int cppyy_is_publicdata(cppyy_scope_t handle, int datamember_index) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return m->Property() & G__BIT_ISPUBLIC; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return 1; // global data is always public } int cppyy_is_staticdata(cppyy_scope_t handle, int datamember_index) { + R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); return m->Property() & G__BIT_ISSTATIC; } + assert(handle == (cppyy_type_t)GLOBAL_HANDLE); return 1; // global data is always static } @@ -959,16 +1036,9 @@ return (cppyy_object_t)new std::string(*(std::string*)ptr); } -void cppyy_assign2stdstring(cppyy_object_t ptr, const char* str) { - *((std::string*)ptr) = str; -} - -void cppyy_free_stdstring(cppyy_object_t ptr) { - delete (std::string*)ptr; -} - void* cppyy_load_dictionary(const char* lib_name) { + R__LOCKGUARD2(gCINTMutex); if (0 <= gSystem->Load(lib_name)) return (void*)1; return (void*)0; @@ -976,6 +1046,13 @@ /* pythonization helpers -------------------------------------------------- */ +typedef double (*tfn_callback)(double*, double*); + +cppyy_object_t cppyy_create_tf1(const char* funcname, unsigned long address, + double xmin, double xmax, int npar) { + return (cppyy_object_t)new TF1(funcname, (tfn_callback)address, xmin, xmax, npar); +} + cppyy_object_t cppyy_ttree_Branch(void* vtree, const char* branchname, const char* classname, void* addobj, int bufsize, int splitlevel) { // this little song-and-dance is to by-pass the handwritten Branch methods @@ -987,3 +1064,11 @@ long long cppyy_ttree_GetEntry(void* vtree, long long entry) { return (long long)((TTree*)vtree)->GetEntry((Long64_t)entry); } + +cppyy_object_t cppyy_charp2TString(const 
char* str) { + return (cppyy_object_t)new TString(str); +} + +cppyy_object_t cppyy_TString2TString(cppyy_object_t ptr) { + return (cppyy_object_t)new TString(*(TString*)ptr); +} diff --git a/pypy/module/cppyy/src/clingcwrapper.cxx b/pypy/module/cppyy/src/clingcwrapper.cxx new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/src/clingcwrapper.cxx @@ -0,0 +1,1810 @@ +#include "cppyy.h" +#include "clingcwrapper.h" + +/************************************************************************* + * Copyright (C) 1995-2014, the ROOT team. * + * LICENSE: LGPLv2.1; see http://root.cern.ch/drupal/content/license * + * CONTRIBUTORS: see http://root.cern.ch/drupal/content/contributors * + *************************************************************************/ + +#include + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclBase.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/PrettyPrinter.h" +#include "clang/AST/Type.h" From noreply at buildbot.pypy.org Thu Apr 24 01:06:57 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 01:06:57 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix Message-ID: <20140423230657.91B2B1C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70900:086aa4773f70 Date: 2014-04-23 19:05 -0400 http://bitbucket.org/pypy/pypy/changeset/086aa4773f70/ Log: fix diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -114,7 +114,7 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - buf = space.buffer_w(w_string) + buf = space.readbuf_w(w_string) size = buf.getlength() assert size >= 0 if pos > size: From noreply at buildbot.pypy.org Thu Apr 24 01:42:52 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 24 Apr 2014 01:42:52 +0200 (CEST) Subject: [pypy-commit] pypy default: might as 
well shortcut through finditem_str Message-ID: <20140423234252.DC4321C02F2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70901:5c0a54b343c9 Date: 2014-04-23 16:10 -0700 http://bitbucket.org/pypy/pypy/changeset/5c0a54b343c9/ Log: might as well shortcut through finditem_str diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -881,8 +881,8 @@ def LOAD_NAME(self, nameindex, next_instr): if self.w_locals is not self.w_globals: - w_varname = self.getname_w(nameindex) - w_value = self.space.finditem(self.w_locals, w_varname) + varname = self.getname_u(nameindex) + w_value = self.space.finditem_str(self.w_locals, varname) if w_value is not None: self.pushvalue(w_value) return From noreply at buildbot.pypy.org Thu Apr 24 01:42:55 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 24 Apr 2014 01:42:55 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140423234255.25B3C1C02F2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70902:23776e232207 Date: 2014-04-23 16:41 -0700 http://bitbucket.org/pypy/pypy/changeset/23776e232207/ Log: merge default diff too long, truncating to 2000 out of 9793 lines diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -51,7 +51,7 @@ for root, dirs, files in os.walk(self.temp_dir, topdown=False): 
for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) - shutil.rmtree(self.temp_dir, True) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,7 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) - f.close() + f.close() def test_head(self): response = self.request( diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,6 +1,7 @@ import os, sys, imp import tempfile, binascii + def get_hashed_dir(cfile): with open(cfile,'r') as fid: content = fid.read() @@ -15,7 +16,7 @@ output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) - return output_dir + return output_dir def _get_c_extension_suffix(): diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -1,4 +1,3 @@ - """ This file provides some support for things like standard_c_lib and errno access, as portable as possible """ @@ -22,7 +21,7 @@ standard_c_lib._errno.argtypes = None def _where_is_errno(): return standard_c_lib._errno() - + elif sys.platform in ('linux2', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None @@ -42,5 +41,3 @@ def set_errno(value): errno_p = _where_is_errno() errno_p.contents.value = value - - diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -15,21 +15,21 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian 
Kearns + Philip Jenvey Holger Krekel Christian Tismer - Matti Picus Hakan Ardo Benjamin Peterson - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns - Manuel Jacob Eric van Riet Paap Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen - Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann @@ -38,23 +38,23 @@ Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak - Romain Guillebert Guido Wesdorp Lawrence Oluyede - Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen Jason Creighton Alex Martelli @@ -71,6 +71,7 @@ Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas @@ -87,6 +88,7 @@ Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin Stefano Rivera @@ -95,13 +97,17 @@ Georg Brandl Bert Freudenberg Stian Andreassen + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. 
Muller - Laurence Tratt + Jeremy Thurgood + Gregor Wegberg Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -110,9 +116,7 @@ David Ripton Dusty Phillips Lukas Renggli - Edd Barrett Guenter Jantzen - Tobias Oberstein Ned Batchelder Amit Regmi Ben Young @@ -123,7 +127,6 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -134,7 +137,6 @@ Olivier Dormond Jared Grubb Karl Bartel - Tobias Pape Brian Dorsey Victor Stinner Andrews Medina @@ -146,9 +148,9 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen Jonathan David Riehl Stanislaw Halik @@ -161,7 +163,9 @@ Alexander Sedov Corbin Simpson Christopher Pope + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan Alexis Daboville @@ -170,6 +174,7 @@ Karl Ramm Pieter Zieschang Gabriel + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -180,6 +185,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -200,7 +206,6 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -212,28 +217,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -241,6 +253,7 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths Mike Bayer Flavio Percoco Kristoffer 
Kleine diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,88 @@ +======================================= +PyPy 2.3 - XXXX TODO +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. + +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. 
Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. + You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. + +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. + +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -0,0 +1,154 @@ +======================= +What's new in PyPy 2.2+ +======================= + +.. 
this is a revision shortly after release-2.2.x +.. startrev: 4cd1bc8b3111 + +.. branch: release-2.2.x + +.. branch: numpy-newbyteorder +Clean up numpy types, add newbyteorder functionality + +.. branch: windows-packaging +Package tk/tcl runtime with win32 + +.. branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. 
Since gcc shows minimal +speedup, it was not implemented. + +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! + +.. branch: NonConstant +Simplify implementation of NonConstant. + +.. branch: array-propagate-len +Kill some guards and operations in JIT traces by adding integer bounds +propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). + +.. branch: optimize-int-and +Optimize away INT_AND with constant mask of 1s that fully cover the bitrange +of other operand. + +.. branch: bounds-int-add-or +Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the +operands are positive to kill some guards + +.. branch: remove-intlong-smm +kills int/long/smalllong/bool multimethods + +.. branch: numpy-refactor +Cleanup micronumpy module + +.. branch: int_w-refactor +In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. + +.. branch: test-58c3d8552833 +Fix for getarrayitem_gc_pure optimization + +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. + +.. branch: popen-pclose +Provide an exit status for popen'ed RFiles via pclose + +.. branch: stdlib-2.7.6 +Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. 
Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. + +.. branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain + +.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. branch: win32-fixes4 +fix more tests for win32 + +.. branch: latest-improve-doc +Fix broken links in documentation + +.. branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field + +.. branch: issue1514 +Fix issues with reimporting builtin modules + +.. branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. branch: numpy-speed +Separate iterator from its state so jit can optimize better + +.. branch: numpy-searchsorted +Implement searchsorted without sorter kwarg + +.. branch: openbsd-lib-prefix +add 'lib' prefix to link libraries on OpenBSD + +.. branch: small-unroll-improvements +Improve optimization of small allocation-heavy loops in the JIT diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,145 +1,11 @@ ======================= -What's new in PyPy 2.2+ +What's new in PyPy 2.3+ ======================= -.. this is a revision shortly after release-2.2.x -.. startrev: 4cd1bc8b3111 +.. this is a revision shortly after release-2.3.x +.. startrev: ba569fe1efdb -.. branch: release-2.2.x +.. branch: small-unroll-improvements +Improve optimiziation of small allocation-heavy loops in the JIT -.. branch: numpy-newbyteorder -Clean up numpy types, add newbyteorder functionality - -.. branch: windows-packaging -Package tk/tcl runtime with win32 - -.. 
branch: armhf-singlefloat -JIT support for singlefloats on ARM using the hardfloat ABI - -.. branch: voidtype_strformat -Better support for record numpy arrays - -.. branch: osx-eci-frameworks-makefile -OSX: Ensure frameworks end up in Makefile when specified in External compilation info - -.. branch: less-stringly-ops -Use subclasses of SpaceOperation instead of SpaceOperator objects. -Random cleanups in flowspace and annotator. - -.. branch: ndarray-buffer -adds support for the buffer= argument to the ndarray ctor - -.. branch: better_ftime_detect2 -On OpenBSD do not pull in libcompat.a as it is about to be removed. -And more generally, if you have gettimeofday(2) you will not need ftime(3). - -.. branch: timeb_h -Remove dependency upon on OpenBSD. This will be disappearing -along with libcompat.a. - -.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 -Fix 3 broken links on PyPy published papers in docs. - -.. branch: jit-ordereddict - -.. branch: refactor-str-types -Remove multimethods on str/unicode/bytearray and make the implementations share code. - -.. branch: remove-del-from-generatoriterator -Speed up generators that don't yield inside try or wait blocks by skipping -unnecessary cleanup. - -.. branch: annotator -Remove FlowObjSpace. -Improve cohesion between rpython.flowspace and rpython.annotator. - -.. branch: detect-immutable-fields -mapdicts keep track of whether or not an attribute is every assigned to -multiple times. If it's only assigned once then an elidable lookup is used when -possible. - -.. branch: precompiled-headers -Create a Makefile using precompiled headers for MSVC platforms. -The downside is a messy nmake-compatible Makefile. Since gcc shows minimal -speedup, it was not implemented. - -.. branch: camelot -With a properly configured 256-color terminal (TERM=...-256color), the -Mandelbrot set shown during translation now uses a range of 50 colours. -Essential! - -.. 
branch: NonConstant -Simplify implementation of NonConstant. - -.. branch: array-propagate-len -Kill some guards and operations in JIT traces by adding integer bounds -propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). - -.. branch: optimize-int-and -Optimize away INT_AND with constant mask of 1s that fully cover the bitrange -of other operand. - -.. branch: bounds-int-add-or -Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the -operands are positive to kill some guards - -.. branch: remove-intlong-smm -kills int/long/smalllong/bool multimethods - -.. branch: numpy-refactor -Cleanup micronumpy module - -.. branch: int_w-refactor -In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. - -.. branch: test-58c3d8552833 -Fix for getarrayitem_gc_pure optimization - -.. branch: simple-range-strategy -Implements SimpleRangeListStrategy for case range(n) where n is a positive number. -Makes some traces nicer by getting rid of multiplication for calculating loop counter -and propagates that n > 0 further to get rid of guards. - -.. branch: popen-pclose -Provide an exit status for popen'ed RFiles via pclose - -.. branch: stdlib-2.7.6 -Update stdlib to v2.7.6 - -.. branch: virtual-raw-store-load -Support for virtualizing raw_store/raw_load operations - -.. branch: refactor-buffer-api -Separate the interp-level buffer API from the buffer type exposed to -app-level. The `Buffer` class is now used by `W_MemoryView` and -`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was -an alias to `Buffer`, which was wrappable itself. - -.. branch: improve-consecutive-dict-lookups -Improve the situation when dict lookups of the same key are performed in a chain - -.. 
branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 -.. branch: test_SetFromErrnoWithFilename_NULL -.. branch: test_SetFromErrnoWithFilename__tweaks - -.. branch: refactor_PyErr_SetFromErrnoWithFilename -Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext - -.. branch: win32-fixes4 -fix more tests for win32 - -.. branch: latest-improve-doc -Fix broken links in documentation - -.. branch: ast-issue1673 -fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field - -.. branch: issue1514 -Fix issues with reimporting builtin modules - -.. branch: numpypy-nditer -Implement the core of nditer, without many of the fancy flags (external_loop, buffered) - -.. branch: numpy-speed -Separate iterator from its state so jit can optimize better +.. branch: reflex-support diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -89,8 +89,23 @@ http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with -pypy. Unpack this folder in the base directory. Then open a command -prompt:: +pypy. Unpack this folder in the base directory. +The default GC_abort(...) function in misc.c will try to open a MessageBox. 
+You may want to disable this with the following patch:: + + --- a/misc.c Sun Apr 20 14:08:27 2014 +0300 + +++ b/misc.c Sun Apr 20 14:08:37 2014 +0300 + @@ -1058,7 +1058,7 @@ + #ifndef PCR + void GC_abort(const char *msg) + { + -# if defined(MSWIN32) + +# if 0 && defined(MSWIN32) + (void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK); + # else + GC_err_printf("%s\n", msg); + +Then open a command prompt:: cd gc-7.1 nmake -f NT_THREADS_MAKEFILE diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -34,8 +34,9 @@ def slice_w(space, ctx, start, end, w_default): if 0 <= start <= end: - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrapbytes(ctx._string[start:end]) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrapbytes(ctx._buffer.getslice(start, end, 1, + end-start)) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -97,7 +98,7 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a StrMatchContext or a UnicodeMatchContext for searching + """Make a BufMatchContext or a UnicodeMatchContext for searching in the given w_string object.""" space = self.space if pos < 0: @@ -117,16 +118,18 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - str = space.bufferstr_w(w_string) + buf = space.buffer_w(w_string) if (not space.is_none(self.w_pattern) and space.isinstance_w(self.w_pattern, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap( "can't use a bytes pattern on a string-like object")) - if pos > len(str): - pos = len(str) - if endpos > len(str): - endpos = len(str) - return rsre_core.StrMatchContext(self.code, str, + size = buf.getlength() + assert size >= 0 + if pos > size: + pos = size + if endpos > size: + endpos = size + return 
rsre_core.BufMatchContext(self.code, buf, pos, endpos, self.flags) def getmatch(self, ctx, found): @@ -488,8 +491,8 @@ def fget_string(self, space): ctx = self.ctx - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrapbytes(ctx._string) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrapbytes(ctx._buffer.as_str()) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -12,8 +12,10 @@ '_template_byname' : 'interp_cppyy.template_byname', '_std_string_name' : 'interp_cppyy.std_string_name', '_set_class_generator' : 'interp_cppyy.set_class_generator', + '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', + '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstance' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -190,7 +190,8 @@ [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def c_get_methptr_getter(space, cppscope, index): return _c_get_methptr_getter(cppscope.handle, index) @@ -214,7 +215,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -222,7 +224,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True) 
+ elidable_function=True, + random_effects_on_gcobjs=False) def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -283,7 +286,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) @jit.elidable_promote('2') def c_is_subtype(space, derived, base): if derived == base: @@ -295,7 +299,8 @@ [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) @jit.elidable_promote('1,2,4') def c_base_offset(space, derived, base, address, direction): if derived == base: @@ -543,19 +548,3 @@ compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) -_c_assign2stdstring = rffi.llexternal( - "cppyy_assign2stdstring", - [C_OBJECT, rffi.CCHARP], lltype.Void, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_assign2stdstring(space, cppobject, svalue): - charp = rffi.str2charp(svalue) - _c_assign2stdstring(cppobject, charp) - rffi.free_charp(charp) -_c_free_stdstring = rffi.llexternal( - "cppyy_free_stdstring", - [C_OBJECT], lltype.Void, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_free_stdstring(space, cppobject): - _c_free_stdstring(cppobject) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -6,8 +6,11 @@ from pypy.interpreter.baseobjspace import W_Root from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import libffi, rdynload +from rpython.tool.udir import udir + +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 
'c_load_dictionary'] @@ -19,21 +22,21 @@ if os.environ.get("ROOTSYS"): import commands (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: # presumably Reflex-only - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include")] + if stat != 0: + rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: - rootincpath = [incdir] + rootincpath = [incdir, py.path.local(udir)] rootlibpath = commands.getoutput("root-config --libdir").split() else: - rootincpath = [] + rootincpath = [py.path.local(udir)] rootlibpath = [] def identify(): return 'CINT' -ts_reflect = False -ts_call = False +ts_reflect = True +ts_call = True ts_memory = False ts_helper = False @@ -47,13 +50,15 @@ _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) with rffi.scoped_str2charp('libCore.so') as ll_libname: _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) +with rffi.scoped_str2charp('libHist.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("cintcwrapper.cxx")], include_dirs=[incpath] + rootincpath, includes=["cintcwrapper.h"], library_dirs=rootlibpath, - libraries=["Core", "Cint"], + libraries=["Hist", "Core", "Cint"], use_cpp_linker=True, ) @@ -71,6 +76,23 @@ # CINT-specific pythonizations =============================================== +_c_charp2TString = rffi.llexternal( + "cppyy_charp2TString", + [rffi.CCHARP], C_OBJECT, + releasegil=ts_helper, + compilation_info=eci) +def c_charp2TString(space, svalue): + charp = rffi.str2charp(svalue) + result = _c_charp2TString(charp) + rffi.free_charp(charp) + return result +_c_TString2TString = rffi.llexternal( + "cppyy_TString2TString", + [C_OBJECT], C_OBJECT, + releasegil=ts_helper, + compilation_info=eci) 
+def c_TString2TString(space, cppobject): + return _c_TString2TString(cppobject) def _get_string_data(space, w_obj, m1, m2 = None): from pypy.module.cppyy import interp_cppyy @@ -80,10 +102,85 @@ return w_1 return obj.space.call_method(w_1, m2) +### TF1 ---------------------------------------------------------------------- +class State(object): + def __init__(self, space): + self.tfn_pyfuncs = [] + self.tfn_callbacks = [] + +_create_tf1 = rffi.llexternal( + "cppyy_create_tf1", + [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, + releasegil=False, + compilation_info=eci) + + at unwrap_spec(args_w='args_w') +def tf1_tf1(space, w_self, args_w): + """Pythonized version of TF1 constructor: + takes functions and callable objects, and allows a callback into them.""" + + from pypy.module.cppyy import interp_cppyy + tf1_class = interp_cppyy.scope_byname(space, "TF1") + + # expected signature: + # 1. (char* name, pyfunc, double xmin, double xmax, int npar = 0) + argc = len(args_w) + + try: + # Note: argcount is +1 for the class (== w_self) + if argc < 5 or 6 < argc: + raise TypeError("wrong number of arguments") + + # second argument must be a name + funcname = space.str_w(args_w[1]) + + # last (optional) argument is number of parameters + npar = 0 + if argc == 6: npar = space.int_w(args_w[5]) + + # third argument must be a callable python object + w_callable = args_w[2] + if not space.is_true(space.callable(w_callable)): + raise TypeError("2nd argument is not a valid python callable") + + # generate a pointer to function + from pypy.module._cffi_backend import newtype, ctypefunc, func + + c_double = newtype.new_primitive_type(space, 'double') + c_doublep = newtype.new_pointer_type(space, c_double) + + # wrap the callable as the signature needs modifying + w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) + + w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) + w_callback = func.callback(space, w_cfunc, 
w_ifunc, None) + funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) + + # so far, so good; leaves on issue: CINT is expecting a wrapper, but + # we need the overload that takes a function pointer, which is not in + # the dictionary, hence this helper: + newinst = _create_tf1(space.str_w(args_w[1]), funcaddr, + space.float_w(args_w[3]), space.float_w(args_w[4]), npar) + + from pypy.module.cppyy import interp_cppyy + w_instance = interp_cppyy.wrap_cppobject(space, newinst, tf1_class, + do_cast=False, python_owns=True, fresh=True) + + # tie all the life times to the TF1 instance + space.setattr(w_instance, space.wrap('_callback'), w_callback) + + return w_instance + except (OperationError, TypeError, IndexError), e: + newargs_w = args_w[1:] # drop class + + # return control back to the original, unpythonized overload + ol = tf1_class.get_overload("TF1") + return ol.call(None, newargs_w) + ### TTree -------------------------------------------------------------------- _ttree_Branch = rffi.llexternal( "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], rffi.LONG, + [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, releasegil=False, compilation_info=eci) @@ -202,6 +299,8 @@ # some instance klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) w_obj = klass.construct() + # 0x10000 = kDeleteObject; reset because we own the object + space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) space.call_method(w_branch, "SetObject", w_obj) space.call_method(w_branch, "GetEntry", space.wrap(entry)) space.setattr(w_self, args_w[0], w_obj) @@ -274,6 +373,9 @@ allfuncs = [ + ### TF1 + tf1_tf1, + ### TTree ttree_Branch, ttree_iter, ttree_getattr, ] @@ -288,7 +390,14 @@ # callback coming in when app-level bound classes have been created def pythonize(space, name, w_pycppclass): - if name == "TFile": + if name == "TCollection": + _method_alias(space, w_pycppclass, "append", "Add") + 
_method_alias(space, w_pycppclass, "__len__", "GetSize") + + elif name == "TF1": + space.setattr(w_pycppclass, space.wrap("__new__"), _pythonizations["tf1_tf1"]) + + elif name == "TFile": _method_alias(space, w_pycppclass, "__getattr__", "Get") elif name == "TObjString": @@ -310,3 +419,17 @@ elif name[0:8] == "TVectorT": # TVectorT<> template _method_alias(space, w_pycppclass, "__len__", "GetNoElements") + +# destruction callback (needs better solution, but this is for CINT +# only and should not appear outside of ROOT-specific uses) +from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL + + at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) +def _Py_cppyy_recursive_remove(space, cppobject): + from pypy.module.cppyy.interp_cppyy import memory_regulator + from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT + + obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) + if obj is not None: + memory_regulator.unregister(obj) + obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -0,0 +1,69 @@ +import py, os + +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib import libffi, rdynload + +__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] + +pkgpath = py.path.local(__file__).dirpath().join(os.pardir) +srcpath = pkgpath.join("src") +incpath = pkgpath.join("include") + +import commands +(config_stat, incdir) = commands.getstatusoutput("root-config --incdir") + +if os.environ.get("ROOTSYS"): + if config_stat != 0: # presumably Reflex-only + rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] + 
else: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() +else: + if config_stat == 0: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() + else: + rootincpath = [] + rootlibpath = [] + +def identify(): + return 'Cling' + +ts_reflect = False +ts_call = 'auto' +ts_memory = 'auto' +ts_helper = 'auto' + +std_string_name = 'std::basic_string' + +eci = ExternalCompilationInfo( + separate_module_files=[srcpath.join("clingcwrapper.cxx")], + include_dirs=[incpath] + rootincpath, + includes=["clingcwrapper.h"], + library_dirs=rootlibpath, + libraries=["Cling"], + compile_extra=["-fno-strict-aliasing"], + use_cpp_linker=True, +) + +_c_load_dictionary = rffi.llexternal( + "cppyy_load_dictionary", + [rffi.CCHARP], rdynload.DLLHANDLE, + releasegil=False, + compilation_info=eci) + +def c_load_dictionary(name): + pch = _c_load_dictionary(name) + return pch + + +# Cling-specific pythonizations +def register_pythonizations(space): + "NOT_RPYTHON" + pass + +def pythonize(space, name, w_pycppclass): + pass diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -214,15 +214,22 @@ 'charp2stdstring' : ([c_ccharp], c_object), 'stdstring2stdstring' : ([c_object], c_object), - 'assign2stdstring' : ([c_object, c_ccharp], c_void), - 'free_stdstring' : ([c_object], c_void), } + # size/offset are backend-specific but fixed after load + self.c_sizeof_farg = 0 + self.c_offset_farg = 0 + + def load_reflection_library(space): state = space.fromcache(State) if state.library is None: from pypy.module._cffi_backend.libraryobj import W_Library state.library = W_Library(space, reflection_library, rdynload.RTLD_LOCAL | rdynload.RTLD_LAZY) + if state.library: + # fix constants + state.c_sizeof_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) + state.c_offset_farg = 
_cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) return state.library def verify_backend(space): @@ -342,12 +349,12 @@ return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) def c_deallocate_function_args(space, cargs): call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) - at jit.elidable def c_function_arg_sizeof(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) - at jit.elidable + state = space.fromcache(State) + return state.c_sizeof_farg def c_function_arg_typeoffset(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) + state = space.fromcache(State) + return state.c_offset_farg # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): @@ -367,13 +374,12 @@ def c_base_name(space, cppclass, base_index): args = [_Arg(l=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) - at jit.elidable_promote('2') def c_is_subtype(space, derived, base): + jit.promote(base) if derived == base: return bool(1) return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) - at jit.elidable_promote('1,2,4') def _c_base_offset(space, derived_h, base_h, address, direction): args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] return _cdata_to_size_t(space, call_capi(space, 'base_offset', args)) @@ -504,11 +510,6 @@ return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) def c_stdstring2stdstring(space, cppobject): return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(l=cppobject)])) -def c_assign2stdstring(space, cppobject, svalue): - args = [_Arg(l=cppobject), _Arg(s=svalue)] - call_capi(space, 'assign2stdstring', args) -def c_free_stdstring(space, cppobject): - call_capi(space, 'free_stdstring', [_Arg(l=cppobject)]) # 
loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) def register_pythonizations(space): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -6,8 +6,8 @@ from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import jit_libffi, rfloat -from pypy.module._rawffi.interp_rawffi import unpack_simple_shape -from pypy.module._rawffi.array import W_Array +from pypy.module._rawffi.interp_rawffi import letter2tp +from pypy.module._rawffi.array import W_Array, W_ArrayInstance from pypy.module.cppyy import helper, capi, ffitypes @@ -47,21 +47,35 @@ return rawobject return capi.C_NULL_OBJECT +def is_nullpointer_specialcase(space, w_obj): + # 0, None, and nullptr may serve as "NULL", check for any of them + + # integer 0 + try: + return space.int_w(w_obj) == 0 + except Exception: + pass + # None or nullptr + from pypy.module.cppyy import interp_cppyy + return space.is_true(space.is_(w_obj, space.w_None)) or \ + space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space))) + def get_rawbuffer(space, w_obj): + # raw buffer try: buf = space.buffer_w(w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass - # special case: allow integer 0 as NULL + # array type try: - buf = space.int_w(w_obj) - if buf == 0: - return rffi.cast(rffi.VOIDP, 0) + arr = space.interp_w(W_ArrayInstance, w_obj, can_be_None=True) + if arr: + return rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) except Exception: pass - # special case: allow None as NULL - if space.is_true(space.is_(w_obj, space.w_None)): + # pre-defined NULL + if is_nullpointer_specialcase(space, w_obj): return rffi.cast(rffi.VOIDP, 0) raise TypeError("not an addressable buffer") @@ -118,7 +132,7 @@ def __getattr__(self, name): if name.startswith('array_'): typecode = name[len('array_'):] - arr = self.space.interp_w(W_Array, unpack_simple_shape(self.space, 
self.space.wrap(typecode))) + arr = self.space.interp_w(W_Array, letter2tp(self.space, typecode)) setattr(self, name, arr) return arr raise AttributeError(name) @@ -139,8 +153,6 @@ self.size = array_size def from_memory(self, space, w_obj, w_pycppclass, offset): - if hasattr(space, "fake"): - raise NotImplementedError # read access, so no copy needed address_value = self._get_raw_address(space, w_obj, offset) address = rffi.cast(rffi.ULONG, address_value) @@ -261,8 +273,7 @@ self.name = name def convert_argument(self, space, w_obj, address, call_local): - raise OperationError(space.w_TypeError, - space.wrap('no converter available for type "%s"' % self.name)) + self._is_abstract(space) class BoolConverter(ffitypes.typeid(bool), TypeConverter): @@ -372,7 +383,12 @@ try: obj = get_rawbuffer(space, w_obj) except TypeError: - obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) + try: + # TODO: accept a 'capsule' rather than naked int + # (do accept int(0), though) + obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + except Exception: + obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj def convert_argument(self, space, w_obj, address, call_local): @@ -385,6 +401,24 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = self._unwrap_object(space, w_obj) + def from_memory(self, space, w_obj, w_pycppclass, offset): + # returned as a long value for the address (INTPTR_T is not proper + # per se, but rffi does not come with a PTRDIFF_T) + address = self._get_raw_address(space, w_obj, offset) + ptrval = rffi.cast(rffi.ULONG, rffi.cast(rffi.VOIDPP, address)[0]) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + arr = space.interp_w(W_Array, letter2tp(space, 'P')) + return arr.fromaddress(space, ptrval, sys.maxint) + + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + if is_nullpointer_specialcase(space, w_value): + address[0] = 
rffi.cast(rffi.VOIDP, 0) + else: + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) + class VoidPtrPtrConverter(TypeConverter): _immutable_fields_ = ['uses_local'] @@ -412,7 +446,7 @@ _immutable_fields_ = ['uses_local'] uses_local = True -class InstancePtrConverter(TypeConverter): +class InstanceRefConverter(TypeConverter): _immutable_fields_ = ['libffitype', 'cppclass'] libffitype = jit_libffi.types.pointer @@ -444,17 +478,7 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_obj)) - def from_memory(self, space, w_obj, w_pycppclass, offset): - address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy - return interp_cppyy.wrap_cppobject(space, address, self.cppclass, - do_cast=False, is_ref=True) - - def to_memory(self, space, w_obj, w_value, offset): - address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) - address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) - -class InstanceConverter(InstancePtrConverter): +class InstanceConverter(InstanceRefConverter): def convert_argument_libffi(self, space, w_obj, address, call_local): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -468,6 +492,28 @@ def to_memory(self, space, w_obj, w_value, offset): self._is_abstract(space) + +class InstancePtrConverter(InstanceRefConverter): + + def _unwrap_object(self, space, w_obj): + try: + return InstanceRefConverter._unwrap_object(self, space, w_obj) + except OperationError, e: + # if not instance, allow certain special cases + if is_nullpointer_specialcase(space, w_obj): + return capi.C_NULL_OBJECT + raise e + + def from_memory(self, space, w_obj, w_pycppclass, offset): + address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.wrap_cppobject(space, address, self.cppclass, + do_cast=False, is_ref=True) 
+ + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) + class InstancePtrPtrConverter(InstancePtrConverter): _immutable_fields_ = ['uses_local'] @@ -487,12 +533,6 @@ from pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible - def from_memory(self, space, w_obj, w_pycppclass, offset): - self._is_abstract(space) - - def to_memory(self, space, w_obj, w_value, offset): - self._is_abstract(space) - def finalize_call(self, space, w_obj, call_local): from pypy.module.cppyy.interp_cppyy import W_CPPInstance assert isinstance(w_obj, W_CPPInstance) @@ -501,7 +541,6 @@ class StdStringConverter(InstanceConverter): - _immutable_fields_ = ['cppclass'] def __init__(self, space, extra): from pypy.module.cppyy import interp_cppyy @@ -509,24 +548,25 @@ InstanceConverter.__init__(self, space, cppclass) def _unwrap_object(self, space, w_obj): - try: + from pypy.module.cppyy.interp_cppyy import W_CPPInstance + if isinstance(w_obj, W_CPPInstance): + arg = InstanceConverter._unwrap_object(self, space, w_obj) + return capi.c_stdstring2stdstring(space, arg) + else: return capi.c_charp2stdstring(space, space.str_w(w_obj)) - except Exception, e: - arg = InstanceConverter._unwrap_object(self, space, w_obj) - result = capi.c_stdstring2stdstring(space, arg) - return result def to_memory(self, space, w_obj, w_value, offset): try: address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - capi.c_assign2stdstring(space, address, space.str_w(w_value)) - return + assign = self.cppclass.get_overload("__assign__") + from pypy.module.cppyy import interp_cppyy + assign.call( + interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False), [w_value]) except Exception: - pass - return InstanceConverter.to_memory(self, space, w_obj, w_value, offset) + InstanceConverter.to_memory(self, 
space, w_obj, w_value, offset) def free_argument(self, space, arg, call_local): - capi.c_free_stdstring(space, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) + capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) class StdStringRefConverter(InstancePtrConverter): _immutable_fields_ = ['cppclass'] @@ -570,6 +610,7 @@ def free_argument(self, space, arg, call_local): if hasattr(space, "fake"): raise NotImplementedError + space.getbuiltinmodule("cpyext") from pypy.module.cpyext.pyobject import Py_DecRef, PyObject Py_DecRef(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0])) @@ -627,8 +668,10 @@ # type check for the benefit of the annotator from pypy.module.cppyy.interp_cppyy import W_CPPClass cppclass = space.interp_w(W_CPPClass, cppclass, can_be_None=False) - if compound == "*" or compound == "&": + if compound == "*": return InstancePtrConverter(space, cppclass) + elif compound == "&": + return InstanceRefConverter(space, cppclass) elif compound == "**": return InstancePtrPtrConverter(space, cppclass) elif compound == "": @@ -654,7 +697,7 @@ _converters["void**"] = VoidPtrPtrConverter _converters["void*&"] = VoidPtrRefConverter -# special cases (note: CINT backend requires the simple name 'string') +# special cases (note: 'string' aliases added below) _converters["std::basic_string"] = StdStringConverter _converters["const std::basic_string&"] = StdStringConverter # TODO: shouldn't copy _converters["std::basic_string&"] = StdStringRefConverter @@ -776,3 +819,27 @@ for c_type, alias in aliases: _converters[alias] = _converters[c_type] _add_aliased_converters() + +# ROOT-specific converters (TODO: this is a general use case and should grow +# an API; putting it here is done only to circumvent circular imports) +if capi.identify() == "CINT": + + class TStringConverter(InstanceConverter): + def __init__(self, space, extra): + from pypy.module.cppyy import interp_cppyy + cppclass = 
interp_cppyy.scope_byname(space, "TString") + InstanceConverter.__init__(self, space, cppclass) + + def _unwrap_object(self, space, w_obj): + from pypy.module.cppyy import interp_cppyy + if isinstance(w_obj, interp_cppyy.W_CPPInstance): + arg = InstanceConverter._unwrap_object(self, space, w_obj) + return capi.backend.c_TString2TString(space, arg) + else: + return capi.backend.c_charp2TString(space, space.str_w(w_obj)) + + def free_argument(self, space, arg, call_local): + capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) + + _converters["TString"] = TStringConverter + _converters["const TString&"] = TStringConverter diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -53,17 +53,12 @@ if hasattr(space, "fake"): raise NotImplementedError lresult = capi.c_call_l(space, cppmethod, cppthis, num_args, args) - address = rffi.cast(rffi.ULONG, lresult) + ptrval = rffi.cast(rffi.ULONG, lresult) arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap(self.typecode))) - if address == 0: - # TODO: fix this hack; fromaddress() will allocate memory if address - # is null and there seems to be no way around it (ll_buffer can not - # be touched directly) - nullarr = arr.fromaddress(space, address, 0) - assert isinstance(nullarr, W_ArrayInstance) - nullarr.free(space) - return nullarr - return arr.fromaddress(space, address, sys.maxint) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + return arr.fromaddress(space, ptrval, sys.maxint) class VoidExecutor(FunctionExecutor): @@ -144,7 +139,7 @@ from pypy.module.cppyy import interp_cppyy newthis = capi.c_constructor(space, cppmethod, cpptype, num_args, args) assert lltype.typeOf(newthis) == capi.C_OBJECT - return space.wrap(newthis) + return space.wrap(rffi.cast(rffi.LONG, newthis)) # really want ptrdiff_t here class 
InstancePtrExecutor(FunctionExecutor): @@ -160,7 +155,8 @@ from pypy.module.cppyy import interp_cppyy long_result = capi.c_call_l(space, cppmethod, cppthis, num_args, args) ptr_result = rffi.cast(capi.C_OBJECT, long_result) - return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) + pyres = interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) + return pyres def execute_libffi(self, space, cif_descr, funcaddr, buffer): jit_libffi.jit_ffi_call(cif_descr, funcaddr, buffer) @@ -189,7 +185,7 @@ long_result = capi.c_call_o(space, cppmethod, cppthis, num_args, args, self.cppclass) ptr_result = rffi.cast(capi.C_OBJECT, long_result) return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass, - do_cast=False, python_owns=True) + do_cast=False, python_owns=True, fresh=True) def execute_libffi(self, space, cif_descr, funcaddr, buffer): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -206,6 +202,13 @@ from pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible +class StdStringRefExecutor(InstancePtrExecutor): + + def __init__(self, space, cppclass): + from pypy.module.cppyy import interp_cppyy + cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) + InstancePtrExecutor.__init__(self, space, cppclass) + class PyObjectExecutor(PtrTypeExecutor): @@ -295,12 +298,12 @@ _executors["void*"] = PtrTypeExecutor _executors["const char*"] = CStringExecutor -# special cases +# special cases (note: 'string' aliases added below) _executors["constructor"] = ConstructorExecutor _executors["std::basic_string"] = StdStringExecutor -_executors["const std::basic_string&"] = StdStringExecutor -_executors["std::basic_string&"] = StdStringExecutor # TODO: shouldn't copy +_executors["const std::basic_string&"] = StdStringRefExecutor +_executors["std::basic_string&"] = StdStringRefExecutor _executors["PyObject*"] = PyObjectExecutor @@ -363,7 +366,11 @@ "NOT_RPYTHON" aliases = ( ("const char*", "char*"), + 
("std::basic_string", "string"), + ("const std::basic_string&", "const string&"), + ("std::basic_string&", "string&"), + ("PyObject*", "_object*"), ) diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -89,11 +89,11 @@ cppyy_index_t cppyy_get_global_operator( cppyy_scope_t scope, cppyy_scope_t lc, cppyy_scope_t rc, const char* op); - /* method properties ----------------------------------------------------- */ + /* method properties ------------------------------------------------------ */ int cppyy_is_constructor(cppyy_type_t type, cppyy_index_t idx); int cppyy_is_staticmethod(cppyy_type_t type, cppyy_index_t idx); - /* data member reflection information ------------------------------------ */ + /* data member reflection information ------------------------------------- */ int cppyy_num_datamembers(cppyy_scope_t scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); @@ -101,7 +101,7 @@ int cppyy_datamember_index(cppyy_scope_t scope, const char* name); - /* data member properties ------------------------------------------------ */ + /* data member properties ------------------------------------------------- */ int cppyy_is_publicdata(cppyy_type_t type, int datamember_index); int cppyy_is_staticdata(cppyy_type_t type, int datamember_index); @@ -112,8 +112,6 @@ cppyy_object_t cppyy_charp2stdstring(const char* str); cppyy_object_t cppyy_stdstring2stdstring(cppyy_object_t ptr); - void cppyy_assign2stdstring(cppyy_object_t ptr, const char* str); - void cppyy_free_stdstring(cppyy_object_t ptr); #ifdef __cplusplus } diff --git a/pypy/module/cppyy/include/cintcwrapper.h b/pypy/module/cppyy/include/cintcwrapper.h --- a/pypy/module/cppyy/include/cintcwrapper.h +++ b/pypy/module/cppyy/include/cintcwrapper.h @@ -11,12 +11,18 @@ void* cppyy_load_dictionary(const char* 
lib_name); /* pythonization helpers */ + cppyy_object_t cppyy_create_tf1(const char* funcname, unsigned long address, + double xmin, double xmax, int npar); + cppyy_object_t cppyy_ttree_Branch( void* vtree, const char* branchname, const char* classname, void* addobj, int bufsize, int splitlevel); long long cppyy_ttree_GetEntry(void* vtree, long long entry); + cppyy_object_t cppyy_charp2TString(const char* str); + cppyy_object_t cppyy_TString2TString(cppyy_object_t ptr); + #ifdef __cplusplus } #endif // ifdef __cplusplus diff --git a/pypy/module/cppyy/include/clingcwrapper.h b/pypy/module/cppyy/include/clingcwrapper.h new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/include/clingcwrapper.h @@ -0,0 +1,37 @@ +#ifndef CPPYY_CLINGCWRAPPER +#define CPPYY_CLINGCWRAPPER + +#include "capi.h" + +#ifdef __cplusplus +extern "C" { +#endif // ifdef __cplusplus + + /* misc helpers */ + void* cppyy_load_dictionary(const char* lib_name); + +#ifdef __cplusplus +} +#endif // ifdef __cplusplus + +// TODO: pick up from llvm-config --cxxflags +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifndef __STDC_CONSTANT_MACROS +#define __STDC_CONSTANT_MACROS +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif + +// Wrapper callback: except this to become available from Cling directly +typedef void (*CPPYY_Cling_Wrapper_t)(void*, int, void**, void*); + +#endif // ifndef CPPYY_CLINGCWRAPPER diff --git a/pypy/module/cppyy/include/cppyy.h b/pypy/module/cppyy/include/cppyy.h --- a/pypy/module/cppyy/include/cppyy.h +++ b/pypy/module/cppyy/include/cppyy.h @@ -17,7 +17,7 @@ #ifdef __cplusplus struct CPPYY_G__p2p { #else -#typedef struct +typedef struct { #endif long i; int reftype; diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -40,9 +40,28 @@ def __init__(self, space): 
self.cppscope_cache = { "void" : W_CPPClass(space, "void", capi.C_NULL_TYPE) } + self.w_nullptr = None self.cpptemplate_cache = {} self.cppclass_registry = {} self.w_clgen_callback = None + self.w_fngen_callback = None + +def get_nullptr(space): + if hasattr(space, "fake"): + raise NotImplementedError + state = space.fromcache(State) + if state.w_nullptr is None: + from pypy.module._rawffi.interp_rawffi import unpack_simple_shape + from pypy.module._rawffi.array import W_Array, W_ArrayInstance + arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + # TODO: fix this hack; fromaddress() will allocate memory if address + # is null and there seems to be no way around it (ll_buffer can not + # be touched directly) + nullarr = arr.fromaddress(space, rffi.cast(rffi.ULONG, 0), 0) + assert isinstance(nullarr, W_ArrayInstance) + nullarr.free(space) + state.w_nullptr = space.wrap(nullarr) + return state.w_nullptr @unwrap_spec(name=str) def resolve_name(space, name): @@ -101,6 +120,11 @@ state = space.fromcache(State) state.w_clgen_callback = w_callback + at unwrap_spec(w_callback=W_Root) +def set_function_generator(space, w_callback): + state = space.fromcache(State) + state.w_fngen_callback = w_callback + def register_class(space, w_pycppclass): w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) @@ -108,7 +132,7 @@ # class allows simple aliasing of methods) capi.pythonize(space, cppclass.name, w_pycppclass) state = space.fromcache(State) - state.cppclass_registry[cppclass.handle] = w_pycppclass + state.cppclass_registry[rffi.cast(rffi.LONG, cppclass.handle)] = w_pycppclass class W_CPPLibrary(W_Root): @@ -580,12 +604,10 @@ def get_returntype(self): return self.space.wrap(self.converter.name) - @jit.elidable_promote() def _get_offset(self, cppinstance): if cppinstance: assert lltype.typeOf(cppinstance.cppclass.handle) == lltype.typeOf(self.scope.handle) - offset = 
self.offset + capi.c_base_offset(self.space, - cppinstance.cppclass, self.scope, cppinstance.get_rawobject(), 1) + offset = self.offset + cppinstance.cppclass.get_base_offset(cppinstance, self.scope) else: offset = self.offset return offset @@ -694,7 +716,6 @@ def get_method_names(self): return self.space.newlist([self.space.wrap(name) for name in self.methods]) - @jit.elidable_promote('0') def get_overload(self, name): try: return self.methods[name] @@ -707,7 +728,6 @@ def get_datamember_names(self): return self.space.newlist([self.space.wrap(name) for name in self.datamembers]) - @jit.elidable_promote('0') def get_datamember(self, name): try: return self.datamembers[name] @@ -717,7 +737,6 @@ self.datamembers[name] = new_dm return new_dm - @jit.elidable_promote('0') def dispatch(self, name, signature): overload = self.get_overload(name) sig = '(%s)' % signature @@ -886,6 +905,10 @@ def find_datamember(self, name): raise self.missing_attribute_error(name) + def get_base_offset(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + return 0 + def get_cppthis(self, cppinstance, calling_scope): assert self == cppinstance.cppclass return cppinstance.get_rawobject() @@ -917,10 +940,15 @@ class W_ComplexCPPClass(W_CPPClass): - def get_cppthis(self, cppinstance, calling_scope): + def get_base_offset(self, cppinstance, calling_scope): assert self == cppinstance.cppclass offset = capi.c_base_offset(self.space, self, calling_scope, cppinstance.get_rawobject(), 1) + return offset + + def get_cppthis(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + offset = self.get_base_offset(cppinstance, calling_scope) return capi.direct_ptradd(cppinstance.get_rawobject(), offset) W_ComplexCPPClass.typedef = TypeDef( @@ -1130,19 +1158,23 @@ def get_pythonized_cppclass(space, handle): state = space.fromcache(State) try: - w_pycppclass = state.cppclass_registry[handle] + w_pycppclass = state.cppclass_registry[rffi.cast(rffi.LONG, handle)] except 
KeyError: final_name = capi.c_scoped_final_name(space, handle) # the callback will cache the class by calling register_class w_pycppclass = space.call_function(state.w_clgen_callback, space.wrap(final_name)) return w_pycppclass +def get_interface_func(space, w_callable, npar): + state = space.fromcache(State) + return space.call_function(state.w_fngen_callback, w_callable, space.wrap(npar)) + def wrap_cppobject(space, rawobject, cppclass, do_cast=True, python_owns=False, is_ref=False, fresh=False): rawobject = rffi.cast(capi.C_OBJECT, rawobject) - # cast to actual cast if requested and possible - w_pycppclass = space.w_None + # cast to actual if requested and possible + w_pycppclass = None if do_cast and rawobject: actual = capi.c_actual_class(space, cppclass, rawobject) if actual != cppclass.handle: @@ -1158,7 +1190,7 @@ # the variables are re-assigned yet) pass - if space.is_w(w_pycppclass, space.w_None): + if w_pycppclass is None: w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) # try to recycle existing object if this one is not newly created @@ -1174,16 +1206,30 @@ memory_regulator.register(cppinstance) return w_cppinstance - at unwrap_spec(w_cppinstance=W_CPPInstance) -def addressof(space, w_cppinstance): - """Takes a bound C++ instance, returns the raw address.""" - address = rffi.cast(rffi.LONG, w_cppinstance.get_rawobject()) +def _addressof(space, w_obj): + try: + # attempt to extract address from array + return rffi.cast(rffi.INTPTR_T, converter.get_rawbuffer(space, w_obj)) + except TypeError: + pass + # attempt to get address of C++ instance + return rffi.cast(rffi.INTPTR_T, converter.get_rawobject(space, w_obj)) + + at unwrap_spec(w_obj=W_Root) +def addressof(space, w_obj): + """Takes a bound C++ instance or array, returns the raw address.""" + address = _addressof(space, w_obj) return space.wrap(address) - at unwrap_spec(address=int, owns=bool) -def bind_object(space, address, w_pycppclass, owns=False): + at unwrap_spec(owns=bool, 
cast=bool) +def bind_object(space, w_obj, w_pycppclass, owns=False, cast=False): """Takes an address and a bound C++ class proxy, returns a bound instance.""" - rawobject = rffi.cast(capi.C_OBJECT, address) + try: + # attempt address from array or C++ instance + rawobject = rffi.cast(capi.C_OBJECT, _addressof(space, w_obj)) + except Exception: + # accept integer value as address + rawobject = rffi.cast(capi.C_OBJECT, space.uint_w(w_obj)) w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) if not w_cppclass: w_cppclass = scope_byname(space, space.str_w(w_pycppclass)) @@ -1191,4 +1237,4 @@ raise OperationError(space.w_TypeError, space.wrap("no such class: %s" % space.str_w(w_pycppclass))) cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) - return wrap_cppobject(space, rawobject, cppclass, do_cast=False, python_owns=owns) + return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, python_owns=owns) diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -55,6 +55,19 @@ def clgen_callback(name): return get_pycppclass(name) +def fngen_callback(func, npar): # todo, some kind of arg transform spec + if npar == 0: + def wrapper(a0, a1): + la0 = [a0[0], a0[1], a0[2], a0[3]] + return func(la0) + return wrapper + else: + def wrapper(a0, a1): + la0 = [a0[0], a0[1], a0[2], a0[3]] + la1 = [a1[i] for i in range(npar)] + return func(la0, la1) + return wrapper + def make_static_function(func_name, cppol): def function(*args): @@ -416,6 +429,9 @@ # class generator callback cppyy._set_class_generator(clgen_callback) + # function generator callback + cppyy._set_function_generator(fngen_callback) + # user interface objects (note the two-step of not calling scope_byname here: # creation of global functions may cause the creation of classes in the global # namespace, so gbl must exist at that point to cache them) @@ -431,6 +447,9 @@ # be the same 
issue for all typedef'd builtin types setattr(gbl, 'unsigned int', int) + # install nullptr as a unique reference + setattr(gbl, 'nullptr', cppyy._get_nullptr()) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -8,6 +8,7 @@ #include "TApplication.h" #include "TInterpreter.h" +#include "TVirtualMutex.h" #include "Getline.h" #include "TBaseClass.h" @@ -24,6 +25,8 @@ From noreply at buildbot.pypy.org Thu Apr 24 01:42:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 24 Apr 2014 01:42:56 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: merge py3k Message-ID: <20140423234256.E3C371C02F2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70903:f3443cbe086e Date: 2014-04-23 16:41 -0700 http://bitbucket.org/pypy/pypy/changeset/f3443cbe086e/ Log: merge py3k diff too long, truncating to 2000 out of 9793 lines diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -51,7 +51,7 @@ for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) - shutil.rmtree(self.temp_dir, True) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = 
os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,7 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) - f.close() + f.close() def test_head(self): response = self.request( diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,6 +1,7 @@ import os, sys, imp import tempfile, binascii + def get_hashed_dir(cfile): with open(cfile,'r') as fid: content = fid.read() @@ -15,7 +16,7 @@ output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) - return output_dir + return output_dir def _get_c_extension_suffix(): diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -1,4 +1,3 @@ - """ This file provides some support for things like standard_c_lib and errno access, as portable as possible """ @@ -22,7 +21,7 @@ standard_c_lib._errno.argtypes = None def _where_is_errno(): return standard_c_lib._errno() - + elif sys.platform in ('linux2', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None @@ -42,5 +41,3 @@ def set_errno(value): errno_p = _where_is_errno() errno_p.contents.value = value - - diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -15,21 +15,21 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer - Matti Picus Hakan Ardo Benjamin Peterson - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns - Manuel Jacob Eric van Riet Paap Wim Lavrijsen + 
Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen - Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann @@ -38,23 +38,23 @@ Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak - Romain Guillebert Guido Wesdorp Lawrence Oluyede - Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen Jason Creighton Alex Martelli @@ -71,6 +71,7 @@ Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas @@ -87,6 +88,7 @@ Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin Stefano Rivera @@ -95,13 +97,17 @@ Georg Brandl Bert Freudenberg Stian Andreassen + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. Muller - Laurence Tratt + Jeremy Thurgood + Gregor Wegberg Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -110,9 +116,7 @@ David Ripton Dusty Phillips Lukas Renggli - Edd Barrett Guenter Jantzen - Tobias Oberstein Ned Batchelder Amit Regmi Ben Young @@ -123,7 +127,6 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -134,7 +137,6 @@ Olivier Dormond Jared Grubb Karl Bartel - Tobias Pape Brian Dorsey Victor Stinner Andrews Medina @@ -146,9 +148,9 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen Jonathan David Riehl Stanislaw Halik @@ -161,7 +163,9 @@ Alexander Sedov Corbin Simpson Christopher Pope + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan Alexis Daboville @@ -170,6 +174,7 @@ Karl Ramm Pieter Zieschang Gabriel + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -180,6 +185,7 @@ Travis Francis 
Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -200,7 +206,6 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -212,28 +217,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -241,6 +253,7 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths Mike Bayer Flavio Percoco Kristoffer Kleine diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,88 @@ +======================================= +PyPy 2.3 - XXXX TODO +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. 
+ +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. 
+ You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. + +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. + +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -0,0 +1,154 @@ +======================= +What's new in PyPy 2.2+ +======================= + +.. this is a revision shortly after release-2.2.x +.. startrev: 4cd1bc8b3111 + +.. branch: release-2.2.x + +.. branch: numpy-newbyteorder +Clean up numpy types, add newbyteorder functionality + +.. branch: windows-packaging +Package tk/tcl runtime with win32 + +.. branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). 
+ +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. Since gcc shows minimal +speedup, it was not implemented. + +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! + +.. branch: NonConstant +Simplify implementation of NonConstant. + +.. branch: array-propagate-len +Kill some guards and operations in JIT traces by adding integer bounds +propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). + +.. branch: optimize-int-and +Optimize away INT_AND with constant mask of 1s that fully cover the bitrange +of other operand. + +.. branch: bounds-int-add-or +Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the +operands are positive to kill some guards + +.. branch: remove-intlong-smm +kills int/long/smalllong/bool multimethods + +.. branch: numpy-refactor +Cleanup micronumpy module + +.. 
branch: int_w-refactor +In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. + +.. branch: test-58c3d8552833 +Fix for getarrayitem_gc_pure optimization + +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. + +.. branch: popen-pclose +Provide an exit status for popen'ed RFiles via pclose + +.. branch: stdlib-2.7.6 +Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. + +.. branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain + +.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. branch: win32-fixes4 +fix more tests for win32 + +.. branch: latest-improve-doc +Fix broken links in documentation + +.. branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field + +.. branch: issue1514 +Fix issues with reimporting builtin modules + +.. 
branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. branch: numpy-speed +Separate iterator from its state so jit can optimize better + +.. branch: numpy-searchsorted +Implement searchsorted without sorter kwarg + +.. branch: openbsd-lib-prefix +add 'lib' prefix to link libraries on OpenBSD + +.. branch: small-unroll-improvements +Improve optimization of small allocation-heavy loops in the JIT diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,145 +1,11 @@ ======================= -What's new in PyPy 2.2+ +What's new in PyPy 2.3+ ======================= -.. this is a revision shortly after release-2.2.x -.. startrev: 4cd1bc8b3111 +.. this is a revision shortly after release-2.3.x +.. startrev: ba569fe1efdb -.. branch: release-2.2.x +.. branch: small-unroll-improvements +Improve optimiziation of small allocation-heavy loops in the JIT -.. branch: numpy-newbyteorder -Clean up numpy types, add newbyteorder functionality - -.. branch: windows-packaging -Package tk/tcl runtime with win32 - -.. branch: armhf-singlefloat -JIT support for singlefloats on ARM using the hardfloat ABI - -.. branch: voidtype_strformat -Better support for record numpy arrays - -.. branch: osx-eci-frameworks-makefile -OSX: Ensure frameworks end up in Makefile when specified in External compilation info - -.. branch: less-stringly-ops -Use subclasses of SpaceOperation instead of SpaceOperator objects. -Random cleanups in flowspace and annotator. - -.. branch: ndarray-buffer -adds support for the buffer= argument to the ndarray ctor - -.. branch: better_ftime_detect2 -On OpenBSD do not pull in libcompat.a as it is about to be removed. -And more generally, if you have gettimeofday(2) you will not need ftime(3). - -.. branch: timeb_h -Remove dependency upon on OpenBSD. This will be disappearing -along with libcompat.a. - -.. 
branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 -Fix 3 broken links on PyPy published papers in docs. - -.. branch: jit-ordereddict - -.. branch: refactor-str-types -Remove multimethods on str/unicode/bytearray and make the implementations share code. - -.. branch: remove-del-from-generatoriterator -Speed up generators that don't yield inside try or wait blocks by skipping -unnecessary cleanup. - -.. branch: annotator -Remove FlowObjSpace. -Improve cohesion between rpython.flowspace and rpython.annotator. - -.. branch: detect-immutable-fields -mapdicts keep track of whether or not an attribute is every assigned to -multiple times. If it's only assigned once then an elidable lookup is used when -possible. - -.. branch: precompiled-headers -Create a Makefile using precompiled headers for MSVC platforms. -The downside is a messy nmake-compatible Makefile. Since gcc shows minimal -speedup, it was not implemented. - -.. branch: camelot -With a properly configured 256-color terminal (TERM=...-256color), the -Mandelbrot set shown during translation now uses a range of 50 colours. -Essential! - -.. branch: NonConstant -Simplify implementation of NonConstant. - -.. branch: array-propagate-len -Kill some guards and operations in JIT traces by adding integer bounds -propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). - -.. branch: optimize-int-and -Optimize away INT_AND with constant mask of 1s that fully cover the bitrange -of other operand. - -.. branch: bounds-int-add-or -Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the -operands are positive to kill some guards - -.. branch: remove-intlong-smm -kills int/long/smalllong/bool multimethods - -.. branch: numpy-refactor -Cleanup micronumpy module - -.. branch: int_w-refactor -In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. 
We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. - -.. branch: test-58c3d8552833 -Fix for getarrayitem_gc_pure optimization - -.. branch: simple-range-strategy -Implements SimpleRangeListStrategy for case range(n) where n is a positive number. -Makes some traces nicer by getting rid of multiplication for calculating loop counter -and propagates that n > 0 further to get rid of guards. - -.. branch: popen-pclose -Provide an exit status for popen'ed RFiles via pclose - -.. branch: stdlib-2.7.6 -Update stdlib to v2.7.6 - -.. branch: virtual-raw-store-load -Support for virtualizing raw_store/raw_load operations - -.. branch: refactor-buffer-api -Separate the interp-level buffer API from the buffer type exposed to -app-level. The `Buffer` class is now used by `W_MemoryView` and -`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was -an alias to `Buffer`, which was wrappable itself. - -.. branch: improve-consecutive-dict-lookups -Improve the situation when dict lookups of the same key are performed in a chain - -.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 -.. branch: test_SetFromErrnoWithFilename_NULL -.. branch: test_SetFromErrnoWithFilename__tweaks - -.. branch: refactor_PyErr_SetFromErrnoWithFilename -Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext - -.. branch: win32-fixes4 -fix more tests for win32 - -.. branch: latest-improve-doc -Fix broken links in documentation - -.. branch: ast-issue1673 -fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field - -.. branch: issue1514 -Fix issues with reimporting builtin modules - -.. branch: numpypy-nditer -Implement the core of nditer, without many of the fancy flags (external_loop, buffered) - -.. branch: numpy-speed -Separate iterator from its state so jit can optimize better +.. 
branch: reflex-support diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -89,8 +89,23 @@ http://hboehm.info/gc/gc_source/gc-7.1.tar.gz Versions 7.0 and 7.1 are known to work; the 6.x series won't work with -pypy. Unpack this folder in the base directory. Then open a command -prompt:: +pypy. Unpack this folder in the base directory. +The default GC_abort(...) function in misc.c will try to open a MessageBox. +You may want to disable this with the following patch:: + + --- a/misc.c Sun Apr 20 14:08:27 2014 +0300 + +++ b/misc.c Sun Apr 20 14:08:37 2014 +0300 + @@ -1058,7 +1058,7 @@ + #ifndef PCR + void GC_abort(const char *msg) + { + -# if defined(MSWIN32) + +# if 0 && defined(MSWIN32) + (void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK); + # else + GC_err_printf("%s\n", msg); + +Then open a command prompt:: cd gc-7.1 nmake -f NT_THREADS_MAKEFILE diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -34,8 +34,9 @@ def slice_w(space, ctx, start, end, w_default): if 0 <= start <= end: - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrapbytes(ctx._string[start:end]) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrapbytes(ctx._buffer.getslice(start, end, 1, + end-start)) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -97,7 +98,7 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a StrMatchContext or a UnicodeMatchContext for searching + """Make a BufMatchContext or a UnicodeMatchContext for searching in the given w_string object.""" space = self.space if pos < 0: @@ -117,16 +118,18 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - str = space.bufferstr_w(w_string) + buf = 
space.buffer_w(w_string) if (not space.is_none(self.w_pattern) and space.isinstance_w(self.w_pattern, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap( "can't use a bytes pattern on a string-like object")) - if pos > len(str): - pos = len(str) - if endpos > len(str): - endpos = len(str) - return rsre_core.StrMatchContext(self.code, str, + size = buf.getlength() + assert size >= 0 + if pos > size: + pos = size + if endpos > size: + endpos = size + return rsre_core.BufMatchContext(self.code, buf, pos, endpos, self.flags) def getmatch(self, ctx, found): @@ -488,8 +491,8 @@ def fget_string(self, space): ctx = self.ctx - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrapbytes(ctx._string) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrapbytes(ctx._buffer.as_str()) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -12,8 +12,10 @@ '_template_byname' : 'interp_cppyy.template_byname', '_std_string_name' : 'interp_cppyy.std_string_name', '_set_class_generator' : 'interp_cppyy.set_class_generator', + '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', + '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstance' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -190,7 +190,8 @@ [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def 
c_get_methptr_getter(space, cppscope, index): return _c_get_methptr_getter(cppscope.handle, index) @@ -214,7 +215,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -222,7 +224,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -283,7 +286,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) @jit.elidable_promote('2') def c_is_subtype(space, derived, base): if derived == base: @@ -295,7 +299,8 @@ [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True) + elidable_function=True, + random_effects_on_gcobjs=False) @jit.elidable_promote('1,2,4') def c_base_offset(space, derived, base, address, direction): if derived == base: @@ -543,19 +548,3 @@ compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) -_c_assign2stdstring = rffi.llexternal( - "cppyy_assign2stdstring", - [C_OBJECT, rffi.CCHARP], lltype.Void, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_assign2stdstring(space, cppobject, svalue): - charp = rffi.str2charp(svalue) - _c_assign2stdstring(cppobject, charp) - rffi.free_charp(charp) -_c_free_stdstring = rffi.llexternal( - "cppyy_free_stdstring", - [C_OBJECT], lltype.Void, - releasegil=ts_helper, - compilation_info=backend.eci) -def c_free_stdstring(space, cppobject): - _c_free_stdstring(cppobject) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- 
a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -6,8 +6,11 @@ from pypy.interpreter.baseobjspace import W_Root from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import libffi, rdynload +from rpython.tool.udir import udir + +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -19,21 +22,21 @@ if os.environ.get("ROOTSYS"): import commands (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: # presumably Reflex-only - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include")] + if stat != 0: + rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: - rootincpath = [incdir] + rootincpath = [incdir, py.path.local(udir)] rootlibpath = commands.getoutput("root-config --libdir").split() else: - rootincpath = [] + rootincpath = [py.path.local(udir)] rootlibpath = [] def identify(): return 'CINT' -ts_reflect = False -ts_call = False +ts_reflect = True +ts_call = True ts_memory = False ts_helper = False @@ -47,13 +50,15 @@ _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) with rffi.scoped_str2charp('libCore.so') as ll_libname: _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) +with rffi.scoped_str2charp('libHist.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("cintcwrapper.cxx")], include_dirs=[incpath] + rootincpath, includes=["cintcwrapper.h"], library_dirs=rootlibpath, - libraries=["Core", "Cint"], + libraries=["Hist", "Core", "Cint"], use_cpp_linker=True, ) @@ -71,6 +76,23 @@ # 
CINT-specific pythonizations =============================================== +_c_charp2TString = rffi.llexternal( + "cppyy_charp2TString", + [rffi.CCHARP], C_OBJECT, + releasegil=ts_helper, + compilation_info=eci) +def c_charp2TString(space, svalue): + charp = rffi.str2charp(svalue) + result = _c_charp2TString(charp) + rffi.free_charp(charp) + return result +_c_TString2TString = rffi.llexternal( + "cppyy_TString2TString", + [C_OBJECT], C_OBJECT, + releasegil=ts_helper, + compilation_info=eci) +def c_TString2TString(space, cppobject): + return _c_TString2TString(cppobject) def _get_string_data(space, w_obj, m1, m2 = None): from pypy.module.cppyy import interp_cppyy @@ -80,10 +102,85 @@ return w_1 return obj.space.call_method(w_1, m2) +### TF1 ---------------------------------------------------------------------- +class State(object): + def __init__(self, space): + self.tfn_pyfuncs = [] + self.tfn_callbacks = [] + +_create_tf1 = rffi.llexternal( + "cppyy_create_tf1", + [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, + releasegil=False, + compilation_info=eci) + + at unwrap_spec(args_w='args_w') +def tf1_tf1(space, w_self, args_w): + """Pythonized version of TF1 constructor: + takes functions and callable objects, and allows a callback into them.""" + + from pypy.module.cppyy import interp_cppyy + tf1_class = interp_cppyy.scope_byname(space, "TF1") + + # expected signature: + # 1. 
(char* name, pyfunc, double xmin, double xmax, int npar = 0) + argc = len(args_w) + + try: + # Note: argcount is +1 for the class (== w_self) + if argc < 5 or 6 < argc: + raise TypeError("wrong number of arguments") + + # second argument must be a name + funcname = space.str_w(args_w[1]) + + # last (optional) argument is number of parameters + npar = 0 + if argc == 6: npar = space.int_w(args_w[5]) + + # third argument must be a callable python object + w_callable = args_w[2] + if not space.is_true(space.callable(w_callable)): + raise TypeError("2nd argument is not a valid python callable") + + # generate a pointer to function + from pypy.module._cffi_backend import newtype, ctypefunc, func + + c_double = newtype.new_primitive_type(space, 'double') + c_doublep = newtype.new_pointer_type(space, c_double) + + # wrap the callable as the signature needs modifying + w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) + + w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) + w_callback = func.callback(space, w_cfunc, w_ifunc, None) + funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) + + # so far, so good; leaves on issue: CINT is expecting a wrapper, but + # we need the overload that takes a function pointer, which is not in + # the dictionary, hence this helper: + newinst = _create_tf1(space.str_w(args_w[1]), funcaddr, + space.float_w(args_w[3]), space.float_w(args_w[4]), npar) + + from pypy.module.cppyy import interp_cppyy + w_instance = interp_cppyy.wrap_cppobject(space, newinst, tf1_class, + do_cast=False, python_owns=True, fresh=True) + + # tie all the life times to the TF1 instance + space.setattr(w_instance, space.wrap('_callback'), w_callback) + + return w_instance + except (OperationError, TypeError, IndexError), e: + newargs_w = args_w[1:] # drop class + + # return control back to the original, unpythonized overload + ol = tf1_class.get_overload("TF1") + return ol.call(None, newargs_w) + ### TTree 
-------------------------------------------------------------------- _ttree_Branch = rffi.llexternal( "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], rffi.LONG, + [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, releasegil=False, compilation_info=eci) @@ -202,6 +299,8 @@ # some instance klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) w_obj = klass.construct() + # 0x10000 = kDeleteObject; reset because we own the object + space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) space.call_method(w_branch, "SetObject", w_obj) space.call_method(w_branch, "GetEntry", space.wrap(entry)) space.setattr(w_self, args_w[0], w_obj) @@ -274,6 +373,9 @@ allfuncs = [ + ### TF1 + tf1_tf1, + ### TTree ttree_Branch, ttree_iter, ttree_getattr, ] @@ -288,7 +390,14 @@ # callback coming in when app-level bound classes have been created def pythonize(space, name, w_pycppclass): - if name == "TFile": + if name == "TCollection": + _method_alias(space, w_pycppclass, "append", "Add") + _method_alias(space, w_pycppclass, "__len__", "GetSize") + + elif name == "TF1": + space.setattr(w_pycppclass, space.wrap("__new__"), _pythonizations["tf1_tf1"]) + + elif name == "TFile": _method_alias(space, w_pycppclass, "__getattr__", "Get") elif name == "TObjString": @@ -310,3 +419,17 @@ elif name[0:8] == "TVectorT": # TVectorT<> template _method_alias(space, w_pycppclass, "__len__", "GetNoElements") + +# destruction callback (needs better solution, but this is for CINT +# only and should not appear outside of ROOT-specific uses) +from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL + + at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) +def _Py_cppyy_recursive_remove(space, cppobject): + from pypy.module.cppyy.interp_cppyy import memory_regulator + from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT + + obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) + if obj is 
not None: + memory_regulator.unregister(obj) + obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -0,0 +1,69 @@ +import py, os + +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib import libffi, rdynload + +__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] + +pkgpath = py.path.local(__file__).dirpath().join(os.pardir) +srcpath = pkgpath.join("src") +incpath = pkgpath.join("include") + +import commands +(config_stat, incdir) = commands.getstatusoutput("root-config --incdir") + +if os.environ.get("ROOTSYS"): + if config_stat != 0: # presumably Reflex-only + rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] + else: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() +else: + if config_stat == 0: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() + else: + rootincpath = [] + rootlibpath = [] + +def identify(): + return 'Cling' + +ts_reflect = False +ts_call = 'auto' +ts_memory = 'auto' +ts_helper = 'auto' + +std_string_name = 'std::basic_string' + +eci = ExternalCompilationInfo( + separate_module_files=[srcpath.join("clingcwrapper.cxx")], + include_dirs=[incpath] + rootincpath, + includes=["clingcwrapper.h"], + library_dirs=rootlibpath, + libraries=["Cling"], + compile_extra=["-fno-strict-aliasing"], + use_cpp_linker=True, +) + +_c_load_dictionary = rffi.llexternal( + "cppyy_load_dictionary", + [rffi.CCHARP], rdynload.DLLHANDLE, + releasegil=False, + compilation_info=eci) + +def c_load_dictionary(name): + pch = _c_load_dictionary(name) 
+ return pch + + +# Cling-specific pythonizations +def register_pythonizations(space): + "NOT_RPYTHON" + pass + +def pythonize(space, name, w_pycppclass): + pass diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -214,15 +214,22 @@ 'charp2stdstring' : ([c_ccharp], c_object), 'stdstring2stdstring' : ([c_object], c_object), - 'assign2stdstring' : ([c_object, c_ccharp], c_void), - 'free_stdstring' : ([c_object], c_void), } + # size/offset are backend-specific but fixed after load + self.c_sizeof_farg = 0 + self.c_offset_farg = 0 + + def load_reflection_library(space): state = space.fromcache(State) if state.library is None: from pypy.module._cffi_backend.libraryobj import W_Library state.library = W_Library(space, reflection_library, rdynload.RTLD_LOCAL | rdynload.RTLD_LAZY) + if state.library: + # fix constants + state.c_sizeof_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) + state.c_offset_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) return state.library def verify_backend(space): @@ -342,12 +349,12 @@ return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) def c_deallocate_function_args(space, cargs): call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) - at jit.elidable def c_function_arg_sizeof(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) - at jit.elidable + state = space.fromcache(State) + return state.c_sizeof_farg def c_function_arg_typeoffset(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) + state = space.fromcache(State) + return state.c_offset_farg # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): @@ -367,13 +374,12 @@ def c_base_name(space, cppclass, base_index): args = 
[_Arg(l=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) - at jit.elidable_promote('2') def c_is_subtype(space, derived, base): + jit.promote(base) if derived == base: return bool(1) return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) - at jit.elidable_promote('1,2,4') def _c_base_offset(space, derived_h, base_h, address, direction): args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] return _cdata_to_size_t(space, call_capi(space, 'base_offset', args)) @@ -504,11 +510,6 @@ return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) def c_stdstring2stdstring(space, cppobject): return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(l=cppobject)])) -def c_assign2stdstring(space, cppobject, svalue): - args = [_Arg(l=cppobject), _Arg(s=svalue)] - call_capi(space, 'assign2stdstring', args) -def c_free_stdstring(space, cppobject): - call_capi(space, 'free_stdstring', [_Arg(l=cppobject)]) # loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) def register_pythonizations(space): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -6,8 +6,8 @@ from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import jit_libffi, rfloat -from pypy.module._rawffi.interp_rawffi import unpack_simple_shape -from pypy.module._rawffi.array import W_Array +from pypy.module._rawffi.interp_rawffi import letter2tp +from pypy.module._rawffi.array import W_Array, W_ArrayInstance from pypy.module.cppyy import helper, capi, ffitypes @@ -47,21 +47,35 @@ return rawobject return capi.C_NULL_OBJECT +def is_nullpointer_specialcase(space, w_obj): + # 0, None, and nullptr may serve as "NULL", check for any of them + + # integer 0 + try: + return space.int_w(w_obj) == 0 + except Exception: 
+ pass + # None or nullptr + from pypy.module.cppyy import interp_cppyy + return space.is_true(space.is_(w_obj, space.w_None)) or \ + space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space))) + def get_rawbuffer(space, w_obj): + # raw buffer try: buf = space.buffer_w(w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass - # special case: allow integer 0 as NULL + # array type try: - buf = space.int_w(w_obj) - if buf == 0: - return rffi.cast(rffi.VOIDP, 0) + arr = space.interp_w(W_ArrayInstance, w_obj, can_be_None=True) + if arr: + return rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) except Exception: pass - # special case: allow None as NULL - if space.is_true(space.is_(w_obj, space.w_None)): + # pre-defined NULL + if is_nullpointer_specialcase(space, w_obj): return rffi.cast(rffi.VOIDP, 0) raise TypeError("not an addressable buffer") @@ -118,7 +132,7 @@ def __getattr__(self, name): if name.startswith('array_'): typecode = name[len('array_'):] - arr = self.space.interp_w(W_Array, unpack_simple_shape(self.space, self.space.wrap(typecode))) + arr = self.space.interp_w(W_Array, letter2tp(self.space, typecode)) setattr(self, name, arr) return arr raise AttributeError(name) @@ -139,8 +153,6 @@ self.size = array_size def from_memory(self, space, w_obj, w_pycppclass, offset): - if hasattr(space, "fake"): - raise NotImplementedError # read access, so no copy needed address_value = self._get_raw_address(space, w_obj, offset) address = rffi.cast(rffi.ULONG, address_value) @@ -261,8 +273,7 @@ self.name = name def convert_argument(self, space, w_obj, address, call_local): - raise OperationError(space.w_TypeError, - space.wrap('no converter available for type "%s"' % self.name)) + self._is_abstract(space) class BoolConverter(ffitypes.typeid(bool), TypeConverter): @@ -372,7 +383,12 @@ try: obj = get_rawbuffer(space, w_obj) except TypeError: - obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) + try: + # TODO: accept a 
'capsule' rather than naked int + # (do accept int(0), though) + obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + except Exception: + obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj def convert_argument(self, space, w_obj, address, call_local): @@ -385,6 +401,24 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = self._unwrap_object(space, w_obj) + def from_memory(self, space, w_obj, w_pycppclass, offset): + # returned as a long value for the address (INTPTR_T is not proper + # per se, but rffi does not come with a PTRDIFF_T) + address = self._get_raw_address(space, w_obj, offset) + ptrval = rffi.cast(rffi.ULONG, rffi.cast(rffi.VOIDPP, address)[0]) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + arr = space.interp_w(W_Array, letter2tp(space, 'P')) + return arr.fromaddress(space, ptrval, sys.maxint) + + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + if is_nullpointer_specialcase(space, w_value): + address[0] = rffi.cast(rffi.VOIDP, 0) + else: + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) + class VoidPtrPtrConverter(TypeConverter): _immutable_fields_ = ['uses_local'] @@ -412,7 +446,7 @@ _immutable_fields_ = ['uses_local'] uses_local = True -class InstancePtrConverter(TypeConverter): +class InstanceRefConverter(TypeConverter): _immutable_fields_ = ['libffitype', 'cppclass'] libffitype = jit_libffi.types.pointer @@ -444,17 +478,7 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_obj)) - def from_memory(self, space, w_obj, w_pycppclass, offset): - address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - from pypy.module.cppyy import interp_cppyy - return interp_cppyy.wrap_cppobject(space, address, self.cppclass, - do_cast=False, is_ref=True) - - def to_memory(self, space, w_obj, w_value, offset): - address = 
rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) - address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) - -class InstanceConverter(InstancePtrConverter): +class InstanceConverter(InstanceRefConverter): def convert_argument_libffi(self, space, w_obj, address, call_local): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -468,6 +492,28 @@ def to_memory(self, space, w_obj, w_value, offset): self._is_abstract(space) + +class InstancePtrConverter(InstanceRefConverter): + + def _unwrap_object(self, space, w_obj): + try: + return InstanceRefConverter._unwrap_object(self, space, w_obj) + except OperationError, e: + # if not instance, allow certain special cases + if is_nullpointer_specialcase(space, w_obj): + return capi.C_NULL_OBJECT + raise e + + def from_memory(self, space, w_obj, w_pycppclass, offset): + address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.wrap_cppobject(space, address, self.cppclass, + do_cast=False, is_ref=True) + + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) + class InstancePtrPtrConverter(InstancePtrConverter): _immutable_fields_ = ['uses_local'] @@ -487,12 +533,6 @@ from pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible - def from_memory(self, space, w_obj, w_pycppclass, offset): - self._is_abstract(space) - - def to_memory(self, space, w_obj, w_value, offset): - self._is_abstract(space) - def finalize_call(self, space, w_obj, call_local): from pypy.module.cppyy.interp_cppyy import W_CPPInstance assert isinstance(w_obj, W_CPPInstance) @@ -501,7 +541,6 @@ class StdStringConverter(InstanceConverter): - _immutable_fields_ = ['cppclass'] def __init__(self, space, extra): from pypy.module.cppyy import interp_cppyy 
@@ -509,24 +548,25 @@ InstanceConverter.__init__(self, space, cppclass) def _unwrap_object(self, space, w_obj): - try: + from pypy.module.cppyy.interp_cppyy import W_CPPInstance + if isinstance(w_obj, W_CPPInstance): + arg = InstanceConverter._unwrap_object(self, space, w_obj) + return capi.c_stdstring2stdstring(space, arg) + else: return capi.c_charp2stdstring(space, space.str_w(w_obj)) - except Exception, e: - arg = InstanceConverter._unwrap_object(self, space, w_obj) - result = capi.c_stdstring2stdstring(space, arg) - return result def to_memory(self, space, w_obj, w_value, offset): try: address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) - capi.c_assign2stdstring(space, address, space.str_w(w_value)) - return + assign = self.cppclass.get_overload("__assign__") + from pypy.module.cppyy import interp_cppyy + assign.call( + interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False), [w_value]) except Exception: - pass - return InstanceConverter.to_memory(self, space, w_obj, w_value, offset) + InstanceConverter.to_memory(self, space, w_obj, w_value, offset) def free_argument(self, space, arg, call_local): - capi.c_free_stdstring(space, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) + capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) class StdStringRefConverter(InstancePtrConverter): _immutable_fields_ = ['cppclass'] @@ -570,6 +610,7 @@ def free_argument(self, space, arg, call_local): if hasattr(space, "fake"): raise NotImplementedError + space.getbuiltinmodule("cpyext") from pypy.module.cpyext.pyobject import Py_DecRef, PyObject Py_DecRef(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0])) @@ -627,8 +668,10 @@ # type check for the benefit of the annotator from pypy.module.cppyy.interp_cppyy import W_CPPClass cppclass = space.interp_w(W_CPPClass, cppclass, can_be_None=False) - if compound == "*" or compound == "&": + if compound == "*": return 
InstancePtrConverter(space, cppclass) + elif compound == "&": + return InstanceRefConverter(space, cppclass) elif compound == "**": return InstancePtrPtrConverter(space, cppclass) elif compound == "": @@ -654,7 +697,7 @@ _converters["void**"] = VoidPtrPtrConverter _converters["void*&"] = VoidPtrRefConverter -# special cases (note: CINT backend requires the simple name 'string') +# special cases (note: 'string' aliases added below) _converters["std::basic_string"] = StdStringConverter _converters["const std::basic_string&"] = StdStringConverter # TODO: shouldn't copy _converters["std::basic_string&"] = StdStringRefConverter @@ -776,3 +819,27 @@ for c_type, alias in aliases: _converters[alias] = _converters[c_type] _add_aliased_converters() + +# ROOT-specific converters (TODO: this is a general use case and should grow +# an API; putting it here is done only to circumvent circular imports) +if capi.identify() == "CINT": + + class TStringConverter(InstanceConverter): + def __init__(self, space, extra): + from pypy.module.cppyy import interp_cppyy + cppclass = interp_cppyy.scope_byname(space, "TString") + InstanceConverter.__init__(self, space, cppclass) + + def _unwrap_object(self, space, w_obj): + from pypy.module.cppyy import interp_cppyy + if isinstance(w_obj, interp_cppyy.W_CPPInstance): + arg = InstanceConverter._unwrap_object(self, space, w_obj) + return capi.backend.c_TString2TString(space, arg) + else: + return capi.backend.c_charp2TString(space, space.str_w(w_obj)) + + def free_argument(self, space, arg, call_local): + capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) + + _converters["TString"] = TStringConverter + _converters["const TString&"] = TStringConverter diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -53,17 +53,12 @@ if hasattr(space, "fake"): raise NotImplementedError lresult = capi.c_call_l(space, 
cppmethod, cppthis, num_args, args) - address = rffi.cast(rffi.ULONG, lresult) + ptrval = rffi.cast(rffi.ULONG, lresult) arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap(self.typecode))) - if address == 0: - # TODO: fix this hack; fromaddress() will allocate memory if address - # is null and there seems to be no way around it (ll_buffer can not - # be touched directly) - nullarr = arr.fromaddress(space, address, 0) - assert isinstance(nullarr, W_ArrayInstance) - nullarr.free(space) - return nullarr - return arr.fromaddress(space, address, sys.maxint) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + return arr.fromaddress(space, ptrval, sys.maxint) class VoidExecutor(FunctionExecutor): @@ -144,7 +139,7 @@ from pypy.module.cppyy import interp_cppyy newthis = capi.c_constructor(space, cppmethod, cpptype, num_args, args) assert lltype.typeOf(newthis) == capi.C_OBJECT - return space.wrap(newthis) + return space.wrap(rffi.cast(rffi.LONG, newthis)) # really want ptrdiff_t here class InstancePtrExecutor(FunctionExecutor): @@ -160,7 +155,8 @@ from pypy.module.cppyy import interp_cppyy long_result = capi.c_call_l(space, cppmethod, cppthis, num_args, args) ptr_result = rffi.cast(capi.C_OBJECT, long_result) - return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) + pyres = interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass) + return pyres def execute_libffi(self, space, cif_descr, funcaddr, buffer): jit_libffi.jit_ffi_call(cif_descr, funcaddr, buffer) @@ -189,7 +185,7 @@ long_result = capi.c_call_o(space, cppmethod, cppthis, num_args, args, self.cppclass) ptr_result = rffi.cast(capi.C_OBJECT, long_result) return interp_cppyy.wrap_cppobject(space, ptr_result, self.cppclass, - do_cast=False, python_owns=True) + do_cast=False, python_owns=True, fresh=True) def execute_libffi(self, space, cif_descr, funcaddr, buffer): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ 
-206,6 +202,13 @@ from pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible +class StdStringRefExecutor(InstancePtrExecutor): + + def __init__(self, space, cppclass): + from pypy.module.cppyy import interp_cppyy + cppclass = interp_cppyy.scope_byname(space, capi.std_string_name) + InstancePtrExecutor.__init__(self, space, cppclass) + class PyObjectExecutor(PtrTypeExecutor): @@ -295,12 +298,12 @@ _executors["void*"] = PtrTypeExecutor _executors["const char*"] = CStringExecutor -# special cases +# special cases (note: 'string' aliases added below) _executors["constructor"] = ConstructorExecutor _executors["std::basic_string"] = StdStringExecutor -_executors["const std::basic_string&"] = StdStringExecutor -_executors["std::basic_string&"] = StdStringExecutor # TODO: shouldn't copy +_executors["const std::basic_string&"] = StdStringRefExecutor +_executors["std::basic_string&"] = StdStringRefExecutor _executors["PyObject*"] = PyObjectExecutor @@ -363,7 +366,11 @@ "NOT_RPYTHON" aliases = ( ("const char*", "char*"), + ("std::basic_string", "string"), + ("const std::basic_string&", "const string&"), + ("std::basic_string&", "string&"), + ("PyObject*", "_object*"), ) diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -89,11 +89,11 @@ cppyy_index_t cppyy_get_global_operator( cppyy_scope_t scope, cppyy_scope_t lc, cppyy_scope_t rc, const char* op); - /* method properties ----------------------------------------------------- */ + /* method properties ------------------------------------------------------ */ int cppyy_is_constructor(cppyy_type_t type, cppyy_index_t idx); int cppyy_is_staticmethod(cppyy_type_t type, cppyy_index_t idx); - /* data member reflection information ------------------------------------ */ + /* data member reflection information ------------------------------------- */ int cppyy_num_datamembers(cppyy_scope_t 
scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); @@ -101,7 +101,7 @@ int cppyy_datamember_index(cppyy_scope_t scope, const char* name); - /* data member properties ------------------------------------------------ */ + /* data member properties ------------------------------------------------- */ int cppyy_is_publicdata(cppyy_type_t type, int datamember_index); int cppyy_is_staticdata(cppyy_type_t type, int datamember_index); @@ -112,8 +112,6 @@ cppyy_object_t cppyy_charp2stdstring(const char* str); cppyy_object_t cppyy_stdstring2stdstring(cppyy_object_t ptr); - void cppyy_assign2stdstring(cppyy_object_t ptr, const char* str); - void cppyy_free_stdstring(cppyy_object_t ptr); #ifdef __cplusplus } diff --git a/pypy/module/cppyy/include/cintcwrapper.h b/pypy/module/cppyy/include/cintcwrapper.h --- a/pypy/module/cppyy/include/cintcwrapper.h +++ b/pypy/module/cppyy/include/cintcwrapper.h @@ -11,12 +11,18 @@ void* cppyy_load_dictionary(const char* lib_name); /* pythonization helpers */ + cppyy_object_t cppyy_create_tf1(const char* funcname, unsigned long address, + double xmin, double xmax, int npar); + cppyy_object_t cppyy_ttree_Branch( void* vtree, const char* branchname, const char* classname, void* addobj, int bufsize, int splitlevel); long long cppyy_ttree_GetEntry(void* vtree, long long entry); + cppyy_object_t cppyy_charp2TString(const char* str); + cppyy_object_t cppyy_TString2TString(cppyy_object_t ptr); + #ifdef __cplusplus } #endif // ifdef __cplusplus diff --git a/pypy/module/cppyy/include/clingcwrapper.h b/pypy/module/cppyy/include/clingcwrapper.h new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/include/clingcwrapper.h @@ -0,0 +1,37 @@ +#ifndef CPPYY_CLINGCWRAPPER +#define CPPYY_CLINGCWRAPPER + +#include "capi.h" + +#ifdef __cplusplus +extern "C" { +#endif // ifdef __cplusplus + + /* misc helpers */ + void* cppyy_load_dictionary(const char* 
lib_name); + +#ifdef __cplusplus +} +#endif // ifdef __cplusplus + +// TODO: pick up from llvm-config --cxxflags +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifndef __STDC_CONSTANT_MACROS +#define __STDC_CONSTANT_MACROS +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif + +// Wrapper callback: except this to become available from Cling directly +typedef void (*CPPYY_Cling_Wrapper_t)(void*, int, void**, void*); + +#endif // ifndef CPPYY_CLINGCWRAPPER diff --git a/pypy/module/cppyy/include/cppyy.h b/pypy/module/cppyy/include/cppyy.h --- a/pypy/module/cppyy/include/cppyy.h +++ b/pypy/module/cppyy/include/cppyy.h @@ -17,7 +17,7 @@ #ifdef __cplusplus struct CPPYY_G__p2p { #else -#typedef struct +typedef struct { #endif long i; int reftype; diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -40,9 +40,28 @@ def __init__(self, space): self.cppscope_cache = { "void" : W_CPPClass(space, "void", capi.C_NULL_TYPE) } + self.w_nullptr = None self.cpptemplate_cache = {} self.cppclass_registry = {} self.w_clgen_callback = None + self.w_fngen_callback = None + +def get_nullptr(space): + if hasattr(space, "fake"): + raise NotImplementedError + state = space.fromcache(State) + if state.w_nullptr is None: + from pypy.module._rawffi.interp_rawffi import unpack_simple_shape + from pypy.module._rawffi.array import W_Array, W_ArrayInstance + arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + # TODO: fix this hack; fromaddress() will allocate memory if address + # is null and there seems to be no way around it (ll_buffer can not + # be touched directly) + nullarr = arr.fromaddress(space, rffi.cast(rffi.ULONG, 0), 0) + assert isinstance(nullarr, W_ArrayInstance) + nullarr.free(space) + state.w_nullptr = space.wrap(nullarr) + return state.w_nullptr 
@unwrap_spec(name=str) def resolve_name(space, name): @@ -101,6 +120,11 @@ state = space.fromcache(State) state.w_clgen_callback = w_callback + at unwrap_spec(w_callback=W_Root) +def set_function_generator(space, w_callback): + state = space.fromcache(State) + state.w_fngen_callback = w_callback + def register_class(space, w_pycppclass): w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) @@ -108,7 +132,7 @@ # class allows simple aliasing of methods) capi.pythonize(space, cppclass.name, w_pycppclass) state = space.fromcache(State) - state.cppclass_registry[cppclass.handle] = w_pycppclass + state.cppclass_registry[rffi.cast(rffi.LONG, cppclass.handle)] = w_pycppclass class W_CPPLibrary(W_Root): @@ -580,12 +604,10 @@ def get_returntype(self): return self.space.wrap(self.converter.name) - @jit.elidable_promote() def _get_offset(self, cppinstance): if cppinstance: assert lltype.typeOf(cppinstance.cppclass.handle) == lltype.typeOf(self.scope.handle) - offset = self.offset + capi.c_base_offset(self.space, - cppinstance.cppclass, self.scope, cppinstance.get_rawobject(), 1) + offset = self.offset + cppinstance.cppclass.get_base_offset(cppinstance, self.scope) else: offset = self.offset return offset @@ -694,7 +716,6 @@ def get_method_names(self): return self.space.newlist([self.space.wrap(name) for name in self.methods]) - @jit.elidable_promote('0') def get_overload(self, name): try: return self.methods[name] @@ -707,7 +728,6 @@ def get_datamember_names(self): return self.space.newlist([self.space.wrap(name) for name in self.datamembers]) - @jit.elidable_promote('0') def get_datamember(self, name): try: return self.datamembers[name] @@ -717,7 +737,6 @@ self.datamembers[name] = new_dm return new_dm - @jit.elidable_promote('0') def dispatch(self, name, signature): overload = self.get_overload(name) sig = '(%s)' % signature @@ -886,6 +905,10 @@ def find_datamember(self, name): raise 
self.missing_attribute_error(name) + def get_base_offset(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + return 0 + def get_cppthis(self, cppinstance, calling_scope): assert self == cppinstance.cppclass return cppinstance.get_rawobject() @@ -917,10 +940,15 @@ class W_ComplexCPPClass(W_CPPClass): - def get_cppthis(self, cppinstance, calling_scope): + def get_base_offset(self, cppinstance, calling_scope): assert self == cppinstance.cppclass offset = capi.c_base_offset(self.space, self, calling_scope, cppinstance.get_rawobject(), 1) + return offset + + def get_cppthis(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + offset = self.get_base_offset(cppinstance, calling_scope) return capi.direct_ptradd(cppinstance.get_rawobject(), offset) W_ComplexCPPClass.typedef = TypeDef( @@ -1130,19 +1158,23 @@ def get_pythonized_cppclass(space, handle): state = space.fromcache(State) try: - w_pycppclass = state.cppclass_registry[handle] + w_pycppclass = state.cppclass_registry[rffi.cast(rffi.LONG, handle)] except KeyError: final_name = capi.c_scoped_final_name(space, handle) # the callback will cache the class by calling register_class w_pycppclass = space.call_function(state.w_clgen_callback, space.wrap(final_name)) return w_pycppclass +def get_interface_func(space, w_callable, npar): + state = space.fromcache(State) + return space.call_function(state.w_fngen_callback, w_callable, space.wrap(npar)) + def wrap_cppobject(space, rawobject, cppclass, do_cast=True, python_owns=False, is_ref=False, fresh=False): rawobject = rffi.cast(capi.C_OBJECT, rawobject) - # cast to actual cast if requested and possible - w_pycppclass = space.w_None + # cast to actual if requested and possible + w_pycppclass = None if do_cast and rawobject: actual = capi.c_actual_class(space, cppclass, rawobject) if actual != cppclass.handle: @@ -1158,7 +1190,7 @@ # the variables are re-assigned yet) pass - if space.is_w(w_pycppclass, space.w_None): + if 
w_pycppclass is None: w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) # try to recycle existing object if this one is not newly created @@ -1174,16 +1206,30 @@ memory_regulator.register(cppinstance) return w_cppinstance - at unwrap_spec(w_cppinstance=W_CPPInstance) -def addressof(space, w_cppinstance): - """Takes a bound C++ instance, returns the raw address.""" - address = rffi.cast(rffi.LONG, w_cppinstance.get_rawobject()) +def _addressof(space, w_obj): + try: + # attempt to extract address from array + return rffi.cast(rffi.INTPTR_T, converter.get_rawbuffer(space, w_obj)) + except TypeError: + pass + # attempt to get address of C++ instance + return rffi.cast(rffi.INTPTR_T, converter.get_rawobject(space, w_obj)) + + at unwrap_spec(w_obj=W_Root) +def addressof(space, w_obj): + """Takes a bound C++ instance or array, returns the raw address.""" + address = _addressof(space, w_obj) return space.wrap(address) - at unwrap_spec(address=int, owns=bool) -def bind_object(space, address, w_pycppclass, owns=False): + at unwrap_spec(owns=bool, cast=bool) +def bind_object(space, w_obj, w_pycppclass, owns=False, cast=False): """Takes an address and a bound C++ class proxy, returns a bound instance.""" - rawobject = rffi.cast(capi.C_OBJECT, address) + try: + # attempt address from array or C++ instance + rawobject = rffi.cast(capi.C_OBJECT, _addressof(space, w_obj)) + except Exception: + # accept integer value as address + rawobject = rffi.cast(capi.C_OBJECT, space.uint_w(w_obj)) w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) if not w_cppclass: w_cppclass = scope_byname(space, space.str_w(w_pycppclass)) @@ -1191,4 +1237,4 @@ raise OperationError(space.w_TypeError, space.wrap("no such class: %s" % space.str_w(w_pycppclass))) cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) - return wrap_cppobject(space, rawobject, cppclass, do_cast=False, python_owns=owns) + return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, 
python_owns=owns) diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -55,6 +55,19 @@ def clgen_callback(name): return get_pycppclass(name) +def fngen_callback(func, npar): # todo, some kind of arg transform spec + if npar == 0: + def wrapper(a0, a1): + la0 = [a0[0], a0[1], a0[2], a0[3]] + return func(la0) + return wrapper + else: + def wrapper(a0, a1): + la0 = [a0[0], a0[1], a0[2], a0[3]] + la1 = [a1[i] for i in range(npar)] + return func(la0, la1) + return wrapper + def make_static_function(func_name, cppol): def function(*args): @@ -416,6 +429,9 @@ # class generator callback cppyy._set_class_generator(clgen_callback) + # function generator callback + cppyy._set_function_generator(fngen_callback) + # user interface objects (note the two-step of not calling scope_byname here: # creation of global functions may cause the creation of classes in the global # namespace, so gbl must exist at that point to cache them) @@ -431,6 +447,9 @@ # be the same issue for all typedef'd builtin types setattr(gbl, 'unsigned int', int) + # install nullptr as a unique reference + setattr(gbl, 'nullptr', cppyy._get_nullptr()) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -8,6 +8,7 @@ #include "TApplication.h" #include "TInterpreter.h" +#include "TVirtualMutex.h" #include "Getline.h" #include "TBaseClass.h" @@ -24,6 +25,8 @@ From noreply at buildbot.pypy.org Thu Apr 24 01:42:58 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 24 Apr 2014 01:42:58 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: remove extra double quote Message-ID: <20140423234258.415841C02F2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70904:790fb4852883 Date: 
2014-04-23 16:41 -0700 http://bitbucket.org/pypy/pypy/changeset/790fb4852883/ Log: remove extra double quote diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py --- a/pypy/module/_posixsubprocess/interp_subprocess.py +++ b/pypy/module/_posixsubprocess/interp_subprocess.py @@ -219,7 +219,8 @@ def cloexec_pipe(space): - """"cloexec_pipe() -> (read_end, write_end) + """cloexec_pipe() -> (read_end, write_end) + Create a pipe whose ends have the cloexec flag set.""" with lltype.scoped_alloc(rffi.CArrayPtr(rffi.INT).TO, 2) as fds: From noreply at buildbot.pypy.org Thu Apr 24 01:47:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 01:47:38 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: move buffer to rpython Message-ID: <20140423234738.A32781C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70905:a0c9719c5500 Date: 2014-04-23 19:41 -0400 http://bitbucket.org/pypy/pypy/changeset/a0c9719c5500/ Log: move buffer to rpython diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -2,7 +2,7 @@ # A convenient read-write buffer. Located here for want of a better place. 
# -from pypy.interpreter.buffer import Buffer +from rpython.rlib.buffer import Buffer from pypy.interpreter.gateway import unwrap_spec diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,10 +1,10 @@ -from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray from pypy.objspace.std.bufferobject import W_Buffer +from rpython.rlib.buffer import Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.buffer import Buffer +from rpython.rlib.buffer import Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,4 +1,4 @@ -from pypy.interpreter.buffer import Buffer +from rpython.rlib.buffer import Buffer # XXX not the most efficient implementation diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -1,6 +1,7 @@ from __future__ import with_statement from rpython.rlib import jit +from 
rpython.rlib.buffer import Buffer from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable @@ -9,7 +10,6 @@ from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( interp2app, interpindirect2app, unwrap_spec) diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -1,5 +1,5 @@ +from rpython.rlib.buffer import StringBuffer, SubBuffer from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.buffer import StringBuffer, SubBuffer from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -15,8 +15,8 @@ from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments +from rpython.rlib.buffer import Buffer from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize from rpython.tool.sourcetools import func_renamer diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,6 +1,6 @@ -from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit +from rpython.rlib.buffer import Buffer from rpython.rlib.debug 
import make_sure_not_resized from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -2,8 +2,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.buffer import Buffer from rpython.rlib import rmmap, rarithmetic +from rpython.rlib.buffer import Buffer from rpython.rlib.rmmap import RValueError, RTypeError, RMMapError if rmmap.HAVE_LARGEFILE_SUPPORT: diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -1,10 +1,10 @@ from rpython.annotator.model import SomeInstance, s_None from pypy.interpreter import argument, gateway from pypy.interpreter.baseobjspace import W_Root, ObjSpace, SpaceCache -from pypy.interpreter.buffer import Buffer from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.sliceobject import W_SliceObject +from rpython.rlib.buffer import Buffer from rpython.rlib.objectmodel import instantiate, we_are_translated, specialize from rpython.rlib.nonconst import NonConstant from rpython.rlib.rarithmetic import r_uint, r_singlefloat diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -3,7 +3,7 @@ """ import operator -from pypy.interpreter import buffer +from rpython.rlib.buffer import Buffer, StringBuffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -19,7 +19,7 @@ """ def __init__(self, 
buf): - assert isinstance(buf, buffer.Buffer) + assert isinstance(buf, Buffer) self.buf = buf def buffer_w(self, space, flags): @@ -45,7 +45,6 @@ builder = StringBuilder(len(unistr) * UNICODE_SIZE) for unich in unistr: pack_unichar(unich, builder) - from pypy.interpreter.buffer import StringBuffer buf = StringBuffer(builder.build()) else: buf = space.readbuf_w(w_object) @@ -59,7 +58,7 @@ if size < -1: raise OperationError(space.w_ValueError, space.wrap("size must be zero or positive")) - buf = buffer.SubBuffer(buf, offset, size) + buf = SubBuffer(buf, offset, size) return W_Buffer(buf) def descr_len(self, space): diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -2,10 +2,10 @@ from rpython.rlib.objectmodel import ( import_from_mixin, newlist_hint, resizelist_hint) +from rpython.rlib.buffer import Buffer from rpython.rlib.rstring import StringBuilder from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.signature import Signature diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -3,10 +3,10 @@ from rpython.rlib.jit import we_are_jitted from rpython.rlib.objectmodel import ( compute_hash, compute_unique_id, import_from_mixin) +from rpython.rlib.buffer import StringBuffer from rpython.rlib.rstring import StringBuilder, replace from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) diff --git a/pypy/objspace/std/memoryobject.py 
b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -3,7 +3,7 @@ """ import operator -from pypy.interpreter import buffer +from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -35,7 +35,7 @@ """ def __init__(self, buf): - assert isinstance(buf, buffer.Buffer) + assert isinstance(buf, Buffer) self.buf = buf def buffer_w(self, space, flags): @@ -85,7 +85,7 @@ size = stop - start if size < 0: size = 0 - buf = buffer.SubBuffer(self.buf, start, size) + buf = SubBuffer(self.buf, start, size) return W_MemoryView(buf) def descr_tobytes(self, space): diff --git a/pypy/interpreter/buffer.py b/rpython/rlib/buffer.py rename from pypy/interpreter/buffer.py rename to rpython/rlib/buffer.py diff --git a/pypy/interpreter/test/test_buffer.py b/rpython/rlib/test/test_buffer.py rename from pypy/interpreter/test/test_buffer.py rename to rpython/rlib/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -1,43 +1,9 @@ -import py -from rpython.tool.udir import udir +from rpython.rlib.buffer import * -testdir = udir.ensure('test_buffer', dir=1) - -class TestBuffer: - def test_buffer_w(self): - space = self.space - w_hello = space.wrap('hello world') - buf = space.buffer_w(w_hello, space.BUF_SIMPLE) - assert buf.getlength() == 11 - assert buf.as_str() == 'hello world' - assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.newbuffer(buf), space.BUF_SIMPLE) is buf - assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello, space.BUF_SIMPLE))) == 'hello world' - space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5), space.BUF_SIMPLE) - - def test_file_write(self): - space = self.space - w_buffer = 
space.newbuffer(space.buffer_w(space.wrap('hello world'), space.BUF_SIMPLE)) - filename = str(testdir.join('test_file_write')) - space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): - f = open(filename, 'wb') - f.write(buffer) - f.close() - """) - f = open(filename, 'rb') - data = f.read() - f.close() - assert data == 'hello world' - - def test_unicode(self): - space = self.space - s = space.bufferstr_w(space.wrap(u'hello')) - assert type(s) is str - assert s == 'hello' - space.raises_w(space.w_UnicodeEncodeError, - space.bufferstr_w, space.wrap(u'\xe9')) - - -# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. +def test_string_buffer(): + buf = StringBuffer('hello world') + assert buf.getitem(4) == 'o' + assert buf.getlength() == 11 + assert buf.getslice(1, 6, 1, 5) == 'ello ' + assert buf.as_str() == 'hello world' From noreply at buildbot.pypy.org Thu Apr 24 01:54:18 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 24 Apr 2014 01:54:18 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: open up more tests based on dummy backend Message-ID: <20140423235418.1E6471C02F2@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70906:e1e330d8479f Date: 2014-04-23 16:51 -0700 http://bitbucket.org/pypy/pypy/changeset/e1e330d8479f/ Log: open up more tests based on dummy backend diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -38,6 +38,24 @@ typedef std::map Scopes_t; static Scopes_t s_scopes; +class PseudoExample01 { +public: + PseudoExample01() : m_somedata(-99) {} + PseudoExample01(int a) : m_somedata(a) {} + PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} + PseudoExample01& operator=(const PseudoExample01& e) { + if (this != &e) m_somedata = e.m_somedata; + return *this; + } + virtual ~PseudoExample01() {} + 
+public: + int m_somedata; +}; + +static int example01_last_static_method = 0; +static int example01_last_constructor = 0; + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- @@ -46,27 +64,62 @@ std::vector methods; - // static double staticAddToDouble(double a); + // ( 0) static double staticAddToDouble(double a) std::vector argtypes; argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); - // static int staticAddOneToInt(int a); - // static int staticAddOneToInt(int a, int b); + // ( 1) static int staticAddOneToInt(int a) + // ( 2) static int staticAddOneToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); - // static int staticAtoi(const char* str); + // ( 3) static int staticAtoi(const char* str) argtypes.clear(); argtypes.push_back("const char*"); methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); - // static char* staticStrcpy(const char* strin); + // ( 4) static char* staticStrcpy(const char* strin) methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // ( 5) static void staticSetPayload(payload* p, double d) + // ( 6) static payload* staticCyclePayload(payload* p, double d) + // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + argtypes.clear(); + argtypes.push_back("payload*"); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + + // ( 8) static int getCount() + // ( 9) static void setCount(int) + argtypes.clear(); + 
methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + + // cut-off is used in cppyy_is_static + example01_last_static_method = methods.size(); + + // (10) example01() + // (11) example01(int a) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + + // cut-off is used in cppyy_is_constructor + example01_last_constructor = methods.size(); + + // (12) double addDataToDouble(double a) + argtypes.clear(); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; // -- class example01 @@ -98,47 +151,69 @@ } +/* memory management ------------------------------------------------------ */ +void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { + if (handle == s_handles["example01"]) + delete (PseudoExample01*)self; +} + + /* method/function dispatching -------------------------------------------- */ -template -static inline T cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - T result = T(); +int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + int result = 0; switch ((long)method) { - case 0: // double staticAddToDouble(double) - assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; - break; - case 1: // int staticAddOneToInt(int) + case 1: // static int staticAddOneToInt(int) assert(!self && nargs == 1); result = ((CPPYY_G__value*)args)[0].obj.in + 1; break; - case 2: // int staticAddOneToInt(int, int) + case 2: // static int staticAddOneToInt(int, int) assert(!self && nargs == 2); result = ((CPPYY_G__value*)args)[0].obj.in + 
((CPPYY_G__value*)args)[1].obj.in + 1; break; - case 3: // int staticAtoi(const char* str) + case 3: // static int staticAtoi(const char* str) assert(!self && nargs == 1); result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); break; + case 8: // static int getCount() + assert(!self && nargs == 0); + // can't actually call this method (would need to resolve example01::count), but + // other than the memory tests, most tests just check for 0 at the end + result = 0; + break; default: + assert(!"method unknown in cppyy_call_i"); break; } return result; } -int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); -} - long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - // char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); - char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return (long)strout; + if ((long)method == 4) { // static char* staticStrcpy(const char* strin) + const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); + char* strout = (char*)malloc(::strlen(strin)+1); + ::strcpy(strout, strin); + return (long)strout; + } + assert(!"method unknown in cppyy_call_l"); + return 0; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); + double result = 0.; + switch ((long)method) { + case 0: // static double staticAddToDouble(double) + assert(!self && nargs == 1); + result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + break; + case 12: // double addDataToDouble(double a) + assert(self && nargs == 1); + result = ((PseudoExample01*)self)->m_somedata + ((CPPYY_G__value*)args)[0].obj.d; + break; + default: + assert(!"method unknown in cppyy_call_d"); + break; + } + return result; } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, 
void* args) { @@ -149,10 +224,31 @@ return strout; } +cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { + void* result = 0; + if (handle == s_handles["example01"]) { + switch ((long)method) { + case 10: + assert(nargs == 0); + result = new PseudoExample01; + break; + case 11: + assert(nargs == 1); + result = new PseudoExample01(((CPPYY_G__value*)args)[0].obj.in); + break; + default: + assert(!"method unknown in cppyy_constructor"); + break; + } + } + return (cppyy_object_t)result; +} + cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { return (cppyy_methptrgetter_t)0; } + /* handling of function argument buffer ----------------------------------- */ void* cppyy_allocate_function_args(size_t nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); @@ -200,7 +296,11 @@ } int cppyy_has_complex_hierarchy(cppyy_type_t /* handle */) { - return 1; + return 0; +} + +int cppyy_num_bases(cppyy_type_t /*handle*/) { + return 0; } @@ -252,11 +352,16 @@ /* method properties ----------------------------------------------------- */ -int cppyy_is_constructor(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return example01_last_static_method <= method_index + && method_index < example01_last_constructor; return 0; } -int cppyy_is_staticmethod(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return method_index < example01_last_static_method ? 
1 : 0; return 1; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -7,11 +7,12 @@ if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex - if not item.location[0] in ['test_helper.py', 'test_cppyy.py']: + if not ('test_helper.py' in item.location[0] or \ + 'test_cppyy.py' in item.location[0]): py.test.skip("genreflex is not installed") import re - if item.location[0] == 'test_cppyy.py' and \ - not re.search("test0[1-3]", item.location[2]): + if 'test_cppyy.py' in item.location[0] and \ + not re.search("test0[1-36]", item.location[2]): py.test.skip("genreflex is not installed") def pytest_configure(config): From noreply at buildbot.pypy.org Thu Apr 24 01:54:19 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 24 Apr 2014 01:54:19 +0200 (CEST) Subject: [pypy-commit] pypy default: merge reflex-support: more tests based on dummy backend Message-ID: <20140423235419.7AC8C1C02F2@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r70907:656185bf25ce Date: 2014-04-23 16:53 -0700 http://bitbucket.org/pypy/pypy/changeset/656185bf25ce/ Log: merge reflex-support: more tests based on dummy backend diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -38,6 +38,24 @@ typedef std::map Scopes_t; static Scopes_t s_scopes; +class PseudoExample01 { +public: + PseudoExample01() : m_somedata(-99) {} + PseudoExample01(int a) : m_somedata(a) {} + PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} + PseudoExample01& operator=(const PseudoExample01& e) { + if (this != &e) m_somedata = e.m_somedata; + return *this; + } + virtual ~PseudoExample01() {} + +public: + int m_somedata; +}; + +static int 
example01_last_static_method = 0; +static int example01_last_constructor = 0; + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- @@ -46,27 +64,62 @@ std::vector methods; - // static double staticAddToDouble(double a); + // ( 0) static double staticAddToDouble(double a) std::vector argtypes; argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); - // static int staticAddOneToInt(int a); - // static int staticAddOneToInt(int a, int b); + // ( 1) static int staticAddOneToInt(int a) + // ( 2) static int staticAddOneToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); - // static int staticAtoi(const char* str); + // ( 3) static int staticAtoi(const char* str) argtypes.clear(); argtypes.push_back("const char*"); methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); - // static char* staticStrcpy(const char* strin); + // ( 4) static char* staticStrcpy(const char* strin) methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // ( 5) static void staticSetPayload(payload* p, double d) + // ( 6) static payload* staticCyclePayload(payload* p, double d) + // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + argtypes.clear(); + argtypes.push_back("payload*"); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + + // ( 8) static int getCount() + // ( 9) static void setCount(int) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + 
argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + + // cut-off is used in cppyy_is_static + example01_last_static_method = methods.size(); + + // (10) example01() + // (11) example01(int a) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + + // cut-off is used in cppyy_is_constructor + example01_last_constructor = methods.size(); + + // (12) double addDataToDouble(double a) + argtypes.clear(); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; // -- class example01 @@ -98,47 +151,69 @@ } +/* memory management ------------------------------------------------------ */ +void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { + if (handle == s_handles["example01"]) + delete (PseudoExample01*)self; +} + + /* method/function dispatching -------------------------------------------- */ -template -static inline T cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - T result = T(); +int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + int result = 0; switch ((long)method) { - case 0: // double staticAddToDouble(double) - assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; - break; - case 1: // int staticAddOneToInt(int) + case 1: // static int staticAddOneToInt(int) assert(!self && nargs == 1); result = ((CPPYY_G__value*)args)[0].obj.in + 1; break; - case 2: // int staticAddOneToInt(int, int) + case 2: // static int staticAddOneToInt(int, int) assert(!self && nargs == 2); result = ((CPPYY_G__value*)args)[0].obj.in + ((CPPYY_G__value*)args)[1].obj.in + 1; break; - case 3: // int staticAtoi(const char* str) + case 
3: // static int staticAtoi(const char* str) assert(!self && nargs == 1); result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); break; + case 8: // static int getCount() + assert(!self && nargs == 0); + // can't actually call this method (would need to resolve example01::count), but + // other than the memory tests, most tests just check for 0 at the end + result = 0; + break; default: + assert(!"method unknown in cppyy_call_i"); break; } return result; } -int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); -} - long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - // char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); - char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return (long)strout; + if ((long)method == 4) { // static char* staticStrcpy(const char* strin) + const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); + char* strout = (char*)malloc(::strlen(strin)+1); + ::strcpy(strout, strin); + return (long)strout; + } + assert(!"method unknown in cppyy_call_l"); + return 0; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); + double result = 0.; + switch ((long)method) { + case 0: // static double staticAddToDouble(double) + assert(!self && nargs == 1); + result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + break; + case 12: // double addDataToDouble(double a) + assert(self && nargs == 1); + result = ((PseudoExample01*)self)->m_somedata + ((CPPYY_G__value*)args)[0].obj.d; + break; + default: + assert(!"method unknown in cppyy_call_d"); + break; + } + return result; } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { @@ -149,10 +224,31 @@ return strout; } +cppyy_object_t 
cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { + void* result = 0; + if (handle == s_handles["example01"]) { + switch ((long)method) { + case 10: + assert(nargs == 0); + result = new PseudoExample01; + break; + case 11: + assert(nargs == 1); + result = new PseudoExample01(((CPPYY_G__value*)args)[0].obj.in); + break; + default: + assert(!"method unknown in cppyy_constructor"); + break; + } + } + return (cppyy_object_t)result; +} + cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { return (cppyy_methptrgetter_t)0; } + /* handling of function argument buffer ----------------------------------- */ void* cppyy_allocate_function_args(size_t nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); @@ -200,7 +296,11 @@ } int cppyy_has_complex_hierarchy(cppyy_type_t /* handle */) { - return 1; + return 0; +} + +int cppyy_num_bases(cppyy_type_t /*handle*/) { + return 0; } @@ -252,11 +352,16 @@ /* method properties ----------------------------------------------------- */ -int cppyy_is_constructor(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return example01_last_static_method <= method_index + && method_index < example01_last_constructor; return 0; } -int cppyy_is_staticmethod(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return method_index < example01_last_static_method ? 
1 : 0; return 1; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -7,11 +7,12 @@ if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex - if not item.location[0] in ['test_helper.py', 'test_cppyy.py']: + if not ('test_helper.py' in item.location[0] or \ + 'test_cppyy.py' in item.location[0]): py.test.skip("genreflex is not installed") import re - if item.location[0] == 'test_cppyy.py' and \ - not re.search("test0[1-3]", item.location[2]): + if 'test_cppyy.py' in item.location[0] and \ + not re.search("test0[1-36]", item.location[2]): py.test.skip("genreflex is not installed") def pytest_configure(config): From noreply at buildbot.pypy.org Thu Apr 24 02:16:48 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 24 Apr 2014 02:16:48 +0200 (CEST) Subject: [pypy-commit] pypy default: doc updates (clarification and a description of nullptr) Message-ID: <20140424001648.44CAB1C06C3@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r70908:d426723559fb Date: 2014-04-23 17:15 -0700 http://bitbucket.org/pypy/pypy/changeset/d426723559fb/ Log: doc updates (clarification and a description of nullptr) diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -560,6 +560,12 @@ Fixing these bootstrap problems is on the TODO list. The global namespace is ``cppyy.gbl``. +* **NULL**: Is represented as ``cppyy.gbl.nullptr``. + In C++11, the keyword ``nullptr`` is used to represent ``NULL``. + For clarity of intent, it is recommended to use this instead of ``None`` + (or the integer ``0``, which can serve in some cases), as ``None`` is better + understood as ``void`` in C++. + * **operator conversions**: If defined in the C++ class and a python equivalent exists (i.e. 
all builtin integer and floating point types, as well as ``bool``), it will map onto that python conversion. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -66,58 +66,26 @@ Reflex ====== -This method is still experimental. It adds the `cppyy`_ module. -The method works by using the `Reflex package`_ to provide reflection -information of the C++ code, which is then used to automatically generate -bindings at runtime. -From a python standpoint, there is no difference between generating bindings -at runtime, or having them "statically" generated and available in scripts -or compiled into extension modules: python classes and functions are always -runtime structures, created when a script or module loads. +The builtin `cppyy`_ module uses reflection information, provided by +`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +automatically generate bindings at runtime. +In Python, classes and functions are always runtime structures, so when they +are generated matters not for performance. However, if the backend itself is capable of dynamic behavior, it is a much -better functional match to python, allowing tighter integration and more -natural language mappings. -Full details are `available here`_. +better functional match, allowing tighter integration and more natural +language mappings. + +The `cppyy`_ module is written in RPython, thus PyPy's JIT is able to remove +most cross-language call overhead. + +`Full details`_ are `available here`_. .. _`cppyy`: cppyy.html -.. _`reflex-support`: cppyy.html -.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex +.. _`installed separately`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 +.. _`Reflex`: http://root.cern.ch/drupal/content/reflex +.. _`Full details`: cppyy.html .. 
_`available here`: cppyy.html -Pros ----- - -The cppyy module is written in RPython, which makes it possible to keep the -code execution visible to the JIT all the way to the actual point of call into -C++, thus allowing for a very fast interface. -Reflex is currently in use in large software environments in High Energy -Physics (HEP), across many different projects and packages, and its use can be -virtually completely automated in a production environment. -One of its uses in HEP is in providing language bindings for CPython. -Thus, it is possible to use Reflex to have bound code work on both CPython and -on PyPy. -In the medium-term, Reflex will be replaced by `cling`_, which is based on -`llvm`_. -This will affect the backend only; the python-side interface is expected to -remain the same, except that cling adds a lot of dynamic behavior to C++, -enabling further language integration. - -.. _`cling`: http://root.cern.ch/drupal/content/cling -.. _`llvm`: http://llvm.org/ - -Cons ----- - -C++ is a large language, and cppyy is not yet feature-complete. -Still, the experience gained in developing the equivalent bindings for CPython -means that adding missing features is a simple matter of engineering, not a -question of research. -The module is written so that currently missing features should do no harm if -you don't use them, if you do need a particular feature, it may be necessary -to work around it in python or with a C++ helper function. -Although Reflex works on various platforms, the bindings with PyPy have only -been tested on Linux. 
- RPython Mixed Modules ===================== From noreply at buildbot.pypy.org Thu Apr 24 02:56:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 24 Apr 2014 02:56:11 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: revert back to usage of finditem/setitem_str Message-ID: <20140424005611.C6D701C099D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70909:36af1cd6bd2c Date: 2014-04-23 17:13 -0700 http://bitbucket.org/pypy/pypy/changeset/36af1cd6bd2c/ Log: revert back to usage of finditem/setitem_str diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -433,6 +433,9 @@ def getconstant_w(self, index): return self.getcode().co_consts_w[index] + def getname_u(self, index): + return self.space.identifier_w(self.getname_w(index)) + def getname_w(self, index): return self.getcode().co_names_w[index] @@ -753,9 +756,9 @@ self.pushvalue(w_build_class) def STORE_NAME(self, varindex, next_instr): - w_varname = self.getname_w(varindex) + varname = self.getname_u(varindex) w_newvalue = self.popvalue() - self.space.setitem(self.w_locals, w_varname, w_newvalue) + self.space.setitem_str(self.w_locals, varname, w_newvalue) def DELETE_NAME(self, varindex, next_instr): w_varname = self.getname_w(varindex) @@ -765,8 +768,8 @@ # catch KeyErrors and turn them into NameErrors if not e.match(self.space, self.space.w_KeyError): raise - raise oefmt(self.space.w_NameError, "name '%s' is not defined", - self.space.str_w(w_varname)) + raise oefmt(self.space.w_NameError, + "name %R is not defined", w_varname) def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() @@ -817,7 +820,7 @@ self.space.delattr(w_obj, w_attributename) def STORE_GLOBAL(self, nameindex, next_instr): - varname = self.space.str_w(self.getname_w(nameindex)) + varname = self.getname_u(nameindex) w_newvalue = self.popvalue() 
self.space.setitem_str(self.w_globals, varname, w_newvalue) @@ -827,24 +830,24 @@ def LOAD_NAME(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) + varname = self.space.identifier_w(w_varname) if self.w_locals is not self.w_globals: - w_value = self.space.finditem(self.w_locals, w_varname) + w_value = self.space.finditem_str(self.w_locals, varname) if w_value is not None: self.pushvalue(w_value) return # fall-back - w_value = self._load_global(w_varname) + w_value = self._load_global(varname) if w_value is None: raise oefmt(self.space.w_NameError, "name %R is not defined", w_varname) self.pushvalue(w_value) - def _load_global(self, w_varname): - w_value = self.space.finditem(self.w_globals, w_varname) + def _load_global(self, varname): + w_value = self.space.finditem_str(self.w_globals, varname) if w_value is None: # not in the globals, now look in the built-ins - w_value = self.get_builtin().getdictvalue( - self.space, self.space.identifier_w(w_varname)) + w_value = self.get_builtin().getdictvalue(self.space, varname) return w_value _load_global._always_inline_ = True @@ -855,7 +858,7 @@ def LOAD_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - w_value = self._load_global(w_varname) + w_value = self._load_global(self.space.identifier_w(w_varname)) if w_value is None: self._load_global_failed(w_varname) self.pushvalue(w_value) @@ -993,7 +996,7 @@ if not e.match(self.space, self.space.w_AttributeError): raise raise oefmt(self.space.w_ImportError, - "cannot import name '%s'", self.space.str_w(w_name)) + "cannot import name %R", w_name) self.pushvalue(w_obj) def YIELD_VALUE(self, oparg, next_instr): From noreply at buildbot.pypy.org Thu Apr 24 03:27:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 03:27:54 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: comment funcs Message-ID: <20140424012754.841FF1C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: 
refactor-buffer-api Changeset: r70910:e28616a98151 Date: 2014-04-23 20:10 -0400 http://bitbucket.org/pypy/pypy/changeset/e28616a98151/ Log: comment funcs diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1371,7 +1371,7 @@ BUF_CONTIG_RO = 3 def buffer_w(self, w_obj, flags): - # New buffer interface, returns a buffer based on flags + # New buffer interface, returns a buffer based on flags (PyObject_GetBuffer) try: return w_obj.buffer_w(self, flags) except TypeError: @@ -1379,7 +1379,7 @@ "'%T' does not have the buffer interface", w_obj) def readbuf_w(self, w_obj): - # Old buffer interface, returns a readonly buffer + # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) try: return w_obj.readbuf_w(self) except TypeError: @@ -1387,7 +1387,7 @@ "expected a readable buffer object") def writebuf_w(self, w_obj): - # Old buffer interface, returns a writeable buffer + # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) try: return w_obj.writebuf_w(self) except TypeError: @@ -1395,7 +1395,7 @@ "expected a writeable buffer object") def charbuf_w(self, w_obj): - # Old buffer interface, returns a character buffer + # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) try: return w_obj.charbuf_w(self) except TypeError: From noreply at buildbot.pypy.org Thu Apr 24 03:27:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 03:27:56 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix validation of buffer writability Message-ID: <20140424012756.309391C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70911:ac02d8345ef9 Date: 2014-04-23 21:06 -0400 http://bitbucket.org/pypy/pypy/changeset/ac02d8345ef9/ Log: fix validation of buffer writability diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- 
a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -5,7 +5,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id) + compute_unique_id, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX @@ -1369,6 +1369,11 @@ BUF_FULL_RO = 1 BUF_CONTIG = 2 BUF_CONTIG_RO = 3 + BUF_WRITABLE = 4 + + def check_buf_flags(self, flags, readonly): + if flags & self.BUF_WRITABLE == self.BUF_WRITABLE and readonly: + raise oefmt(self.w_BufferError, "Object is not writable.") def buffer_w(self, w_obj, flags): # New buffer interface, returns a buffer based on flags (PyObject_GetBuffer) @@ -1402,6 +1407,26 @@ raise oefmt(self.w_TypeError, "expected a character buffer object") + def _getarg_error(self, expected, w_obj): + raise oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + + @specialize.arg(1) + def getarg_w(self, code, w_obj): + if code == 'w*': + try: + try: + return w_obj.buffer_w(self, self.BUF_WRITABLE) + except OperationError: + self._getarg_error("read-write buffer", w_obj) + except TypeError: + pass + try: + return w_obj.writebuf_w(self) + except TypeError: + self._getarg_error("read-write buffer", w_obj) + else: + assert False + # XXX rename these/replace with code more like CPython getargs for buffers def bufferstr_w(self, w_obj): # Directly returns an interp-level str. 
Note that if w_obj is a diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -80,7 +80,7 @@ self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): - rwbuffer = space.buffer_w(w_buffer, space.BUF_CONTIG) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() w_data = space.call_method(self, "read", space.wrap(length)) diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -41,7 +41,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) - rwbuffer = space.buffer_w(w_buffer, space.BUF_CONTIG) + rwbuffer = space.getarg_w('w*', w_buffer) size = rwbuffer.getlength() output = self.read(size) diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -366,7 +366,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) self._check_readable(space) - rwbuffer = space.buffer_w(w_buffer, space.BUF_CONTIG) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() try: buf = os.read(self.fd, length) diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,6 +139,8 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\ncxxxxx' diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -97,6 +97,8 @@ a2 = 
bytearray('testing') assert b.readinto(a1) == 1 assert b.readinto(a2) == 4 + exc = raises(TypeError, b.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" b.close() assert a1 == "h" assert a2 == "elloing" diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -135,6 +135,8 @@ a = bytearray('x' * 10) f = _io.FileIO(self.tmpfile, 'r+') assert f.readinto(a) == 10 + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\nc\0\0\0\0\0' # diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -443,6 +443,7 @@ return self._value def buffer_w(self, space, flags): + space.check_buf_flags(flags, True) return StringBuffer(self._value) def readbuf_w(self, space): diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -34,16 +34,17 @@ an interp-level buffer. 
""" - def __init__(self, buf): + def __init__(self, obj, buf): assert isinstance(buf, Buffer) + self.obj = obj self.buf = buf def buffer_w(self, space, flags): - return self.buf + return space.buffer_w(self.obj, flags) @staticmethod def descr_new_memoryview(space, w_subtype, w_object): - return W_MemoryView(space.buffer_w(w_object, space.BUF_FULL_RO)) + return W_MemoryView(w_object, space.buffer_w(w_object, space.BUF_FULL_RO)) def _make_descr__cmp(name): def descr__cmp(self, space, w_other): @@ -86,7 +87,7 @@ if size < 0: size = 0 buf = SubBuffer(self.buf, start, size) - return W_MemoryView(buf) + return W_MemoryView(self.obj, buf) def descr_tobytes(self, space): return space.wrap(self.as_str()) From noreply at buildbot.pypy.org Thu Apr 24 03:56:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 03:56:23 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: test/fix bufferstr_w behavior Message-ID: <20140424015623.97C391C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70912:e2178919af90 Date: 2014-04-23 21:50 -0400 http://bitbucket.org/pypy/pypy/changeset/e2178919af90/ Log: test/fix bufferstr_w behavior diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1408,7 +1408,11 @@ "expected a character buffer object") def _getarg_error(self, expected, w_obj): - raise oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + if self.is_none(w_obj): + name = "None" + else: + name = self.type(w_obj).getname(self) + raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) @specialize.arg(1) def getarg_w(self, code, w_obj): @@ -1442,7 +1446,14 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - return self.readbuf_w(w_obj).as_str() + try: + buf = w_obj.buffer_w(self, 0) + except TypeError: + try: + buf = w_obj.readbuf_w(self) + except TypeError: + 
self._getarg_error("string or buffer", w_obj) + return buf.as_str() def bufferchar_w(self, w_obj): try: diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -545,8 +545,12 @@ s.connect(("www.python.org", 80)) except _socket.gaierror, ex: skip("GAIError - probably no connection: %s" % str(ex.args)) + exc = raises(TypeError, s.send, None) + assert str(exc.value) == "must be string or buffer, not None" assert s.send(buffer('')) == 0 assert s.sendall(buffer('')) is None + assert s.send(memoryview('')) == 0 + assert s.sendall(memoryview('')) is None assert s.send(u'') == 0 assert s.sendall(u'') is None raises(UnicodeEncodeError, s.send, u'\xe9') From noreply at buildbot.pypy.org Thu Apr 24 03:56:24 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 03:56:24 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: kill bufferchar_w Message-ID: <20140424015624.C480A1C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70913:33a8fc69f913 Date: 2014-04-23 21:54 -0400 http://bitbucket.org/pypy/pypy/changeset/33a8fc69f913/ Log: kill bufferchar_w diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1431,7 +1431,7 @@ else: assert False - # XXX rename these/replace with code more like CPython getargs for buffers + # XXX rename/replace with code more like CPython getargs for buffers def bufferstr_w(self, w_obj): # Directly returns an interp-level str. 
Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): @@ -1455,14 +1455,6 @@ self._getarg_error("string or buffer", w_obj) return buf.as_str() - def bufferchar_w(self, w_obj): - try: - return self.str_w(w_obj) - except OperationError, e: - if not e.match(self, self.w_TypeError): - raise - return self.charbuf_w(w_obj) - def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): return None diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -477,8 +477,14 @@ def _val(self, space): return self._value - def _op_val(self, space, w_other): - return space.bufferchar_w(w_other) + @staticmethod + def _op_val(space, w_other): + try: + return space.str_w(w_other) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + return space.charbuf_w(w_other) def _chr(self, char): assert len(char) == 1 From noreply at buildbot.pypy.org Thu Apr 24 04:01:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 04:01:39 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix translation Message-ID: <20140424020139.BB1531C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70914:820aa0f69a94 Date: 2014-04-23 22:01 -0400 http://bitbucket.org/pypy/pypy/changeset/820aa0f69a94/ Log: fix translation diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1449,11 +1449,15 @@ try: buf = w_obj.buffer_w(self, 0) except TypeError: - try: - buf = w_obj.readbuf_w(self) - except TypeError: - self._getarg_error("string or buffer", w_obj) - return buf.as_str() + pass + else: + return buf.as_str() + try: + buf = w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + else: + return buf.as_str() def str_or_None_w(self, 
w_obj): if self.is_w(w_obj, self.w_None): From noreply at buildbot.pypy.org Thu Apr 24 05:36:24 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 05:36:24 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: reorder Message-ID: <20140424033624.8980F1C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70915:53a0d2a1bfde Date: 2014-04-23 22:19 -0400 http://bitbucket.org/pypy/pypy/changeset/53a0d2a1bfde/ Log: reorder diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1372,7 +1372,7 @@ BUF_WRITABLE = 4 def check_buf_flags(self, flags, readonly): - if flags & self.BUF_WRITABLE == self.BUF_WRITABLE and readonly: + if readonly and flags & self.BUF_WRITABLE == self.BUF_WRITABLE: raise oefmt(self.w_BufferError, "Object is not writable.") def buffer_w(self, w_obj, flags): From noreply at buildbot.pypy.org Thu Apr 24 05:36:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 05:36:25 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix usage of bufferstr_w in unicodeobject Message-ID: <20140424033625.C03FC1C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70916:da264ab81348 Date: 2014-04-23 22:54 -0400 http://bitbucket.org/pypy/pypy/changeset/da264ab81348/ Log: fix usage of bufferstr_w in unicodeobject diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -448,12 +448,12 @@ if errors is None or errors == 'strict': if encoding == 'ascii': # XXX error handling - s = space.bufferstr_w(w_obj) + s = space.charbuf_w(w_obj) eh = unicodehelper.decode_error_handler(space) return space.wrap(str_decode_ascii( s, len(s), None, final=True, errorhandler=eh)[0]) if encoding == 'utf-8': - s = 
space.bufferstr_w(w_obj) + s = space.charbuf_w(w_obj) eh = unicodehelper.decode_error_handler(space) return space.wrap(str_decode_utf_8( s, len(s), None, final=True, errorhandler=eh, From noreply at buildbot.pypy.org Thu Apr 24 05:36:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 05:36:26 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix usage of bufferstr_w in _io Message-ID: <20140424033626.F3EAC1C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70917:48ee80cfca6b Date: 2014-04-23 23:20 -0400 http://bitbucket.org/pypy/pypy/changeset/48ee80cfca6b/ Log: fix usage of bufferstr_w in _io diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1416,7 +1416,20 @@ @specialize.arg(1) def getarg_w(self, code, w_obj): - if code == 'w*': + if code == 's*': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.readbuf_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).readbuf_w(self) + try: + return w_obj.buffer_w(self, 0) + except TypeError: + pass + try: + return w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + elif code == 'w*': try: try: return w_obj.buffer_w(self, self.BUF_WRITABLE) diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -701,7 +701,7 @@ def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() size = len(data) with self.lock: diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -50,10 +50,7 @@ def write_w(self, space, 
w_data): self._check_closed(space) - if space.isinstance_w(w_data, space.w_unicode): - raise OperationError(space.w_TypeError, space.wrap( - "bytes string of buffer expected")) - buf = space.bufferstr_w(w_data) + buf = space.buffer_w(w_data, space.BUF_CONTIG_RO).as_str() length = len(buf) if length <= 0: return space.wrap(0) diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -333,7 +333,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() try: n = os.write(self.fd, data) From noreply at buildbot.pypy.org Thu Apr 24 05:36:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 05:36:28 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix test_ztranslations Message-ID: <20140424033628.2B60C1C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70918:6138264b2697 Date: 2014-04-23 23:25 -0400 http://bitbucket.org/pypy/pypy/changeset/6138264b2697/ Log: fix test_ztranslations diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -40,6 +40,9 @@ def setclass(self, space, w_subtype): is_root(w_subtype) + def buffer_w(self, space, flags): + return Buffer() + def str_w(self, space): return NonConstant("foobar") @@ -296,14 +299,6 @@ ec._py_repr = None return ec - def readbuf_w(self, w_obj): - is_root(w_obj) - return Buffer() - - def writebuf_w(self, w_obj): - is_root(w_obj) - return Buffer() - def unicode_from_object(self, w_obj): return w_some_obj() From noreply at buildbot.pypy.org Thu Apr 24 05:36:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 05:36:29 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix usage of bufferstr_w in 
_file Message-ID: <20140424033629.597031C06C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70919:b0b180cf068a Date: 2014-04-23 23:33 -0400 http://bitbucket.org/pypy/pypy/changeset/b0b180cf068a/ Log: fix usage of bufferstr_w in _file diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -267,9 +267,14 @@ def direct_write(self, w_data): space = self.space - if not self.binary and space.isinstance_w(w_data, space.w_unicode): - w_data = space.call_method(w_data, "encode", space.wrap(self.encoding), space.wrap(self.errors)) - data = space.bufferstr_w(w_data) + if self.binary: + data = space.getarg_w('s*', w_data).as_str() + else: + if space.isinstance_w(w_data, space.w_unicode): + w_data = space.call_method(w_data, "encode", + space.wrap(self.encoding), + space.wrap(self.errors)) + data = space.charbuf_w(w_data) self.do_direct_write(data) def do_direct_write(self, data): From noreply at buildbot.pypy.org Thu Apr 24 08:19:42 2014 From: noreply at buildbot.pypy.org (dalcinl) Date: Thu, 24 Apr 2014 08:19:42 +0200 (CEST) Subject: [pypy-commit] cffi default: CPython: Add explicit typecasts in _cffi_to_c_int() Message-ID: <20140424061942.643AA1C02F2@cobra.cs.uni-duesseldorf.de> Author: Lisandro Dalcin Branch: Changeset: r1505:6585558c79af Date: 2014-04-23 18:58 +0300 http://bitbucket.org/cffi/cffi/changeset/6585558c79af/ Log: CPython: Add explicit typecasts in _cffi_to_c_int() - Silent GCC -Wsign-compare warning: signed and unsigned type in conditional expression diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -824,14 +824,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? 
_cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ From noreply at buildbot.pypy.org Thu Apr 24 09:40:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 09:40:33 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: clean up buffer/memoryview setitem Message-ID: <20140424074033.1620B1C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70920:fc458f88faff Date: 2014-04-24 01:44 -0400 http://bitbucket.org/pypy/pypy/changeset/fc458f88faff/ Log: clean up buffer/memoryview setitem diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -13,3 +13,15 @@ assert b[-1] == '*' assert b[-2] == '-' assert b[-3] == '+' + exc = raises(TypeError, "b[3] = 'abc'") + assert str(exc.value) == "right operand must be a single byte" + exc = raises(TypeError, "b[3:5] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + exc = raises(TypeError, "b[3:7:2] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + + b = bytebuffer(10) + b[1:3] = 'xy' + assert str(b) == "\x00xy" + "\x00" * 7 + b[4:8:2] = 'zw' + assert str(b) == "\x00xy\x00z\x00w" + "\x00" * 3 diff --git 
a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -10,7 +10,6 @@ from pypy.interpreter.typedef import TypeDef from rpython.rlib.objectmodel import compute_hash from rpython.rlib.rstring import StringBuilder -from pypy.objspace.std.memoryobject import _buffer_setitem class W_Buffer(W_Root): @@ -71,12 +70,26 @@ res = self.buf.getslice(start, stop, step, size) return space.wrap(res) - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): + def descr_setitem(self, space, w_index, w_obj): if not self.buf.is_writable(): raise OperationError(space.w_TypeError, space.wrap("buffer is read-only")) - _buffer_setitem(space, self.buf, w_index, newstring) + start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) + value = space.readbuf_w(w_obj) + if step == 0: # index only + if value.getlength() != 1: + msg = "right operand must be a single byte" + raise OperationError(space.w_TypeError, space.wrap(msg)) + self.buf.setitem(start, value.getitem(0)) + else: + if value.getlength() != size: + msg = "right operand length must match slice length" + raise OperationError(space.w_TypeError, space.wrap(msg)) + if step == 1: + self.buf.setslice(start, value.as_str()) + else: + for i in range(size): + self.buf.setitem(start + i * step, value.getitem(i)) def descr_str(self, space): return space.wrap(self.buf.as_str()) diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -10,25 +10,6 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty -def _buffer_setitem(space, buf, w_index, newstring): - start, stop, step, size = space.decode_index4(w_index, buf.getlength()) - if step == 0: # index only - if len(newstring) != 1: - msg = 'buffer[index]=x: x must be a single character' - raise OperationError(space.w_TypeError, 
space.wrap(msg)) - char = newstring[0] # annotator hint - buf.setitem(start, char) - elif step == 1: - if len(newstring) != size: - msg = "right operand length must match slice length" - raise OperationError(space.w_ValueError, space.wrap(msg)) - buf.setslice(start, newstring) - else: - raise OperationError(space.w_ValueError, - space.wrap("buffer object does not support" - " slicing with a step")) - - class W_MemoryView(W_Root): """Implement the built-in 'memoryview' type as a wrapper around an interp-level buffer. @@ -40,7 +21,8 @@ self.buf = buf def buffer_w(self, space, flags): - return space.buffer_w(self.obj, flags) + space.check_buf_flags(flags, self.buf.readonly) + return self.buf @staticmethod def descr_new_memoryview(space, w_subtype, w_object): @@ -101,22 +83,28 @@ def descr_getitem(self, space, w_index): start, stop, step = space.decode_index(w_index, self.getlength()) + if step not in (0, 1): + raise OperationError(space.w_NotImplementedError, space.wrap("")) if step == 0: # index only return space.wrap(self.buf.getitem(start)) + res = self.getslice(start, stop) + return space.wrap(res) + + def descr_setitem(self, space, w_index, w_obj): + if not self.buf.is_writable(): + raise OperationError(space.w_TypeError, space.wrap( + "cannot modify read-only memory")) + start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) + if step not in (0, 1): + raise OperationError(space.w_NotImplementedError, space.wrap("")) + value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) + if value.getlength() != size: + raise OperationError(space.w_ValueError, space.wrap( + "cannot modify size of memoryview object")) + if step == 0: # index only + self.buf.setitem(start, value.getitem(0)) elif step == 1: - res = self.getslice(start, stop) - return space.wrap(res) - else: - raise OperationError(space.w_ValueError, - space.wrap("memoryview object does not support" - " slicing with a step")) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, 
space, w_index, newstring): - if not self.buf.is_writable(): - raise OperationError(space.w_TypeError, - space.wrap("cannot modify read-only memory")) - _buffer_setitem(space, self.buf, w_index, newstring) + self.buf.setslice(start, value.as_str()) def descr_len(self, space): return space.wrap(self.buf.getlength()) diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -7,11 +7,14 @@ assert v.tolist() == [97, 98, 99] assert v[1] == "b" assert v[-1] == "c" - raises(TypeError, "v[1] = 'x'") + exc = raises(TypeError, "v[1] = 'x'") + assert str(exc.value) == "cannot modify read-only memory" assert v.readonly is True w = v[1:234] assert isinstance(w, memoryview) assert len(w) == 2 + exc = raises(NotImplementedError, "v[0:2:2]") + assert str(exc.value) == "" def test_rw(self): data = bytearray('abcefg') @@ -21,7 +24,12 @@ assert data == bytearray(eval("b'zbcefg'")) v[1:4] = '123' assert data == bytearray(eval("b'z123fg'")) - raises((ValueError, TypeError), "v[2] = 'spam'") + v[0:3] = v[2:5] + assert data == bytearray(eval("b'23f3fg'")) + exc = raises(ValueError, "v[2] = 'spam'") + assert str(exc.value) == "cannot modify size of memoryview object" + exc = raises(NotImplementedError, "v[0:2:2] = 'spam'") + assert str(exc.value) == "" def test_memoryview_attrs(self): v = memoryview("a"*100) From noreply at buildbot.pypy.org Thu Apr 24 09:40:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 09:40:34 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix buffer conversion when readonly Message-ID: <20140424074034.41AA81C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70921:1e39f97163c6 Date: 2014-04-24 01:49 -0400 http://bitbucket.org/pypy/pypy/changeset/1e39f97163c6/ Log: fix buffer conversion when readonly diff --git 
a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,6 +139,8 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" exc = raises(TypeError, f.readinto, memoryview(b"hello")) assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -97,6 +97,8 @@ a2 = bytearray('testing') assert b.readinto(a1) == 1 assert b.readinto(a2) == 4 + exc = raises(TypeError, b.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" exc = raises(TypeError, b.readinto, memoryview(b"hello")) assert str(exc.value) == "must be read-write buffer, not memoryview" b.close() diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -135,6 +135,8 @@ a = bytearray('x' * 10) f = _io.FileIO(self.tmpfile, 'r+') assert f.readinto(a) == 10 + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" exc = raises(TypeError, f.readinto, memoryview(b"hello")) assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -22,6 +22,7 @@ self.buf = buf def buffer_w(self, space, flags): + space.check_buf_flags(flags, self.buf.readonly) return self.buf def readbuf_w(self, space): From noreply at buildbot.pypy.org Thu Apr 24 09:40:35 2014 From: 
noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 09:40:35 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix _socket recv_into Message-ID: <20140424074035.721C11C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70922:1b134cf7acc4 Date: 2014-04-24 01:55 -0400 http://bitbucket.org/pypy/pypy/changeset/1b134cf7acc4/ Log: fix _socket recv_into diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -419,7 +419,7 @@ @unwrap_spec(nbytes=int, flags=int) def recv_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.writebuf_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt @@ -430,7 +430,7 @@ @unwrap_spec(nbytes=int, flags=int) def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.writebuf_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -682,6 +682,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes = cli.recv_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_recvfrom_into(self): import socket import array @@ -697,6 +704,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes, addr = cli.recvfrom_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_family(self): import socket cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) From noreply at buildbot.pypy.org Thu Apr 24 09:40:36 
2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 09:40:36 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: unicodeobject has old buffer interface Message-ID: <20140424074036.A7E171C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70923:bd46658a05eb Date: 2014-04-24 03:38 -0400 http://bitbucket.org/pypy/pypy/changeset/bd46658a05eb/ Log: unicodeobject has old buffer interface diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -25,3 +25,5 @@ assert str(b) == "\x00xy" + "\x00" * 7 b[4:8:2] = 'zw' assert str(b) == "\x00xy\x00z\x00w" + "\x00" * 3 + b[6:10] = u'#' + assert str(b) == "\x00xy\x00z\x00#" + "\x00" * 3 diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,6 +139,8 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" exc = raises(TypeError, f.readinto, buffer(b"hello")) assert str(exc.value) == "must be read-write buffer, not buffer" exc = raises(TypeError, f.readinto, memoryview(b"hello")) @@ -239,7 +241,8 @@ import _io raw = _io.FileIO(self.tmpfile, 'w') f = _io.BufferedWriter(raw) - f.write("abcd") + f.write("ab") + f.write(u"cd") f.close() assert self.readfile() == "abcd" diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -38,6 +38,8 @@ f = _io.BytesIO() assert f.write("") == 0 assert f.write("hello") == 5 + exc = raises(TypeError, f.write, u"lo") + assert str(exc.value) == "'unicode' does not have the 
buffer interface" import gc; gc.collect() assert f.getvalue() == "hello" f.close() @@ -97,6 +99,8 @@ a2 = bytearray('testing') assert b.readinto(a1) == 1 assert b.readinto(a2) == 4 + exc = raises(TypeError, b.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" exc = raises(TypeError, b.readinto, buffer(b"hello")) assert str(exc.value) == "must be read-write buffer, not buffer" exc = raises(TypeError, b.readinto, memoryview(b"hello")) diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -82,7 +82,8 @@ import _io filename = self.tmpfile + '_w' f = _io.FileIO(filename, 'wb') - f.write("test") + f.write("te") + f.write(u"st") # try without flushing f2 = _io.FileIO(filename, 'rb') assert f2.read() == "test" @@ -135,6 +136,8 @@ a = bytearray('x' * 10) f = _io.FileIO(self.tmpfile, 'r+') assert f.readinto(a) == 10 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" exc = raises(TypeError, f.readinto, buffer(b"hello")) assert str(exc.value) == "must be read-write buffer, not buffer" exc = raises(TypeError, f.readinto, memoryview(b"hello")) diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -37,18 +37,7 @@ @staticmethod @unwrap_spec(offset=int, size=int) def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): - if space.isinstance_w(w_object, space.w_unicode): - # unicode objects support the old buffer interface - # but not the new buffer interface (change in python 2.7) - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - unistr = space.unicode_w(w_object) - builder = StringBuilder(len(unistr) * UNICODE_SIZE) - for unich in unistr: - pack_unichar(unich, builder) - buf = StringBuffer(builder.build()) - else: - 
buf = space.readbuf_w(w_object) - + buf = space.readbuf_w(w_object) if offset == 0 and size == -1: return W_Buffer(buf) # handle buffer slices diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -2,7 +2,8 @@ from rpython.rlib.objectmodel import ( compute_hash, compute_unique_id, import_from_mixin) -from rpython.rlib.rstring import UnicodeBuilder +from rpython.rlib.buffer import StringBuffer +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder from rpython.rlib.runicode import ( make_unicode_escape_function, str_decode_ascii, str_decode_utf_8, unicode_encode_ascii, unicode_encode_utf_8) @@ -64,6 +65,17 @@ def unicode_w(self, space): return self._value + def readbuf_w(self, space): + from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE + builder = StringBuilder(len(self._value) * UNICODE_SIZE) + for unich in self._value: + pack_unichar(unich, builder) + return StringBuffer(builder.build()) + + def writebuf_w(self, space): + raise OperationError(space.w_TypeError, space.wrap( + "cannot use unicode as modifiable buffer")) + def listview_unicode(w_self): return _create_list_from_unicode(w_self._value) From noreply at buildbot.pypy.org Thu Apr 24 09:40:37 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 09:40:37 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: correct _codecs.{char, read}buffer_encode Message-ID: <20140424074037.E2B981C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70924:1569ceab7693 Date: 2014-04-24 03:20 -0400 http://bitbucket.org/pypy/pypy/changeset/1569ceab7693/ Log: correct _codecs.{char,read}buffer_encode diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1411,7 +1411,7 @@ if self.is_none(w_obj): name = "None" 
else: - name = self.type(w_obj).getname(self) + name = self.type(w_obj).get_module_type_name() raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) @specialize.arg(1) @@ -1429,6 +1429,15 @@ return w_obj.readbuf_w(self) except TypeError: self._getarg_error("string or buffer", w_obj) + elif code == 's#': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.str_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).str_w(self) + try: + return w_obj.readbuf_w(self).as_str() + except TypeError: + self._getarg_error("string or read-only buffer") elif code == 'w*': try: try: @@ -1441,6 +1450,11 @@ return w_obj.writebuf_w(self) except TypeError: self._getarg_error("read-write buffer", w_obj) + elif code == 't#': + try: + return w_obj.charbuf_w(self) + except TypeError: + self._getarg_error("string or read-only character buffer", w_obj) else: assert False diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -72,8 +72,8 @@ 'utf_32_le_decode' : 'interp_codecs.utf_32_le_decode', 'utf_32_le_encode' : 'interp_codecs.utf_32_le_encode', 'utf_32_ex_decode' : 'interp_codecs.utf_32_ex_decode', - 'charbuffer_encode': 'interp_codecs.buffer_encode', - 'readbuffer_encode': 'interp_codecs.buffer_encode', + 'charbuffer_encode': 'interp_codecs.charbuffer_encode', + 'readbuffer_encode': 'interp_codecs.readbuffer_encode', 'charmap_decode' : 'interp_codecs.charmap_decode', 'charmap_encode' : 'interp_codecs.charmap_encode', 'escape_encode' : 'interp_codecs.escape_encode', diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -321,8 +321,14 @@ w_res = space.call_function(w_encoder, w_obj, space.wrap(errors)) return space.getitem(w_res, space.wrap(0)) - at unwrap_spec(s='bufferstr', errors='str_or_None') -def 
buffer_encode(space, s, errors='strict'): + at unwrap_spec(errors='str_or_None') +def readbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('s#', w_data) + return space.newtuple([space.wrap(s), space.wrap(len(s))]) + + at unwrap_spec(errors='str_or_None') +def charbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('t#', w_data) return space.newtuple([space.wrap(s), space.wrap(len(s))]) @unwrap_spec(errors=str) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -420,9 +420,13 @@ for (i, line) in enumerate(reader): assert line == s[i] - def test_array(self): + def test_buffer_encode(self): import _codecs, array - _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + assert _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + exc = raises(TypeError, _codecs.charbuffer_encode, array.array('c', 'spam')) + assert str(exc.value) == "must be string or read-only character buffer, not array.array" + assert _codecs.readbuffer_encode(u"test") == ('test', 4) + assert _codecs.charbuffer_encode(u"test") == ('test', 4) def test_utf8sig(self): import codecs diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -73,6 +73,9 @@ def get_module(self): return w_some_obj() + def get_module_type_name(self): + return self.name + def w_some_obj(): if NonConstant(False): return W_Root() diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -76,6 +76,8 @@ raise OperationError(space.w_TypeError, space.wrap( "cannot use unicode as modifiable buffer")) + charbuf_w = str_w + def listview_unicode(w_self): return _create_list_from_unicode(w_self._value) From noreply at buildbot.pypy.org 
Thu Apr 24 09:50:50 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 09:50:50 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix translation Message-ID: <20140424075050.982491C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70925:9859bb1e1b5e Date: 2014-04-24 03:50 -0400 http://bitbucket.org/pypy/pypy/changeset/9859bb1e1b5e/ Log: fix translation diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1437,7 +1437,7 @@ try: return w_obj.readbuf_w(self).as_str() except TypeError: - self._getarg_error("string or read-only buffer") + self._getarg_error("string or read-only buffer", w_obj) elif code == 'w*': try: try: From noreply at buildbot.pypy.org Thu Apr 24 10:17:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 10:17:22 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix cffi MiniBuffer setslice exception type Message-ID: <20140424081722.DA2D71C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70926:efb387f6f091 Date: 2014-04-24 04:16 -0400 http://bitbucket.org/pypy/pypy/changeset/efb387f6f091/ Log: fix cffi MiniBuffer setslice exception type diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray @@ -47,6 +47,14 @@ W_Buffer.__init__(self, buffer) self.keepalive = keepalive + def descr_setitem(self, space, w_index, w_obj): + try: + W_Buffer.descr_setitem(self, space, 
w_index, w_obj) + except OperationError as e: + if e.match(space, space.w_TypeError): + e.w_type = space.w_ValueError + raise + MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", From noreply at buildbot.pypy.org Thu Apr 24 13:17:06 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 24 Apr 2014 13:17:06 +0200 (CEST) Subject: [pypy-commit] benchmarks default: shutdown thread pools at the end of benchmark runs Message-ID: <20140424111706.9BCD31C03FC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r252:f232348bc821 Date: 2014-04-24 13:18 +0200 http://bitbucket.org/pypy/benchmarks/changeset/f232348bc821/ Log: shutdown thread pools at the end of benchmark runs diff --git a/multithread/btree/btree.py b/multithread/btree/btree.py --- a/multithread/btree/btree.py +++ b/multithread/btree/btree.py @@ -347,6 +347,9 @@ # print "tree:" # print tree + # shutdown current pool + set_thread_pool(None) + diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -49,6 +49,8 @@ def set_thread_pool(th): global _thread_pool + if _thread_pool: + _thread_pool.shutdown() _thread_pool = th diff --git a/multithread/mandelbrot/mandelbrot.py b/multithread/mandelbrot/mandelbrot.py --- a/multithread/mandelbrot/mandelbrot.py +++ b/multithread/mandelbrot/mandelbrot.py @@ -1,4 +1,4 @@ -from common.abstract_threading import Future, atomic +from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool import sys @@ -59,6 +59,7 @@ br, bi = 1.0, 1.5 width, height = 4096, 4096 + set_thread_pool(ThreadPool(threads)) step = (bi - ai) / threads res = [] ai = -1.5 @@ -71,6 +72,8 @@ )) res = [f() for f in res] + + set_thread_pool(None) return merge_imgs(res) diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ 
b/multithread/raytrace/raytrace.py @@ -165,6 +165,10 @@ print f() del futures[:] + # shutdown current pool + set_thread_pool(None) + + if __name__ == '__main__': run() diff --git a/multithread/skiplist/skiplist.py b/multithread/skiplist/skiplist.py --- a/multithread/skiplist/skiplist.py +++ b/multithread/skiplist/skiplist.py @@ -130,6 +130,10 @@ # print "list:" # slist.printList() + # shutdown current pool + set_thread_pool(None) + + From noreply at buildbot.pypy.org Thu Apr 24 13:45:30 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 24 Apr 2014 13:45:30 +0200 (CEST) Subject: [pypy-commit] stmgc default: do not signal a transaction to commit more than once. this is necessary to not Message-ID: <20140424114530.54B411C3512@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1181:7713ed439985 Date: 2014-04-24 13:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/7713ed439985/ Log: do not signal a transaction to commit more than once. this is necessary to not hurt performance (if this mechanism turns out to be useful at all) diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -263,7 +263,10 @@ /* we should commit soon, we caused an abort */ //signal_other_to_commit_soon(get_priv_segment(STM_SEGMENT->segment_num)); - stmcb_commit_soon(); + if (!STM_PSEGMENT->signalled_to_commit_soon) { + STM_PSEGMENT->signalled_to_commit_soon = true; + stmcb_commit_soon(); + } } } diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -192,6 +192,7 @@ assert(STM_PSEGMENT->transaction_state == TS_NONE); change_timing_state(STM_TIME_RUN_CURRENT); STM_PSEGMENT->start_time = tl->_timing_cur_start; + STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? 
TS_REGULAR : TS_INEVITABLE); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -162,6 +162,9 @@ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; + /* Already signalled to commit soon: */ + bool signalled_to_commit_soon; + /* For debugging */ #ifndef NDEBUG pthread_t running_pthread; diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -269,9 +269,11 @@ assert(_has_mutex()); /* never overwrite abort signals or safepoint requests (too messy to deal with) */ - if (!is_abort(other_pseg->pub.nursery_end) - && !pause_signalled) + if (!other_pseg->signalled_to_commit_soon + && !is_abort(other_pseg->pub.nursery_end) + && !pause_signalled) { other_pseg->pub.nursery_end = NSE_SIGCOMMITSOON; + } } static void signal_everybody_to_pause_running(void) @@ -342,6 +344,7 @@ previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); } + STM_PSEGMENT->signalled_to_commit_soon = true; stmcb_commit_soon(); if (!pause_signalled) { STM_SEGMENT->nursery_end = NURSERY_END; From noreply at buildbot.pypy.org Thu Apr 24 16:57:32 2014 From: noreply at buildbot.pypy.org (Patrick Rein) Date: Thu, 24 Apr 2014 16:57:32 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: reenabled automatic configuration of stm in targetimageloadingsmalltalk Message-ID: <20140424145732.E770A1D23E4@cobra.cs.uni-duesseldorf.de> Author: Patrick Rein Branch: Changeset: r795:a12c5ad93421 Date: 2014-04-24 15:56 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a12c5ad93421/ Log: reenabled automatic configuration of stm in targetimageloadingsmalltalk diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -223,9 +223,8 @@ # driver.config.translation.gcrootfinder = "stm" from rpython.rlib import rgc if hasattr(rgc, "stm_is_enabled"): - pass - #driver.config.translation.stm = 
True - #driver.config.translation.thread = True + driver.config.translation.stm = True + driver.config.translation.thread = True return entry_point, None From noreply at buildbot.pypy.org Thu Apr 24 19:05:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 19:05:48 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: simplify buffer.readonly/is_writable() Message-ID: <20140424170548.332641D23E1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70927:1b961b9e0f74 Date: 2014-04-24 04:29 -0400 http://bitbucket.org/pypy/pypy/changeset/1b961b9e0f74/ Log: simplify buffer.readonly/is_writable() diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1218,7 +1218,7 @@ return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, w_subtype=w_subtype, w_base=w_buffer, - writable=buf.is_writable()) + writable=not buf.readonly) order = order_converter(space, w_order, NPY.CORDER) if order == NPY.CORDER: diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -61,7 +61,7 @@ return space.wrap(res) def descr_setitem(self, space, w_index, w_obj): - if not self.buf.is_writable(): + if self.buf.readonly: raise OperationError(space.w_TypeError, space.wrap("buffer is read-only")) start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) @@ -117,10 +117,10 @@ return space.call_method(w_string, '__mul__', w_times) def descr_repr(self, space): - if self.buf.is_writable(): + if self.buf.readonly: + info = 'read-only buffer' + else: info = 'read-write buffer' - else: - info = 'read-only buffer' addrstring = self.getaddrstring(space) return space.wrap("<%s for 0x%s, size %d>" % diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- 
a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -91,7 +91,7 @@ return space.wrap(res) def descr_setitem(self, space, w_index, w_obj): - if not self.buf.is_writable(): + if self.buf.readonly: raise OperationError(space.w_TypeError, space.wrap( "cannot modify read-only memory")) start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) @@ -119,7 +119,7 @@ return space.wrap(1) def w_is_readonly(self, space): - return space.wrap(not self.buf.is_writable()) + return space.wrap(self.buf.readonly) def w_get_shape(self, space): return space.newtuple([space.wrap(self.getlength())]) diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -37,9 +37,6 @@ def get_raw_address(self): raise ValueError("no raw buffer") - def is_writable(self): - return not self.readonly - class StringBuffer(Buffer): __slots__ = ['value'] From noreply at buildbot.pypy.org Thu Apr 24 19:05:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 19:05:49 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: test/fix struct pack_into/unpack_from behavior Message-ID: <20140424170549.819B41D23E1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70928:c25773816a8a Date: 2014-04-24 13:04 -0400 http://bitbucket.org/pypy/pypy/changeset/c25773816a8a/ Log: test/fix struct pack_into/unpack_from behavior diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1416,6 +1416,10 @@ @specialize.arg(1) def getarg_w(self, code, w_obj): + if code == 'z*': + if self.is_none(w_obj): + return None + code = 's*' if code == 's*': if self.isinstance_w(w_obj, self.w_str): return w_obj.readbuf_w(self) diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ 
b/pypy/module/struct/__init__.py @@ -48,13 +48,13 @@ interpleveldefs = { 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', + 'pack_into': 'interp_struct.pack_into', 'unpack': 'interp_struct.unpack', + 'unpack_from': 'interp_struct.unpack_from', 'Struct': 'interp_struct.W_Struct', } appleveldefs = { 'error': 'app_struct.error', - 'pack_into': 'app_struct.pack_into', - 'unpack_from': 'app_struct.unpack_from', } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -2,23 +2,8 @@ """ Application-level definitions for the struct module. """ -import struct class error(Exception): """Exception raised on various occasions; argument is a string describing what is wrong.""" - -# XXX inefficient -def pack_into(fmt, buf, offset, *args): - data = struct.pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -# XXX inefficient -def unpack_from(fmt, buf, offset=0): - size = struct.calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return struct.unpack(fmt, data) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -5,7 +5,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.module.struct.formatiterator import ( PackFormatIterator, UnpackFormatIterator @@ -29,6 +29,7 @@ raise OperationError(w_error, space.wrap(e.msg)) return fmtiter.totalsize + @unwrap_spec(format=str) def pack(space, format, args_w): if jit.isconstant(format): @@ -47,6 +48,23 @@ return 
space.wrap(fmtiter.result.build()) +# XXX inefficient + at unwrap_spec(format=str, offset=int) +def pack_into(space, format, w_buf, offset, args_w): + res = pack(space, format, args_w).str_w(space) + buf = space.writebuf_w(w_buf) + if offset < 0: + offset += buf.getlength() + size = len(res) + if offset < 0 or (buf.getlength() - offset) < size: + w_module = space.getbuiltinmodule('struct') + w_error = space.getattr(w_module, space.wrap('error')) + raise oefmt(w_error, + "pack_into requires a buffer of at least %d bytes", + size) + buf.setslice(offset, res) + + @unwrap_spec(format=str, input='bufferstr') def unpack(space, format, input): fmtiter = UnpackFormatIterator(space, input) @@ -61,6 +79,27 @@ return space.newtuple(fmtiter.result_w[:]) +# XXX inefficient + at unwrap_spec(format=str, offset=int) +def unpack_from(space, format, w_buf, offset=0): + size = _calcsize(space, format) + buf = space.getarg_w('z*', w_buf) + if buf is None: + w_module = space.getbuiltinmodule('struct') + w_error = space.getattr(w_module, space.wrap('error')) + raise oefmt(w_error, "unpack_from requires a buffer argument") + if offset < 0: + offset += buf.getlength() + if offset < 0 or (buf.getlength() - offset) < size: + w_module = space.getbuiltinmodule('struct') + w_error = space.getattr(w_module, space.wrap('error')) + raise oefmt(w_error, + "unpack_from requires a buffer of at least %d bytes", + size) + data = buf.getslice(offset, offset + size, 1, size) + return unpack(space, format, data) + + class W_Struct(W_Root): _immutable_fields_ = ["format", "size"] diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -2,12 +2,11 @@ Tests for the struct module implemented at interp-level in pypy/module/struct. 
""" -import py from rpython.rlib.rstruct.nativefmttable import native_is_bigendian class AppTestStruct(object): - spaceconfig = dict(usemodules=['struct']) + spaceconfig = dict(usemodules=['struct', 'array']) def setup_class(cls): """ @@ -26,7 +25,6 @@ """ assert issubclass(self.struct.error, Exception) - def test_calcsize_standard(self): """ Check the standard size of the various format characters. @@ -52,14 +50,12 @@ # test with some repetitions and multiple format characters assert calcsize('=bQ3i') == 1 + 8 + 3*4 - def test_index(self): class X(object): def __index__(self): return 3 assert self.struct.unpack("i", self.struct.pack("i", X()))[0] == 3 - def test_deprecation_warning(self): import warnings for code in 'b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'Q': @@ -70,7 +66,6 @@ assert str(w[0].message) == "integer argument expected, got non-integer" assert w[0].category is DeprecationWarning - def test_pack_standard_little(self): """ Check packing with the '<' format specifier. @@ -84,7 +79,6 @@ assert pack("' format specifier. @@ -112,7 +105,6 @@ assert pack(">q", -0x41B2B3B4B5B6B7B8) == '\xbeMLKJIHH' assert pack(">Q", 0x8142434445464748) == '\x81BCDEFGH' - def test_unpack_standard_big(self): """ Check unpacking with the '>' format specifier. @@ -126,7 +118,6 @@ assert unpack(">q", '\xbeMLKJIHH') == (-0x41B2B3B4B5B6B7B8,) assert unpack(">Q", '\x81BCDEFGH') == (0x8142434445464748,) - def test_calcsize_native(self): """ Check that the size of the various format characters is reasonable. @@ -156,7 +147,6 @@ assert calcsize('ibb') == calcsize('i') + 2 * calcsize('b') assert calcsize('ih') == calcsize('i') + calcsize('h') - def test_pack_native(self): """ Check packing with the native format. @@ -174,7 +164,6 @@ assert res[sizeofi:] == '\x05' + '\x00' * (sizeofi-1) assert pack("q", -1) == '\xff' * calcsize("q") - def test_unpack_native(self): """ Check unpacking with the native format. 
@@ -185,7 +174,6 @@ assert unpack("bi", pack("bi", -2, 5)) == (-2, 5) assert unpack("q", '\xff' * calcsize("q")) == (-1,) - def test_string_format(self): """ Check the 's' format character. @@ -200,7 +188,6 @@ assert unpack("5s3s", "worldspa") == ("world", "spa") assert unpack("0s", "") == ("",) - def test_pascal_format(self): """ Check the 'p' format character. @@ -220,7 +207,6 @@ assert unpack("1p", "\x03") == ("",) assert unpack("300p", longpacked300) == (longstring[:255],) - def test_char_format(self): """ Check the 'c' format character. @@ -232,7 +218,6 @@ assert unpack("c", "?") == ("?",) assert unpack("5c", "a\xc0\x00\n-") == ("a", "\xc0", "\x00", "\n", "-") - def test_pad_format(self): """ Check the 'x' format character. @@ -244,7 +229,6 @@ assert unpack("x", "?") == () assert unpack("5x", "hello") == () - def test_native_floats(self): """ Check the 'd' and 'f' format characters on native packing. @@ -261,7 +245,6 @@ assert res != 12.34 # precision lost assert abs(res - 12.34) < 1E-6 - def test_standard_floats(self): """ Check the 'd' and 'f' format characters on standard packing. @@ -280,7 +263,6 @@ def test_bool(self): pack = self.struct.pack - unpack = self.struct.unpack assert pack("!?", True) == '\x01' assert pack(">?", True) == '\x01' assert pack("!?", False) == '\x00' @@ -343,15 +325,12 @@ raises(error, pack, "b", 150) # argument out of range # XXX the accepted ranges still differs between PyPy and CPython - def test_overflow_error(self): """ Check OverflowError cases. 
""" import sys calcsize = self.struct.calcsize - pack = self.struct.pack - unpack = self.struct.unpack someerror = (OverflowError, self.struct.error) raises(someerror, calcsize, "%dc" % (sys.maxint+1,)) raises(someerror, calcsize, "999999999999999999999999999c") @@ -360,7 +339,6 @@ raises(someerror, calcsize, "c%dc" % (sys.maxint,)) raises(someerror, calcsize, "%dci" % (sys.maxint,)) - def test_unicode(self): """ A PyPy extension: accepts the 'u' format character in native mode, @@ -374,7 +352,6 @@ assert data == str(buffer(u'XYZ')) assert self.struct.unpack("uuu", data) == (u'X', u'Y', u'Z') - def test_unpack_buffer(self): """ Buffer objects can be passed to struct.unpack(). @@ -383,6 +360,34 @@ assert self.struct.unpack("ii", b) == (62, 12) raises(self.struct.error, self.struct.unpack, "i", b) + def test_pack_unpack_buffer(self): + import array + b = array.array('c', '\x00' * 19) + sz = self.struct.calcsize("ii") + for offset in [2, -17]: + self.struct.pack_into("ii", b, offset, 17, 42) + assert str(buffer(b)) == ('\x00' * 2 + + self.struct.pack("ii", 17, 42) + + '\x00' * (19-sz-2)) + exc = raises(TypeError, self.struct.pack_into, "ii", 'test', 0, 17, 42) + assert str(exc.value) == "Cannot use string as modifiable buffer" + exc = raises(self.struct.error, self.struct.pack_into, "ii", b[0:1], 0, 17, 42) + assert str(exc.value) == "pack_into requires a buffer of at least 8 bytes" + + assert self.struct.unpack_from("ii", b, 2) == (17, 42) + assert self.struct.unpack_from("ii", b, -17) == (17, 42) + assert self.struct.unpack_from("ii", buffer(b, 2)) == (17, 42) + assert self.struct.unpack_from("ii", buffer(b), 2) == (17, 42) + assert self.struct.unpack_from("ii", memoryview(buffer(b)), 2) == (17, 42) + exc = raises(TypeError, self.struct.unpack_from, "ii", 123) + assert 'must be string or buffer, not int' in str(exc.value) + exc = raises(self.struct.error, self.struct.unpack_from, "ii", None) + assert str(exc.value) == "unpack_from requires a buffer argument" + exc = 
raises(self.struct.error, self.struct.unpack_from, "ii", '') + assert str(exc.value) == "unpack_from requires a buffer of at least 8 bytes" + exc = raises(self.struct.error, self.struct.unpack_from, "ii", memoryview('')) + assert str(exc.value) == "unpack_from requires a buffer of at least 8 bytes" + def test___float__(self): class MyFloat(object): def __init__(self, x): diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -449,8 +449,11 @@ def readbuf_w(self, space): return StringBuffer(self._value) - def charbuf_w(self, space): - return self._value + def writebuf_w(self, space): + raise OperationError(space.w_TypeError, space.wrap( + "Cannot use string as modifiable buffer")) + + charbuf_w = str_w def listview_bytes(self): return _create_list_from_bytes(self._value) From noreply at buildbot.pypy.org Thu Apr 24 19:14:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 19:14:17 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: proper exception is now raised here Message-ID: <20140424171417.B88BB1C1008@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70929:c913c204f19d Date: 2014-04-24 13:13 -0400 http://bitbucket.org/pypy/pypy/changeset/c913c204f19d/ Log: proper exception is now raised here diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -115,8 +115,8 @@ self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object - self.assertRaises((ValueError, TypeError), setitem, 0, b"") - self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") + self.assertRaises(ValueError, setitem, 0, b"") + self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, 
setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") From noreply at buildbot.pypy.org Thu Apr 24 21:37:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 21:37:39 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: cleanup Message-ID: <20140424193739.0C3E01C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70930:69b1380478f3 Date: 2014-04-24 13:55 -0400 http://bitbucket.org/pypy/pypy/changeset/69b1380478f3/ Log: cleanup diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -58,10 +58,10 @@ raise oefmt(space.w_IndexError, "bytearray index out of range") return space.wrap(ord(character)) - def _val(self, space): - return space.buffer_w(self, space.BUF_SIMPLE).as_str() + _val = charbuf_w - def _op_val(self, space, w_other): + @staticmethod + def _op_val(space, w_other): return space.buffer_w(w_other, space.BUF_SIMPLE).as_str() def _chr(self, char): diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -477,8 +477,7 @@ def _len(self): return len(self._value) - def _val(self, space): - return self._value + _val = str_w @staticmethod def _op_val(space, w_other): diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -100,10 +100,10 @@ def _len(self): return len(self._value) - def _val(self, space): - return self._value + _val = unicode_w - def _op_val(self, space, w_other): + @staticmethod + def _op_val(space, w_other): if isinstance(w_other, W_UnicodeObject): return w_other._value if space.isinstance_w(w_other, space.w_str): From noreply at buildbot.pypy.org Thu Apr 24 21:37:40 2014 From: noreply at buildbot.pypy.org 
(bdkearns) Date: Thu, 24 Apr 2014 21:37:40 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix bytearray buffer readonly flag Message-ID: <20140424193740.4AB0A1C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70931:ad400df172a4 Date: 2014-04-24 15:13 -0400 http://bitbucket.org/pypy/pypy/changeset/ad400df172a4/ Log: fix bytearray buffer readonly flag diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -143,6 +143,8 @@ assert str(exc.value) == "cannot use unicode as modifiable buffer" exc = raises(TypeError, f.readinto, buffer(b"hello")) assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" exc = raises(TypeError, f.readinto, memoryview(b"hello")) assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -103,6 +103,8 @@ assert str(exc.value) == "cannot use unicode as modifiable buffer" exc = raises(TypeError, b.readinto, buffer(b"hello")) assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" exc = raises(TypeError, b.readinto, memoryview(b"hello")) assert str(exc.value) == "must be read-write buffer, not memoryview" b.close() diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -140,6 +140,8 @@ assert str(exc.value) == "cannot use unicode as modifiable buffer" exc = 
raises(TypeError, f.readinto, buffer(b"hello")) assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" exc = raises(TypeError, f.readinto, memoryview(b"hello")) assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -28,13 +28,13 @@ return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) def buffer_w(self, space, flags): - return BytearrayBuffer(self.data) + return BytearrayBuffer(self.data, False) def readbuf_w(self, space): - return BytearrayBuffer(self.data) + return BytearrayBuffer(self.data, True) def writebuf_w(self, space): - return BytearrayBuffer(self.data) + return BytearrayBuffer(self.data, False) def charbuf_w(self, space): return ''.join(self.data) @@ -1131,9 +1131,9 @@ class BytearrayBuffer(Buffer): _immutable_ = True - def __init__(self, data): + def __init__(self, data, readonly): self.data = data - self.readonly = False + self.readonly = readonly def getlength(self): return len(self.data) diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -426,10 +426,10 @@ b = bytearray('abcdefghi') buf = buffer(b) assert buf[2] == 'c' - buf[3] = 'D' - assert b == 'abcDefghi' - buf[4:6] = 'EF' - assert b == 'abcDEFghi' + exc = raises(TypeError, "buf[2] = 'D'") + assert str(exc.value) == "buffer is read-only" + exc = raises(TypeError, "buf[4:6] = 'EF'") + assert str(exc.value) == "buffer is read-only" def test_decode(self): b = bytearray('abcdefghi') From noreply at buildbot.pypy.org Thu Apr 24 21:37:41 2014 From: noreply at buildbot.pypy.org 
(bdkearns) Date: Thu, 24 Apr 2014 21:37:41 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix readonly check in buffer.writebuf_w Message-ID: <20140424193741.793D31C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70932:494600a98b1a Date: 2014-04-24 15:31 -0400 http://bitbucket.org/pypy/pypy/changeset/494600a98b1a/ Log: fix readonly check in buffer.writebuf_w diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -369,6 +369,8 @@ assert str(buffer(b)) == ('\x00' * 2 + self.struct.pack("ii", 17, 42) + '\x00' * (19-sz-2)) + exc = raises(TypeError, self.struct.pack_into, "ii", buffer(b), 0, 17, 42) + assert str(exc.value) == "buffer is read-only" exc = raises(TypeError, self.struct.pack_into, "ii", 'test', 0, 17, 42) assert str(exc.value) == "Cannot use string as modifiable buffer" exc = raises(self.struct.error, self.struct.pack_into, "ii", b[0:1], 0, 17, 42) diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -29,6 +29,9 @@ return self.buf def writebuf_w(self, space): + if self.buf.readonly: + raise OperationError(space.w_TypeError, space.wrap( + "buffer is read-only")) return self.buf def charbuf_w(self, space): From noreply at buildbot.pypy.org Thu Apr 24 21:37:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 21:37:42 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: cleanup fcntl/ioctl behavior Message-ID: <20140424193742.AD5991C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70933:2f1472af7614 Date: 2014-04-24 15:33 -0400 http://bitbucket.org/pypy/pypy/changeset/2f1472af7614/ Log: cleanup fcntl/ioctl behavior diff --git a/pypy/module/fcntl/interp_fcntl.py 
b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -1,6 +1,6 @@ from rpython.rtyper.tool import rffi_platform as platform from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, wrap_oserror, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib import rposix from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -92,33 +92,27 @@ op = rffi.cast(rffi.INT, op) # C long => C int try: - intarg = space.int_w(w_arg) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - intarg = rffi.cast(rffi.INT, intarg) # C long => C int - rv = fcntl_int(fd, op, intarg) - if rv < 0: - raise _get_error(space, "fcntl") - return space.wrap(rv) - - try: - arg = space.bufferstr_w(w_arg) + arg = space.getarg_w('s#', w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: ll_arg = rffi.str2charp(arg) - rv = fcntl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, "fcntl") - return space.wrap(arg) + try: + rv = fcntl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "fcntl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') - raise OperationError(space.w_TypeError, - space.wrap("int or string or buffer required")) + intarg = space.int_w(w_arg) + intarg = rffi.cast(rffi.INT, intarg) # C long => C int + rv = fcntl_int(fd, op, intarg) + if rv < 0: + raise _get_error(space, "fcntl") + return space.wrap(rv) @unwrap_spec(op=int) def flock(space, w_fd, op): @@ -207,50 +201,50 @@ fd = space.c_filedescriptor_w(w_fd) op = rffi.cast(rffi.INT, op) # C long => C int - if mutate_flag != 0: - try: - rwbuffer = space.writebuf_w(w_arg) - except 
OperationError, e: - if not e.match(space, space.w_TypeError): - raise - if mutate_flag > 0: - raise - else: - arg = rwbuffer.as_str() - ll_arg = rffi.str2charp(arg) - rv = ioctl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, "ioctl") - rwbuffer.setslice(0, arg) - return space.wrap(rv) - try: - intarg = space.int_w(w_arg) + rwbuffer = space.writebuf_w(w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: - intarg = rffi.cast(rffi.INT, intarg) # C long => C int - rv = ioctl_int(fd, op, intarg) - if rv < 0: - raise _get_error(space, "ioctl") - return space.wrap(rv) + arg = rwbuffer.as_str() + ll_arg = rffi.str2charp(arg) + try: + rv = ioctl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "ioctl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + if mutate_flag != 0: + rwbuffer.setslice(0, arg) + return space.wrap(rv) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') + + if mutate_flag != -1: + raise OperationError(space.w_TypeError, space.wrap( + "ioctl requires a file or file descriptor, an integer " + "and optionally an integer or buffer argument")) try: - arg = space.bufferstr_w(w_arg) + arg = space.getarg_w('s#', w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: ll_arg = rffi.str2charp(arg) - rv = ioctl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, "ioctl") - return space.wrap(arg) + try: + rv = ioctl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "ioctl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') - raise OperationError(space.w_TypeError, - space.wrap("int or string or buffer required")) + intarg = space.int_w(w_arg) + intarg = rffi.cast(rffi.INT, intarg) # C long => C int + rv = ioctl_int(fd, 
op, intarg) + if rv < 0: + raise _get_error(space, "ioctl") + return space.wrap(rv) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -51,6 +51,8 @@ assert fcntl.fcntl(f, 1, 0) == 0 assert fcntl.fcntl(f, 2, "foo") == "foo" assert fcntl.fcntl(f, 2, buffer("foo")) == "foo" + exc = raises(TypeError, fcntl.fcntl, f, 2, memoryview("foo")) + assert 'integer' in str(exc.value) try: os.O_LARGEFILE @@ -226,6 +228,18 @@ assert res == 0 assert buf.tostring() == expected + buf = array.array('i', [0]) + res = fcntl.ioctl(mfd, TIOCGPGRP, buffer(buf)) + assert res == expected + assert buf.tostring() == '\x00' * 4 + + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview('abc')) + assert 'integer' in str(exc.value) + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, buffer(buf), False) + assert str(exc.value) == "ioctl requires a file or file descriptor, an integer and optionally an integer or buffer argument" + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview('abc'), False) + assert str(exc.value) == "ioctl requires a file or file descriptor, an integer and optionally an integer or buffer argument" + res = fcntl.ioctl(mfd, TIOCGPGRP, buf, False) assert res == expected From noreply at buildbot.pypy.org Thu Apr 24 22:30:31 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 24 Apr 2014 22:30:31 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: render (c) literally, not as a copyright symbol Message-ID: <20140424203031.48D411C02F2@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r492:2cd907095b25 Date: 2014-04-24 23:11 +0300 http://bitbucket.org/pypy/pypy.org/changeset/2cd907095b25/ Log: render (c) literally, not as a copyright symbol diff --git a/numpydonate.html b/numpydonate.html --- a/numpydonate.html +++ b/numpydonate.html @@ -76,7 +76,7 @@ at the latest, we will try our best to make 
PyPy support NumPy anyway. We however reserve the right to shift any unused funds to other PyPy activities when that date is reached. Of course, since the Conservancy is a -501©(3) charitable organization incorporated in NY, USA, all funds will, +501(c)(3) charitable organization incorporated in NY, USA, all funds will, regardless of their use, be spent in a way that benefits the general public, the advancement of Open Source and Free Software, and in particular the PyPy community and the PyPy codebase.

      diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -85,7 +85,7 @@ at the latest, we will try our best to make PyPy support Python 3 anyway. We however reserve the right to shift any unused funds to other PyPy activities when that date is reached. Of course, since the Conservancy is a -501©(3) charitable organization incorporated in NY, USA, all funds will, +501(c)(3) charitable organization incorporated in NY, USA, all funds will, regardless of their use, be spent in a way that benefits the general public, the advancement of Open Source and Free Software, and in particular the PyPy community and the PyPy codebase.

      From noreply at buildbot.pypy.org Thu Apr 24 22:30:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 24 Apr 2014 22:30:32 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: formatting Message-ID: <20140424203032.42CB41C02F2@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r493:f2d7cb004fcf Date: 2014-04-24 23:30 +0300 http://bitbucket.org/pypy/pypy.org/changeset/f2d7cb004fcf/ Log: formatting diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -21,7 +21,7 @@
    • -
    • diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -21,7 +21,7 @@
    • -
    • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -23,7 +23,7 @@
    • -
    • From noreply at buildbot.pypy.org Thu Apr 24 22:52:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:08 +0200 (CEST) Subject: [pypy-commit] pypy default: reduce diff with vendor/stdlib Message-ID: <20140424205208.6B6091C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70934:4f887c004422 Date: 2014-04-24 13:28 -0400 http://bitbucket.org/pypy/pypy/changeset/4f887c004422/ Log: reduce diff with vendor/stdlib diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) From noreply at buildbot.pypy.org Thu Apr 24 22:52:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:09 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: object reference no longer necessary here Message-ID: <20140424205209.B42F81C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: 
r70935:58c1b432afdb Date: 2014-04-24 15:43 -0400 http://bitbucket.org/pypy/pypy/changeset/58c1b432afdb/ Log: object reference no longer necessary here diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -15,9 +15,8 @@ an interp-level buffer. """ - def __init__(self, obj, buf): + def __init__(self, buf): assert isinstance(buf, Buffer) - self.obj = obj self.buf = buf def buffer_w(self, space, flags): @@ -26,7 +25,7 @@ @staticmethod def descr_new_memoryview(space, w_subtype, w_object): - return W_MemoryView(w_object, space.buffer_w(w_object, space.BUF_FULL_RO)) + return W_MemoryView(space.buffer_w(w_object, space.BUF_FULL_RO)) def _make_descr__cmp(name): def descr__cmp(self, space, w_other): @@ -69,7 +68,7 @@ if size < 0: size = 0 buf = SubBuffer(self.buf, start, size) - return W_MemoryView(self.obj, buf) + return W_MemoryView(buf) def descr_tobytes(self, space): return space.wrap(self.as_str()) From noreply at buildbot.pypy.org Thu Apr 24 22:52:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:10 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: flesh out buffer flags Message-ID: <20140424205210.C66271C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70936:bac2a6251789 Date: 2014-04-24 16:03 -0400 http://bitbucket.org/pypy/pypy/changeset/bac2a6251789/ Log: flesh out buffer flags diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1364,12 +1364,18 @@ self.wrap('cannot convert negative integer ' 'to unsigned int')) - # XXX define these flags correctly, possibly put elsewhere? 
- BUF_SIMPLE = 0 - BUF_FULL_RO = 1 - BUF_CONTIG = 2 - BUF_CONTIG_RO = 3 - BUF_WRITABLE = 4 + BUF_SIMPLE = 0x0000 + BUF_WRITABLE = 0x0001 + BUF_FORMAT = 0x0004 + BUF_ND = 0x0008 + BUF_STRIDES = 0x0010 | BUF_ND + BUF_INDIRECT = 0x0100 | BUF_STRIDES + + BUF_CONTIG_RO = BUF_ND + BUF_CONTIG = BUF_ND | BUF_WRITABLE + + BUF_FULL_RO = BUF_INDIRECT | BUF_FORMAT + BUF_FULL = BUF_INDIRECT | BUF_FORMAT | BUF_WRITABLE def check_buf_flags(self, flags, readonly): if readonly and flags & self.BUF_WRITABLE == self.BUF_WRITABLE: From noreply at buildbot.pypy.org Thu Apr 24 22:52:11 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:11 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix usage of bufferstr in cStringIO Message-ID: <20140424205211.F03AE1C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70937:a8c7805f00a3 Date: 2014-04-24 16:23 -0400 http://bitbucket.org/pypy/pypy/changeset/a8c7805f00a3/ Log: fix usage of bufferstr in cStringIO diff --git a/pypy/module/cStringIO/interp_stringio.py b/pypy/module/cStringIO/interp_stringio.py --- a/pypy/module/cStringIO/interp_stringio.py +++ b/pypy/module/cStringIO/interp_stringio.py @@ -160,10 +160,10 @@ raise OperationError(space.w_IOError, space.wrap("negative size")) self.truncate(size) - @unwrap_spec(buffer='bufferstr') - def descr_write(self, buffer): + def descr_write(self, space, w_buffer): + buffer = space.getarg_w('s*', w_buffer) self.check_closed() - self.write(buffer) + self.write(buffer.as_str()) def descr_writelines(self, w_lines): self.check_closed() @@ -236,5 +236,5 @@ if space.is_none(w_string): return space.wrap(W_OutputType(space)) else: - string = space.bufferstr_w(w_string) + string = space.getarg_w('s*', w_string).as_str() return space.wrap(W_InputType(space, string)) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -4,7 +4,7 
@@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.sliceobject import W_SliceObject -from rpython.rlib.buffer import Buffer +from rpython.rlib.buffer import StringBuffer from rpython.rlib.objectmodel import instantiate, we_are_translated, specialize from rpython.rlib.nonconst import NonConstant from rpython.rlib.rarithmetic import r_uint, r_singlefloat @@ -41,7 +41,7 @@ is_root(w_subtype) def buffer_w(self, space, flags): - return Buffer() + return StringBuffer("foobar") def str_w(self, space): return NonConstant("foobar") From noreply at buildbot.pypy.org Thu Apr 24 22:52:13 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:13 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: replace usage of bufferstr_w in marshal Message-ID: <20140424205213.21D161C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70938:4027486ec885 Date: 2014-04-24 16:34 -0400 http://bitbucket.org/pypy/pypy/changeset/4027486ec885/ Log: replace usage of bufferstr_w in marshal diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -476,13 +476,7 @@ # Unmarshaller with inlined buffer string def __init__(self, space, w_str): Unmarshaller.__init__(self, space, None) - try: - self.bufstr = space.bufferstr_w(w_str) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_TypeError, space.wrap( - 'marshal.loads() arg must be string or buffer')) + self.bufstr = space.getarg_w('s#', w_str) self.bufpos = 0 self.limit = len(self.bufstr) diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -14,6 +14,10 @@ print(repr(s)) 
x = marshal.loads(s) assert x == case and type(x) is type(case) + + exc = raises(TypeError, marshal.loads, memoryview(s)) + assert str(exc.value) == "must be string or read-only buffer, not memoryview" + f = StringIO.StringIO() marshal.dump(case, f) f.seek(0) From noreply at buildbot.pypy.org Thu Apr 24 22:52:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:14 +0200 (CEST) Subject: [pypy-commit] pypy default: fix marshal unknown type code message Message-ID: <20140424205214.3A06B1C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70939:18b81200f942 Date: 2014-04-24 16:38 -0400 http://bitbucket.org/pypy/pypy/changeset/18b81200f942/ Log: fix marshal unknown type code message diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -327,21 +327,8 @@ def invalid_typecode(space, u, tc): - # %r not supported in rpython - #u.raise_exc('invalid typecode in unmarshal: %r' % tc) - c = ord(tc) - if c < 16: - s = '\\x0%x' % c - elif c < 32 or c > 126: - s = '\\x%x' % c - elif tc == '\\': - s = r'\\' - else: - s = tc - q = "'" - if s[0] == "'": - q = '"' - u.raise_exc('invalid typecode in unmarshal: ' + q + s + q) + u.raise_exc("bad marshal data (unknown type code)") + def register(codes, func): """NOT_RPYTHON""" diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -14,11 +14,14 @@ print(repr(s)) x = marshal.loads(s) assert x == case and type(x) is type(case) - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case and type(x) is type(case) + + import sys + if '__pypy__' in sys.builtin_module_names: + f = StringIO.StringIO() + marshal.dump(case, f) + f.seek(0) + x = marshal.load(f) + assert x == case and type(x) 
is type(case) return x def test_None(self): @@ -191,7 +194,7 @@ def test_bad_typecode(self): import marshal exc = raises(ValueError, marshal.loads, chr(1)) - assert r"'\x01'" in exc.value.message + assert str(exc.value) == "bad marshal data (unknown type code)" class AppTestSmallLong(AppTestMarshal): From noreply at buildbot.pypy.org Thu Apr 24 22:52:15 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:15 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: merge default Message-ID: <20140424205215.785201C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70940:5313394cecd3 Date: 2014-04-24 16:39 -0400 http://bitbucket.org/pypy/pypy/changeset/5313394cecd3/ Log: merge default diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- 
a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -881,8 +881,8 @@ def LOAD_NAME(self, nameindex, next_instr): if self.w_locals is not self.w_globals: - w_varname = self.getname_w(nameindex) - w_value = self.space.finditem(self.w_locals, w_varname) + varname = self.getname_u(nameindex) + w_value = self.space.finditem_str(self.w_locals, varname) if w_value is not None: self.pushvalue(w_value) return diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -327,21 +327,8 @@ def invalid_typecode(space, u, tc): - # %r not supported in rpython - #u.raise_exc('invalid typecode in unmarshal: %r' % tc) - c = ord(tc) - if c < 16: - s = '\\x0%x' % c - elif c < 32 or c > 126: - s = '\\x%x' % c - elif tc == '\\': - s = r'\\' - else: - s = tc - q = "'" - if s[0] == "'": - q = '"' - u.raise_exc('invalid typecode in unmarshal: ' + q + s + q) + u.raise_exc("bad marshal data (unknown type code)") + def register(codes, func): """NOT_RPYTHON""" diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -18,11 +18,13 @@ exc = raises(TypeError, marshal.loads, memoryview(s)) assert str(exc.value) == "must be string or read-only buffer, not memoryview" - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case and type(x) is type(case) + import sys + if '__pypy__' in sys.builtin_module_names: + f = StringIO.StringIO() + marshal.dump(case, f) + f.seek(0) + x = marshal.load(f) + assert x == case and type(x) is type(case) return x def test_None(self): @@ -195,7 +197,7 @@ def test_bad_typecode(self): import marshal exc = raises(ValueError, marshal.loads, chr(1)) - assert r"'\x01'" in exc.value.message + assert str(exc.value) == "bad marshal data (unknown type code)" class 
AppTestSmallLong(AppTestMarshal): From noreply at buildbot.pypy.org Thu Apr 24 22:52:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:16 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: fix float.__new__ bufferstr behavior Message-ID: <20140424205216.92D3C1C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70941:47fd7571d5f0 Date: 2014-04-24 16:49 -0400 http://bitbucket.org/pypy/pypy/changeset/47fd7571d5f0/ Log: fix float.__new__ bufferstr behavior diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -23,7 +23,7 @@ register_all(vars(), globals()) - at unwrap_spec(w_x = WrappedDefault(0.0)) + at unwrap_spec(w_x=WrappedDefault(0.0)) def descr__new__(space, w_floattype, w_x): from pypy.objspace.std.floatobject import W_FloatObject w_value = w_x # 'x' is the keyword argument name in CPython @@ -32,15 +32,19 @@ if space.is_w(w_floattype, space.w_float): return w_obj value = space.float_w(w_obj) - elif (space.isinstance_w(w_value, space.w_str) or - space.isinstance_w(w_value, space.w_bytearray)): - value = _string_to_float(space, w_value, space.bufferstr_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from unicodeobject import unicode_to_decimal_w value = _string_to_float(space, w_value, unicode_to_decimal_w(space, w_value)) else: - value = space.float_w(w_x) + try: + value = space.charbuf_w(w_value) + except OperationError as e: + if e.match(space, space.w_TypeError): + raise OperationError(space.w_TypeError, space.wrap( + "float() argument must be a string or a number")) + raise + value = _string_to_float(space, w_value, value) w_obj = space.allocate_instance(W_FloatObject, w_floattype) W_FloatObject.__init__(w_obj, value) return w_obj diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- 
a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -61,7 +61,7 @@ class AppTestAppFloatTest: spaceconfig = dict(usemodules=['binascii', 'rctime']) - + def setup_class(cls): cls.w_py26 = cls.space.wrap(sys.version_info >= (2, 6)) @@ -138,6 +138,11 @@ assert repr(float("+nan")) == "nan" assert repr(float("-nAn")) == "nan" + assert float(buffer("inf")) == inf + assert float(bytearray("inf")) == inf + exc = raises(TypeError, float, memoryview("inf")) + assert str(exc.value) == "float() argument must be a string or a number" + def test_float_unicode(self): # u00A0 and u2000 are some kind of spaces assert 42.75 == float(unichr(0x00A0)+unicode("42.75")+unichr(0x2000)) @@ -812,7 +817,7 @@ def check(a, b): assert (a, math.copysign(1.0, a)) == (b, math.copysign(1.0, b)) - + check(mod(-1.0, 1.0), 0.0) check(mod(-1e-100, 1.0), 1.0) check(mod(-0.0, 1.0), 0.0) From noreply at buildbot.pypy.org Thu Apr 24 22:52:18 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:18 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140424205218.1E2E41C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70942:ea86924e88fb Date: 2014-04-24 16:51 -0400 http://bitbucket.org/pypy/pypy/changeset/ea86924e88fb/ Log: merge heads diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: 
tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -327,21 +327,8 @@ def invalid_typecode(space, u, tc): - # %r not supported in rpython - #u.raise_exc('invalid typecode in unmarshal: %r' % tc) - c = ord(tc) - if c < 16: - s = '\\x0%x' % c - elif c < 32 or c > 126: - s = '\\x%x' % c - elif tc == '\\': - s = r'\\' - else: - s = tc - q = "'" - if s[0] == "'": - q = '"' - u.raise_exc('invalid typecode in unmarshal: ' + q + s + q) + u.raise_exc("bad marshal data (unknown type code)") + def register(codes, func): """NOT_RPYTHON""" diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -14,11 +14,14 @@ print(repr(s)) x = marshal.loads(s) assert x == case and type(x) is type(case) - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case and type(x) is type(case) + + import sys + if '__pypy__' in sys.builtin_module_names: + f = StringIO.StringIO() + marshal.dump(case, f) + f.seek(0) + x = marshal.load(f) + assert x == case and type(x) is type(case) return x def test_None(self): @@ -191,7 +194,7 @@ def test_bad_typecode(self): import marshal exc = raises(ValueError, marshal.loads, chr(1)) - assert r"'\x01'" in exc.value.message + assert str(exc.value) == "bad marshal data (unknown type code)" 
class AppTestSmallLong(AppTestMarshal): From noreply at buildbot.pypy.org Thu Apr 24 22:52:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 22:52:19 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: merge default Message-ID: <20140424205219.495FA1C3512@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70943:513cd84a914e Date: 2014-04-24 16:51 -0400 http://bitbucket.org/pypy/pypy/changeset/513cd84a914e/ Log: merge default diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -560,6 +560,12 @@ Fixing these bootstrap problems is on the TODO list. The global namespace is ``cppyy.gbl``. +* **NULL**: Is represented as ``cppyy.gbl.nullptr``. + In C++11, the keyword ``nullptr`` is used to represent ``NULL``. + For clarity of intent, it is recommended to use this instead of ``None`` + (or the integer ``0``, which can serve in some cases), as ``None`` is better + understood as ``void`` in C++. + * **operator conversions**: If defined in the C++ class and a python equivalent exists (i.e. all builtin integer and floating point types, as well as ``bool``), it will map onto that python conversion. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -66,58 +66,26 @@ Reflex ====== -This method is still experimental. It adds the `cppyy`_ module. -The method works by using the `Reflex package`_ to provide reflection -information of the C++ code, which is then used to automatically generate -bindings at runtime. -From a python standpoint, there is no difference between generating bindings -at runtime, or having them "statically" generated and available in scripts -or compiled into extension modules: python classes and functions are always -runtime structures, created when a script or module loads. 
+The builtin `cppyy`_ module uses reflection information, provided by +`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +automatically generate bindings at runtime. +In Python, classes and functions are always runtime structures, so when they +are generated matters not for performance. However, if the backend itself is capable of dynamic behavior, it is a much -better functional match to python, allowing tighter integration and more -natural language mappings. -Full details are `available here`_. +better functional match, allowing tighter integration and more natural +language mappings. + +The `cppyy`_ module is written in RPython, thus PyPy's JIT is able to remove +most cross-language call overhead. + +`Full details`_ are `available here`_. .. _`cppyy`: cppyy.html -.. _`reflex-support`: cppyy.html -.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex +.. _`installed separately`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 +.. _`Reflex`: http://root.cern.ch/drupal/content/reflex +.. _`Full details`: cppyy.html .. _`available here`: cppyy.html -Pros ----- - -The cppyy module is written in RPython, which makes it possible to keep the -code execution visible to the JIT all the way to the actual point of call into -C++, thus allowing for a very fast interface. -Reflex is currently in use in large software environments in High Energy -Physics (HEP), across many different projects and packages, and its use can be -virtually completely automated in a production environment. -One of its uses in HEP is in providing language bindings for CPython. -Thus, it is possible to use Reflex to have bound code work on both CPython and -on PyPy. -In the medium-term, Reflex will be replaced by `cling`_, which is based on -`llvm`_. -This will affect the backend only; the python-side interface is expected to -remain the same, except that cling adds a lot of dynamic behavior to C++, -enabling further language integration. - -.. 
_`cling`: http://root.cern.ch/drupal/content/cling -.. _`llvm`: http://llvm.org/ - -Cons ----- - -C++ is a large language, and cppyy is not yet feature-complete. -Still, the experience gained in developing the equivalent bindings for CPython -means that adding missing features is a simple matter of engineering, not a -question of research. -The module is written so that currently missing features should do no harm if -you don't use them, if you do need a particular feature, it may be necessary -to work around it in python or with a C++ helper function. -Although Reflex works on various platforms, the bindings with PyPy have only -been tested on Linux. - RPython Mixed Modules ===================== diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -38,6 +38,24 @@ typedef std::map Scopes_t; static Scopes_t s_scopes; +class PseudoExample01 { +public: + PseudoExample01() : m_somedata(-99) {} + PseudoExample01(int a) : m_somedata(a) {} + PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} + PseudoExample01& operator=(const PseudoExample01& e) { + if (this != &e) m_somedata = e.m_somedata; + return *this; + } + virtual ~PseudoExample01() {} + +public: + int m_somedata; +}; + +static int example01_last_static_method = 0; +static int example01_last_constructor = 0; + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- @@ -46,27 +64,62 @@ std::vector methods; - // static double staticAddToDouble(double a); + // ( 0) static double staticAddToDouble(double a) std::vector argtypes; argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); - // static int staticAddOneToInt(int a); - // static int staticAddOneToInt(int a, int b); + // ( 1) static int staticAddOneToInt(int a) + // ( 2) static int staticAddOneToInt(int a, int 
b) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); - // static int staticAtoi(const char* str); + // ( 3) static int staticAtoi(const char* str) argtypes.clear(); argtypes.push_back("const char*"); methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); - // static char* staticStrcpy(const char* strin); + // ( 4) static char* staticStrcpy(const char* strin) methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // ( 5) static void staticSetPayload(payload* p, double d) + // ( 6) static payload* staticCyclePayload(payload* p, double d) + // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + argtypes.clear(); + argtypes.push_back("payload*"); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + + // ( 8) static int getCount() + // ( 9) static void setCount(int) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + + // cut-off is used in cppyy_is_static + example01_last_static_method = methods.size(); + + // (10) example01() + // (11) example01(int a) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + + // cut-off is used in cppyy_is_constructor + example01_last_constructor = methods.size(); + + // (12) double addDataToDouble(double a) + argtypes.clear(); + 
argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; // -- class example01 @@ -98,47 +151,69 @@ } +/* memory management ------------------------------------------------------ */ +void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { + if (handle == s_handles["example01"]) + delete (PseudoExample01*)self; +} + + /* method/function dispatching -------------------------------------------- */ -template -static inline T cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - T result = T(); +int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + int result = 0; switch ((long)method) { - case 0: // double staticAddToDouble(double) - assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; - break; - case 1: // int staticAddOneToInt(int) + case 1: // static int staticAddOneToInt(int) assert(!self && nargs == 1); result = ((CPPYY_G__value*)args)[0].obj.in + 1; break; - case 2: // int staticAddOneToInt(int, int) + case 2: // static int staticAddOneToInt(int, int) assert(!self && nargs == 2); result = ((CPPYY_G__value*)args)[0].obj.in + ((CPPYY_G__value*)args)[1].obj.in + 1; break; - case 3: // int staticAtoi(const char* str) + case 3: // static int staticAtoi(const char* str) assert(!self && nargs == 1); result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); break; + case 8: // static int getCount() + assert(!self && nargs == 0); + // can't actually call this method (would need to resolve example01::count), but + // other than the memory tests, most tests just check for 0 at the end + result = 0; + break; default: + assert(!"method unknown in cppyy_call_i"); break; } return result; } -int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); -} - 
long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - // char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); - char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return (long)strout; + if ((long)method == 4) { // static char* staticStrcpy(const char* strin) + const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); + char* strout = (char*)malloc(::strlen(strin)+1); + ::strcpy(strout, strin); + return (long)strout; + } + assert(!"method unknown in cppyy_call_l"); + return 0; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); + double result = 0.; + switch ((long)method) { + case 0: // static double staticAddToDouble(double) + assert(!self && nargs == 1); + result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + break; + case 12: // double addDataToDouble(double a) + assert(self && nargs == 1); + result = ((PseudoExample01*)self)->m_somedata + ((CPPYY_G__value*)args)[0].obj.d; + break; + default: + assert(!"method unknown in cppyy_call_d"); + break; + } + return result; } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { @@ -149,10 +224,31 @@ return strout; } +cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { + void* result = 0; + if (handle == s_handles["example01"]) { + switch ((long)method) { + case 10: + assert(nargs == 0); + result = new PseudoExample01; + break; + case 11: + assert(nargs == 1); + result = new PseudoExample01(((CPPYY_G__value*)args)[0].obj.in); + break; + default: + assert(!"method unknown in cppyy_constructor"); + break; + } + } + return (cppyy_object_t)result; +} + cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { return (cppyy_methptrgetter_t)0; } + /* handling of function 
argument buffer ----------------------------------- */ void* cppyy_allocate_function_args(size_t nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); @@ -200,7 +296,11 @@ } int cppyy_has_complex_hierarchy(cppyy_type_t /* handle */) { - return 1; + return 0; +} + +int cppyy_num_bases(cppyy_type_t /*handle*/) { + return 0; } @@ -252,11 +352,16 @@ /* method properties ----------------------------------------------------- */ -int cppyy_is_constructor(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return example01_last_static_method <= method_index + && method_index < example01_last_constructor; return 0; } -int cppyy_is_staticmethod(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return method_index < example01_last_static_method ? 
1 : 0; return 1; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -7,11 +7,12 @@ if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex - if not item.location[0] in ['test_helper.py', 'test_cppyy.py']: + if not ('test_helper.py' in item.location[0] or \ + 'test_cppyy.py' in item.location[0]): py.test.skip("genreflex is not installed") import re - if item.location[0] == 'test_cppyy.py' and \ - not re.search("test0[1-3]", item.location[2]): + if 'test_cppyy.py' in item.location[0] and \ + not re.search("test0[1-36]", item.location[2]): py.test.skip("genreflex is not installed") def pytest_configure(config): From noreply at buildbot.pypy.org Thu Apr 24 23:06:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 23:06:54 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: correct a use of bufferstr_w in marshal_impl Message-ID: <20140424210654.94C871D2368@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70944:73a08eb40847 Date: 2014-04-24 16:58 -0400 http://bitbucket.org/pypy/pypy/changeset/73a08eb40847/ Log: correct a use of bufferstr_w in marshal_impl diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -43,6 +43,8 @@ s = marshal.dumps(array.array('c', 'asd')) t = marshal.loads(s) assert type(t) is str and t == 'asd' + exc = raises(ValueError, marshal.dumps, memoryview('asd')) + assert str(exc.value) == "unmarshallable object" def test_unmarshal_evil_long(self): import marshal diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ 
-460,12 +460,12 @@ # any unknown object implementing the buffer protocol is # accepted and encoded as a plain string try: - s = space.bufferstr_w(w_obj) + s = space.readbuf_w(w_obj) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: - m.atom_str(TYPE_STRING, s) + m.atom_str(TYPE_STRING, s.as_str()) return raise_exception(space, "unmarshallable object") From noreply at buildbot.pypy.org Thu Apr 24 23:06:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 23:06:55 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: replace bufferstr in _multiprocessing Message-ID: <20140424210655.BB72E1D2368@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70945:9d4e8bf5ea02 Date: 2014-04-24 17:02 -0400 http://bitbucket.org/pypy/pypy/changeset/9d4e8bf5ea02/ Log: replace bufferstr in _multiprocessing diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -80,8 +80,9 @@ raise OperationError(space.w_IOError, space.wrap("connection is read-only")) - @unwrap_spec(buf='bufferstr', offset='index', size='index') - def send_bytes(self, space, buf, offset=0, size=PY_SSIZE_T_MIN): + @unwrap_spec(offset='index', size='index') + def send_bytes(self, space, w_buf, offset=0, size=PY_SSIZE_T_MIN): + buf = space.getarg_w('s*', w_buf).as_str() length = len(buf) self._check_writable(space) if offset < 0: @@ -149,7 +150,7 @@ w_pickled = space.call_method( w_picklemodule, "dumps", w_obj, w_protocol) - buf = space.bufferstr_w(w_pickled) + buf = space.str_w(w_pickled) self.do_send_string(space, buf, 0, len(buf)) def recv(self, space): From noreply at buildbot.pypy.org Thu Apr 24 23:30:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 23:30:49 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: 
fix _cffi_backend test_ztranslation Message-ID: <20140424213049.0B7F71C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70946:62510b3da5bf Date: 2014-04-24 17:27 -0400 http://bitbucket.org/pypy/pypy/changeset/62510b3da5bf/ Log: fix _cffi_backend test_ztranslation diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1841,6 +1841,7 @@ 'AssertionError', 'AttributeError', 'BaseException', + 'BufferError', 'DeprecationWarning', 'EOFError', 'EnvironmentError', From noreply at buildbot.pypy.org Thu Apr 24 23:43:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 23:43:07 +0200 (CEST) Subject: [pypy-commit] pypy refactor-buffer-api: close branch for merging Message-ID: <20140424214307.A9EE91C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: refactor-buffer-api Changeset: r70947:b221c1831061 Date: 2014-04-24 17:32 -0400 http://bitbucket.org/pypy/pypy/changeset/b221c1831061/ Log: close branch for merging From noreply at buildbot.pypy.org Thu Apr 24 23:43:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 23:43:10 +0200 (CEST) Subject: [pypy-commit] pypy default: merge refactor-buffer-api Message-ID: <20140424214310.2E9681C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70948:559486c5650c Date: 2014-04-24 17:34 -0400 http://bitbucket.org/pypy/pypy/changeset/559486c5650c/ Log: merge refactor-buffer-api * properly implements old/new buffer api for objects * begins work on replacing bufferstr usage diff too long, truncating to 2000 out of 3701 lines diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -115,8 +115,8 @@ self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, 
"a", b"a") # Trying to resize the memory object - self.assertRaises((ValueError, TypeError), setitem, 0, b"") - self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") + self.assertRaises(ValueError, setitem, 0, b"") + self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") @@ -166,18 +166,11 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - if test_support.check_impl_detail(cpython=True): - # what is supported and what is not supported by memoryview is - # very inconsisten on CPython. In PyPy, memoryview supports - # the buffer interface, and thus the following comparison - # succeeds. See also the comment in - # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer - # - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef") + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -5,7 +5,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id) + compute_unique_id, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX @@ -194,13 +194,37 @@ def immutable_unique_id(self, space): return None - def buffer_w(self, space): + def buffer_w(self, 
space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): - return w_result.buffer_w(space) - self._typed_unwrap_error(space, "buffer") + return w_result.buffer_w(space, flags) + raise TypeError + + def readbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.readbuf_w(space) + raise TypeError + + def writebuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.writebuf_w(space) + raise TypeError + + def charbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.charbuf_w(space) + raise TypeError def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -1340,25 +1364,111 @@ self.wrap('cannot convert negative integer ' 'to unsigned int')) - def buffer_w(self, w_obj): - return w_obj.buffer_w(self) + BUF_SIMPLE = 0x0000 + BUF_WRITABLE = 0x0001 + BUF_FORMAT = 0x0004 + BUF_ND = 0x0008 + BUF_STRIDES = 0x0010 | BUF_ND + BUF_INDIRECT = 0x0100 | BUF_STRIDES - def rwbuffer_w(self, w_obj): - # returns a RWBuffer instance - from pypy.interpreter.buffer import RWBuffer - buffer = self.buffer_w(w_obj) - if not isinstance(buffer, RWBuffer): - raise OperationError(self.w_TypeError, - self.wrap('read-write buffer expected')) - return buffer + BUF_CONTIG_RO = BUF_ND + BUF_CONTIG = BUF_ND | BUF_WRITABLE - def bufferstr_new_w(self, w_obj): - # Implement the "new buffer interface" (new in Python 2.7) - # returning an unwrapped string. 
It doesn't accept unicode - # strings - buffer = self.buffer_w(w_obj) - return buffer.as_str() + BUF_FULL_RO = BUF_INDIRECT | BUF_FORMAT + BUF_FULL = BUF_INDIRECT | BUF_FORMAT | BUF_WRITABLE + def check_buf_flags(self, flags, readonly): + if readonly and flags & self.BUF_WRITABLE == self.BUF_WRITABLE: + raise oefmt(self.w_BufferError, "Object is not writable.") + + def buffer_w(self, w_obj, flags): + # New buffer interface, returns a buffer based on flags (PyObject_GetBuffer) + try: + return w_obj.buffer_w(self, flags) + except TypeError: + raise oefmt(self.w_TypeError, + "'%T' does not have the buffer interface", w_obj) + + def readbuf_w(self, w_obj): + # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) + try: + return w_obj.readbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a readable buffer object") + + def writebuf_w(self, w_obj): + # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) + try: + return w_obj.writebuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a writeable buffer object") + + def charbuf_w(self, w_obj): + # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) + try: + return w_obj.charbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a character buffer object") + + def _getarg_error(self, expected, w_obj): + if self.is_none(w_obj): + name = "None" + else: + name = self.type(w_obj).get_module_type_name() + raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + + @specialize.arg(1) + def getarg_w(self, code, w_obj): + if code == 'z*': + if self.is_none(w_obj): + return None + code = 's*' + if code == 's*': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.readbuf_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).readbuf_w(self) + try: + return w_obj.buffer_w(self, 0) + except TypeError: + pass + try: + return w_obj.readbuf_w(self) + except TypeError: 
+ self._getarg_error("string or buffer", w_obj) + elif code == 's#': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.str_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).str_w(self) + try: + return w_obj.readbuf_w(self).as_str() + except TypeError: + self._getarg_error("string or read-only buffer", w_obj) + elif code == 'w*': + try: + try: + return w_obj.buffer_w(self, self.BUF_WRITABLE) + except OperationError: + self._getarg_error("read-write buffer", w_obj) + except TypeError: + pass + try: + return w_obj.writebuf_w(self) + except TypeError: + self._getarg_error("read-write buffer", w_obj) + elif code == 't#': + try: + return w_obj.charbuf_w(self) + except TypeError: + self._getarg_error("string or read-only character buffer", w_obj) + else: + assert False + + # XXX rename/replace with code more like CPython getargs for buffers def bufferstr_w(self, w_obj): # Directly returns an interp-level str. Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): @@ -1373,8 +1483,18 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - buffer = self.buffer_w(w_obj) - return buffer.as_str() + try: + buf = w_obj.buffer_w(self, 0) + except TypeError: + pass + else: + return buf.as_str() + try: + buf = w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + else: + return buf.as_str() def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): @@ -1721,6 +1841,7 @@ 'AssertionError', 'AttributeError', 'BaseException', + 'BufferError', 'DeprecationWarning', 'EOFError', 'EnvironmentError', diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py deleted file mode 100644 --- a/pypy/interpreter/buffer.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Buffer protocol support. 
-""" -from rpython.rlib.objectmodel import import_from_mixin - - -class Buffer(object): - """Abstract base class for buffers.""" - __slots__ = [] - - def getlength(self): - raise NotImplementedError - - def as_str(self): - "Returns an interp-level string with the whole content of the buffer." - # May be overridden. - return self.getslice(0, self.getlength(), 1, self.getlength()) - - def getitem(self, index): - "Returns the index'th character in the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def getslice(self, start, stop, step, size): - # May be overridden. No bounds checks. - return ''.join([self.getitem(i) for i in range(start, stop, step)]) - - def get_raw_address(self): - raise ValueError("no raw buffer") - - def is_writable(self): - return False - - -class RWBuffer(Buffer): - """Abstract base class for read-write buffers.""" - __slots__ = [] - - def is_writable(self): - return True - - def setitem(self, index, char): - "Write a character into the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def setslice(self, start, string): - # May be overridden. No bounds checks. 
- for i in range(len(string)): - self.setitem(start + i, string[i]) - - -class StringBuffer(Buffer): - __slots__ = ['value'] - - def __init__(self, value): - self.value = value - - def getlength(self): - return len(self.value) - - def as_str(self): - return self.value - - def getitem(self, index): - return self.value[index] - - def getslice(self, start, stop, step, size): - if size == 0: - return "" - if step == 1: - assert 0 <= start <= stop - return self.value[start:stop] - return "".join([self.value[start + i*step] for i in xrange(size)]) -# ____________________________________________________________ - - -class SubBufferMixin(object): - _attrs_ = ['buffer', 'offset', 'size'] - - def __init__(self, buffer, offset, size): - self.buffer = buffer - self.offset = offset - self.size = size - - def getlength(self): - at_most = self.buffer.getlength() - self.offset - if 0 <= self.size <= at_most: - return self.size - elif at_most >= 0: - return at_most - else: - return 0 - - def getitem(self, index): - return self.buffer.getitem(self.offset + index) - - def getslice(self, start, stop, step, size): - if start == stop: - return '' # otherwise, adding self.offset might make them - # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, - step, size) - - -class SubBuffer(Buffer): - import_from_mixin(SubBufferMixin) - - -class RWSubBuffer(RWBuffer): - import_from_mixin(SubBufferMixin) - - def setitem(self, index, char): - self.buffer.setitem(self.offset + index, char) - - def setslice(self, start, string): - if len(string) == 0: - return # otherwise, adding self.offset might make 'start' - # out of bounds - self.buffer.setslice(self.offset + start, string) diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py deleted file mode 100644 --- a/pypy/interpreter/test/test_buffer.py +++ /dev/null @@ -1,43 +0,0 @@ -import py -from rpython.tool.udir import udir - -testdir = udir.ensure('test_buffer', dir=1) - - -class 
TestBuffer: - def test_buffer_w(self): - space = self.space - w_hello = space.wrap('hello world') - buf = space.buffer_w(w_hello) - assert buf.getlength() == 11 - assert buf.as_str() == 'hello world' - assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.newbuffer(buf)) is buf - assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' - space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - - def test_file_write(self): - space = self.space - w_buffer = space.newbuffer(space.buffer_w(space.wrap('hello world'))) - filename = str(testdir.join('test_file_write')) - space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): - f = open(filename, 'wb') - f.write(buffer) - f.close() - """) - f = open(filename, 'rb') - data = f.read() - f.close() - assert data == 'hello world' - - def test_unicode(self): - space = self.space - s = space.bufferstr_w(space.wrap(u'hello')) - assert type(s) is str - assert s == 'hello' - space.raises_w(space.w_UnicodeEncodeError, - space.bufferstr_w, space.wrap(u'\xe9')) - - -# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -2,13 +2,16 @@ # A convenient read-write buffer. Located here for want of a better place. 
# -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from pypy.interpreter.gateway import unwrap_spec -class ByteBuffer(RWBuffer): +class ByteBuffer(Buffer): + _immutable_ = True + def __init__(self, len): self.data = ['\x00'] * len + self.readonly = False def getlength(self): return len(self.data) diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -13,3 +13,17 @@ assert b[-1] == '*' assert b[-2] == '-' assert b[-3] == '+' + exc = raises(TypeError, "b[3] = 'abc'") + assert str(exc.value) == "right operand must be a single byte" + exc = raises(TypeError, "b[3:5] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + exc = raises(TypeError, "b[3:7:2] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + + b = bytebuffer(10) + b[1:3] = 'xy' + assert str(b) == "\x00xy" + "\x00" * 7 + b[4:8:2] = 'zw' + assert str(b) == "\x00xy\x00z\x00w" + "\x00" * 3 + b[6:10] = u'#' + assert str(b) == "\x00xy\x00z\x00#" + "\x00" * 3 diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,21 +1,22 @@ -from pypy.interpreter.buffer import RWBuffer -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer +from rpython.rlib.buffer import Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem.rstr import 
copy_string_to_raw -class LLBuffer(RWBuffer): +class LLBuffer(Buffer): _immutable_ = True def __init__(self, raw_cdata, size): self.raw_cdata = raw_cdata self.size = size + self.readonly = False def getlength(self): return self.size @@ -32,7 +33,7 @@ def getslice(self, start, stop, step, size): if step == 1: return rffi.charpsize2str(rffi.ptradd(self.raw_cdata, start), size) - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) @@ -46,6 +47,14 @@ W_Buffer.__init__(self, buffer) self.keepalive = keepalive + def descr_setitem(self, space, w_index, w_obj): + try: + W_Buffer.descr_setitem(self, space, w_index, w_obj) + except OperationError as e: + if e.match(space, space.w_TypeError): + e.w_type = space.w_ValueError + raise + MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -72,8 +72,8 @@ 'utf_32_le_decode' : 'interp_codecs.utf_32_le_decode', 'utf_32_le_encode' : 'interp_codecs.utf_32_le_encode', 'utf_32_ex_decode' : 'interp_codecs.utf_32_ex_decode', - 'charbuffer_encode': 'interp_codecs.buffer_encode', - 'readbuffer_encode': 'interp_codecs.buffer_encode', + 'charbuffer_encode': 'interp_codecs.charbuffer_encode', + 'readbuffer_encode': 'interp_codecs.readbuffer_encode', 'charmap_decode' : 'interp_codecs.charmap_decode', 'charmap_encode' : 'interp_codecs.charmap_encode', 'escape_encode' : 'interp_codecs.escape_encode', diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -321,8 +321,14 @@ w_res = space.call_function(w_encoder, w_obj, space.wrap(errors)) return space.getitem(w_res, space.wrap(0)) - at unwrap_spec(s='bufferstr', 
errors='str_or_None') -def buffer_encode(space, s, errors='strict'): + at unwrap_spec(errors='str_or_None') +def readbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('s#', w_data) + return space.newtuple([space.wrap(s), space.wrap(len(s))]) + + at unwrap_spec(errors='str_or_None') +def charbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('t#', w_data) return space.newtuple([space.wrap(s), space.wrap(len(s))]) @unwrap_spec(errors=str) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -420,9 +420,13 @@ for (i, line) in enumerate(reader): assert line == s[i] - def test_array(self): + def test_buffer_encode(self): import _codecs, array - _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + assert _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + exc = raises(TypeError, _codecs.charbuffer_encode, array.array('c', 'spam')) + assert str(exc.value) == "must be string or read-only character buffer, not array.array" + assert _codecs.readbuffer_encode(u"test") == ('test', 4) + assert _codecs.charbuffer_encode(u"test") == ('test', 4) def test_utf8sig(self): import codecs diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -267,9 +267,14 @@ def direct_write(self, w_data): space = self.space - if not self.binary and space.isinstance_w(w_data, space.w_unicode): - w_data = space.call_method(w_data, "encode", space.wrap(self.encoding), space.wrap(self.errors)) - data = space.bufferstr_w(w_data) + if self.binary: + data = space.getarg_w('s*', w_data).as_str() + else: + if space.isinstance_w(w_data, space.w_unicode): + w_data = space.call_method(w_data, "encode", + space.wrap(self.encoding), + space.wrap(self.errors)) + data = space.charbuf_w(w_data) 
self.do_direct_write(data) def do_direct_write(self, data): @@ -469,7 +474,7 @@ """readinto() -> Undocumented. Don't use this; it may go away.""" # XXX not the most efficient solution as it doesn't avoid the copying space = self.space - rwbuffer = space.rwbuffer_w(w_rwbuffer) + rwbuffer = space.writebuf_w(w_rwbuffer) w_data = self.file_read(rwbuffer.getlength()) data = space.str_w(w_data) rwbuffer.setslice(0, data) diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -80,7 +80,7 @@ self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() w_data = space.call_method(self, "read", space.wrap(length)) @@ -101,11 +101,14 @@ readinto = interp2app(W_BufferedIOBase.readinto_w), ) -class RawBuffer(RWBuffer): +class RawBuffer(Buffer): + _immutable_ = True + def __init__(self, buf, start, length): self.buf = buf self.start = start self.length = length + self.readonly = False def getlength(self): return self.length @@ -698,7 +701,7 @@ def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() size = len(data) with self.lock: diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ 
b/pypy/module/_io/interp_bytesio.py @@ -41,7 +41,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) size = rwbuffer.getlength() output = self.read(size) @@ -50,10 +50,7 @@ def write_w(self, space, w_data): self._check_closed(space) - if space.isinstance_w(w_data, space.w_unicode): - raise OperationError(space.w_TypeError, space.wrap( - "bytes string of buffer expected")) - buf = space.bufferstr_w(w_data) + buf = space.buffer_w(w_data, space.BUF_CONTIG_RO).as_str() length = len(buf) if length <= 0: return space.wrap(0) diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -333,7 +333,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() try: n = os.write(self.fd, data) @@ -366,7 +366,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) self._check_readable(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() try: buf = os.read(self.fd, length) diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,6 +139,14 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + 
assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\ncxxxxx' @@ -235,7 +243,8 @@ import _io raw = _io.FileIO(self.tmpfile, 'w') f = _io.BufferedWriter(raw) - f.write("abcd") + f.write("ab") + f.write(u"cd") f.close() assert self.readfile() == "abcd" diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -38,6 +38,8 @@ f = _io.BytesIO() assert f.write("") == 0 assert f.write("hello") == 5 + exc = raises(TypeError, f.write, u"lo") + assert str(exc.value) == "'unicode' does not have the buffer interface" import gc; gc.collect() assert f.getvalue() == "hello" f.close() @@ -97,6 +99,14 @@ a2 = bytearray('testing') assert b.readinto(a1) == 1 assert b.readinto(a2) == 4 + exc = raises(TypeError, b.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, b.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" b.close() assert a1 == "h" assert a2 == "elloing" diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -82,7 +82,8 @@ import _io filename = self.tmpfile + '_w' f = _io.FileIO(filename, 'wb') - f.write("test") + f.write("te") + f.write(u"st") # try without flushing f2 = _io.FileIO(filename, 'rb') assert f2.read() == "test" @@ -135,6 +136,14 @@ a = bytearray('x' * 10) f = _io.FileIO(self.tmpfile, 'r+') assert f.readinto(a) == 10 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as 
modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\nc\0\0\0\0\0' # diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -80,8 +80,9 @@ raise OperationError(space.w_IOError, space.wrap("connection is read-only")) - @unwrap_spec(buf='bufferstr', offset='index', size='index') - def send_bytes(self, space, buf, offset=0, size=PY_SSIZE_T_MIN): + @unwrap_spec(offset='index', size='index') + def send_bytes(self, space, w_buf, offset=0, size=PY_SSIZE_T_MIN): + buf = space.getarg_w('s*', w_buf).as_str() length = len(buf) self._check_writable(space) if offset < 0: @@ -122,7 +123,7 @@ @unwrap_spec(offset='index') def recv_bytes_into(self, space, w_buffer, offset=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.writebuf_w(w_buffer) length = rwbuffer.getlength() res, newbuf = self.do_recv_string( @@ -149,7 +150,7 @@ w_pickled = space.call_method( w_picklemodule, "dumps", w_obj, w_protocol) - buf = space.bufferstr_w(w_pickled) + buf = space.str_w(w_pickled) self.do_send_string(space, buf, 0, len(buf)) def recv(self, space): diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,12 +1,14 @@ -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer # XXX not the most efficient implementation -class RawFFIBuffer(RWBuffer): +class RawFFIBuffer(Buffer): + _immutable_ = True def __init__(self, 
datainstance): self.datainstance = datainstance + self.readonly = False def getlength(self): return self.datainstance.getrawsize() diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -16,6 +16,7 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.rarithmetic import intmask, r_uint +from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker TYPEMAP = { @@ -352,8 +353,13 @@ lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) - def buffer_w(self, space): - from pypy.module._rawffi.buffer import RawFFIBuffer + def buffer_w(self, space, flags): + return RawFFIBuffer(self) + + def readbuf_w(self, space): + return RawFFIBuffer(self) + + def writebuf_w(self, space): return RawFFIBuffer(self) def getrawsize(self): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1100,6 +1100,12 @@ assert a[3] == 'z' assert a[4] == 't' + b = memoryview(a) + assert len(b) == 10 + assert b[3] == 'z' + b[3] = 'x' + assert b[3] == 'x' + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -419,7 +419,7 @@ @unwrap_spec(nbytes=int, flags=int) def recv_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt @@ -430,7 +430,7 @@ @unwrap_spec(nbytes=int, flags=int) def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = 
space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -545,8 +545,12 @@ s.connect(("www.python.org", 80)) except _socket.gaierror, ex: skip("GAIError - probably no connection: %s" % str(ex.args)) + exc = raises(TypeError, s.send, None) + assert str(exc.value) == "must be string or buffer, not None" assert s.send(buffer('')) == 0 assert s.sendall(buffer('')) is None + assert s.send(memoryview('')) == 0 + assert s.sendall(memoryview('')) is None assert s.send(u'') == 0 assert s.sendall(u'') is None raises(UnicodeEncodeError, s.send, u'\xe9') @@ -678,6 +682,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes = cli.recv_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_recvfrom_into(self): import socket import array @@ -693,6 +704,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes, addr = cli.recvfrom_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_family(self): import socket cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -114,7 +114,7 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - buf = space.buffer_w(w_string) + buf = space.readbuf_w(w_string) size = buf.getlength() assert size >= 0 if pos > size: diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -1,6 +1,7 @@ from __future__ import 
with_statement from rpython.rlib import jit +from rpython.rlib.buffer import Buffer from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable @@ -9,7 +10,6 @@ from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( interp2app, interpindirect2app, unwrap_spec) @@ -132,8 +132,11 @@ self.len = 0 self.allocated = 0 - def buffer_w(self, space): - return ArrayBuffer(self) + def readbuf_w(self, space): + return ArrayBuffer(self, True) + + def writebuf_w(self, space): + return ArrayBuffer(self, False) def descr_append(self, space, w_x): """ append(x) @@ -583,9 +586,12 @@ v.typecode = k unroll_typecodes = unrolling_iterable(types.keys()) -class ArrayBuffer(RWBuffer): - def __init__(self, array): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, array, readonly): self.array = array + self.readonly = readonly def getlength(self): return self.array.len * self.array.itemsize diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -421,12 +421,8 @@ def test_buffer_write(self): a = self.array('c', 'hello') buf = buffer(a) - print repr(buf) - try: - buf[3] = 'L' - except TypeError: - skip("buffer(array) returns a read-only buffer on CPython") - assert a.tostring() == 'helLo' + exc = raises(TypeError, "buf[3] = 'L'") + assert str(exc.value) == "buffer is read-only" def test_buffer_keepalive(self): buf = buffer(self.array('c', 'text')) diff --git a/pypy/module/cStringIO/interp_stringio.py b/pypy/module/cStringIO/interp_stringio.py --- a/pypy/module/cStringIO/interp_stringio.py +++ b/pypy/module/cStringIO/interp_stringio.py @@ -160,10 +160,10 @@ 
raise OperationError(space.w_IOError, space.wrap("negative size")) self.truncate(size) - @unwrap_spec(buffer='bufferstr') - def descr_write(self, buffer): + def descr_write(self, space, w_buffer): + buffer = space.getarg_w('s*', w_buffer) self.check_closed() - self.write(buffer) + self.write(buffer.as_str()) def descr_writelines(self, w_lines): self.check_closed() @@ -236,5 +236,5 @@ if space.is_none(w_string): return space.wrap(W_OutputType(space)) else: - string = space.bufferstr_w(w_string) + string = space.getarg_w('s*', w_string).as_str() return space.wrap(W_InputType(space, string)) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.buffer_w(w_obj) + buf = space.buffer_w(w_obj, space.BUF_SIMPLE) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.buffer_w(w_value) + buf = space.buffer_w(w_value, space.BUF_SIMPLE) # TODO: report if too many items given? 
for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.buffer_w(w_value) + buf = space.buffer_w(w_value, space.BUF_SIMPLE) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -22,7 +22,6 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.module import Module from pypy.interpreter.function import StaticMethod -from pypy.objspace.std.memoryview import W_MemoryView from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject @@ -474,7 +473,7 @@ "PyLong_Type": "space.w_long", "PyComplex_Type": "space.w_complex", "PyByteArray_Type": "space.w_bytearray", - "PyMemoryView_Type": "space.gettypeobject(W_MemoryView.typedef)", + "PyMemoryView_Type": "space.w_memoryview", "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", "PyBaseObject_Type": "space.w_object", 'PyNone_Type': 'space.type(space.w_None)', diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -1,12 +1,12 @@ +from rpython.rlib.buffer import StringBuffer, SubBuffer from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.buffer import StringBuffer, SubBuffer from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref from pypy.module.array.interp_array import ArrayBuffer -from 
pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer PyBufferObjectStruct = lltype.ForwardReference() diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -15,8 +15,8 @@ from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments +from rpython.rlib.buffer import Buffer from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize from rpython.tool.sourcetools import func_renamer @@ -230,11 +230,13 @@ class CPyBuffer(Buffer): # Similar to Py_buffer + _immutable_ = True def __init__(self, ptr, size, w_obj): self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive + self.readonly = True def getlength(self): return self.size diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -53,8 +53,11 @@ def test_buffer(self): module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) + buf = buffer(arr) + exc = raises(TypeError, "buf[1] = '1'") + assert str(exc.value) == "buffer is read-only" # XXX big-endian - assert str(buffer(arr)) == ('\x01\0\0\0' - '\x02\0\0\0' - '\x03\0\0\0' - '\x04\0\0\0') + assert str(buf) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -1,6 +1,6 @@ from rpython.rtyper.tool import rffi_platform as platform from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import OperationError, wrap_oserror +from 
pypy.interpreter.error import OperationError, wrap_oserror, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib import rposix from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -92,33 +92,27 @@ op = rffi.cast(rffi.INT, op) # C long => C int try: - intarg = space.int_w(w_arg) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - intarg = rffi.cast(rffi.INT, intarg) # C long => C int - rv = fcntl_int(fd, op, intarg) - if rv < 0: - raise _get_error(space, "fcntl") - return space.wrap(rv) - - try: - arg = space.bufferstr_w(w_arg) + arg = space.getarg_w('s#', w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: ll_arg = rffi.str2charp(arg) - rv = fcntl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, "fcntl") - return space.wrap(arg) + try: + rv = fcntl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "fcntl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') - raise OperationError(space.w_TypeError, - space.wrap("int or string or buffer required")) + intarg = space.int_w(w_arg) + intarg = rffi.cast(rffi.INT, intarg) # C long => C int + rv = fcntl_int(fd, op, intarg) + if rv < 0: + raise _get_error(space, "fcntl") + return space.wrap(rv) @unwrap_spec(op=int) def flock(space, w_fd, op): @@ -207,50 +201,50 @@ fd = space.c_filedescriptor_w(w_fd) op = rffi.cast(rffi.INT, op) # C long => C int - if mutate_flag != 0: - try: - rwbuffer = space.rwbuffer_w(w_arg) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - if mutate_flag > 0: - raise - else: - arg = rwbuffer.as_str() - ll_arg = rffi.str2charp(arg) - rv = ioctl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, 
"ioctl") - rwbuffer.setslice(0, arg) - return space.wrap(rv) - try: - intarg = space.int_w(w_arg) + rwbuffer = space.writebuf_w(w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: - intarg = rffi.cast(rffi.INT, intarg) # C long => C int - rv = ioctl_int(fd, op, intarg) - if rv < 0: - raise _get_error(space, "ioctl") - return space.wrap(rv) + arg = rwbuffer.as_str() + ll_arg = rffi.str2charp(arg) + try: + rv = ioctl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "ioctl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + if mutate_flag != 0: + rwbuffer.setslice(0, arg) + return space.wrap(rv) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') + + if mutate_flag != -1: + raise OperationError(space.w_TypeError, space.wrap( + "ioctl requires a file or file descriptor, an integer " + "and optionally an integer or buffer argument")) try: - arg = space.bufferstr_w(w_arg) + arg = space.getarg_w('s#', w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: ll_arg = rffi.str2charp(arg) - rv = ioctl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, "ioctl") - return space.wrap(arg) + try: + rv = ioctl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "ioctl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') - raise OperationError(space.w_TypeError, - space.wrap("int or string or buffer required")) + intarg = space.int_w(w_arg) + intarg = rffi.cast(rffi.INT, intarg) # C long => C int + rv = ioctl_int(fd, op, intarg) + if rv < 0: + raise _get_error(space, "ioctl") + return space.wrap(rv) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -51,6 +51,8 @@ assert fcntl.fcntl(f, 1, 0) == 0 assert 
fcntl.fcntl(f, 2, "foo") == "foo" assert fcntl.fcntl(f, 2, buffer("foo")) == "foo" + exc = raises(TypeError, fcntl.fcntl, f, 2, memoryview("foo")) + assert 'integer' in str(exc.value) try: os.O_LARGEFILE @@ -226,6 +228,18 @@ assert res == 0 assert buf.tostring() == expected + buf = array.array('i', [0]) + res = fcntl.ioctl(mfd, TIOCGPGRP, buffer(buf)) + assert res == expected + assert buf.tostring() == '\x00' * 4 + + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview('abc')) + assert 'integer' in str(exc.value) + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, buffer(buf), False) + assert str(exc.value) == "ioctl requires a file or file descriptor, an integer and optionally an integer or buffer argument" + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview('abc'), False) + assert str(exc.value) == "ioctl requires a file or file descriptor, an integer and optionally an integer or buffer argument" + res = fcntl.ioctl(mfd, TIOCGPGRP, buf, False) assert res == expected diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -220,7 +220,7 @@ space = self.space if space.type(w_obj).is_heaptype(): try: - buf = space.buffer_w(w_obj) + buf = space.readbuf_w(w_obj) except OperationError as e: if not e.match(space, space.w_TypeError): raise @@ -463,13 +463,7 @@ # Unmarshaller with inlined buffer string def __init__(self, space, w_str): Unmarshaller.__init__(self, space, None) - try: - self.bufstr = space.bufferstr_w(w_str) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_TypeError, space.wrap( - 'marshal.loads() arg must be string or buffer')) + self.bufstr = space.getarg_w('s#', w_str) self.bufpos = 0 self.limit = len(self.bufstr) diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- 
a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -15,6 +15,9 @@ x = marshal.loads(s) assert x == case and type(x) is type(case) + exc = raises(TypeError, marshal.loads, memoryview(s)) + assert str(exc.value) == "must be string or read-only buffer, not memoryview" + import sys if '__pypy__' in sys.builtin_module_names: f = StringIO.StringIO() diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -43,6 +43,8 @@ s = marshal.dumps(array.array('c', 'asd')) t = marshal.loads(s) assert type(t) is str and t == 'asd' + exc = raises(ValueError, marshal.dumps, memoryview('asd')) + assert str(exc.value) == "unmarshallable object" def test_unmarshal_evil_long(self): import marshal diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -344,8 +344,14 @@ def descr_copy(self, space): return self.convert_to(space, self.get_dtype(space)) - def buffer_w(self, space): - return self.descr_ravel(space).buffer_w(space) + def buffer_w(self, space, flags): + return self.descr_ravel(space).buffer_w(space, flags) + + def readbuf_w(self, space): + return self.descr_ravel(space).readbuf_w(space) + + def charbuf_w(self, space): + return self.descr_ravel(space).charbuf_w(space) def descr_byteswap(self, space): return self.get_dtype(space).itemtype.byteswap(self) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,6 +1,6 @@ -from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit +from rpython.rlib.buffer import Buffer from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rawstorage import 
alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE @@ -316,8 +316,8 @@ def get_storage(self): return self.storage - def get_buffer(self, space): - return ArrayBuffer(self) + def get_buffer(self, space, readonly): + return ArrayBuffer(self, readonly) def astype(self, space, dtype): strides, backstrides = calc_strides(self.get_shape(), dtype, @@ -471,9 +471,12 @@ free_raw_storage(self.storage) -class ArrayBuffer(RWBuffer): - def __init__(self, impl): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, impl, readonly): self.impl = impl + self.readonly = readonly def getitem(self, item): return raw_storage_getitem(lltype.Char, self.impl.storage, item) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -610,11 +610,20 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ctypes not implemented yet")) - def buffer_w(self, space): - return self.implementation.get_buffer(space) + def buffer_w(self, space, flags): + return self.implementation.get_buffer(space, True) + + def readbuf_w(self, space): + return self.implementation.get_buffer(space, True) + + def writebuf_w(self, space): + return self.implementation.get_buffer(space, False) + + def charbuf_w(self, space): + return self.implementation.get_buffer(space, True).as_str() def descr_get_data(self, space): - return space.newbuffer(self.buffer_w(space)) + return space.newbuffer(self.implementation.get_buffer(space, False)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): @@ -1188,7 +1197,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) - buf = space.buffer_w(w_buffer) + try: + buf = space.writebuf_w(w_buffer) + except OperationError: + buf = space.readbuf_w(w_buffer) try: raw_ptr = buf.get_raw_address() except ValueError: @@ -1206,7 
+1218,7 @@ return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, w_subtype=w_subtype, w_base=w_buffer, - writable=buf.is_writable()) + writable=not buf.readonly) order = order_converter(space, w_order, NPY.CORDER) if order == NPY.CORDER: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -347,6 +347,9 @@ a = np.array([1,2,3]) b = buffer(a) assert type(b) is buffer + assert 'read-only buffer' in repr(b) + exc = raises(TypeError, "b[0] = '0'") + assert str(exc.value) == 'buffer is read-only' def test_type(self): from numpypy import array @@ -2242,6 +2245,7 @@ a.data[4] = '\xff' assert a[1] == 0xff assert len(a.data) == 16 + assert type(a.data) is buffer def test_explicit_dtype_conversion(self): from numpypy import array diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -2,8 +2,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.buffer import RWBuffer from rpython.rlib import rmmap, rarithmetic +from rpython.rlib.buffer import Buffer from rpython.rlib.rmmap import RValueError, RTypeError, RMMapError if rmmap.HAVE_LARGEFILE_SUPPORT: @@ -17,9 +17,9 @@ self.space = space self.mmap = mmap_obj - def buffer_w(self, space): + def readbuf_w(self, space): self.check_valid() - return MMapBuffer(self.space, self.mmap) + return MMapBuffer(self.space, self.mmap, True) def close(self): self.mmap.close() @@ -286,10 +286,13 @@ mmap_error._dont_inline_ = True -class MMapBuffer(RWBuffer): - def __init__(self, space, mmap): +class MMapBuffer(Buffer): + _immutable_ = True + + def __init__(self, space, mmap, readonly): self.space = space self.mmap = mmap + self.readonly = readonly def 
getlength(self): return self.mmap.size @@ -303,7 +306,7 @@ if step == 1: return self.mmap.getslice(start, size) else: - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setitem(self, index, char): self.check_valid_writeable() @@ -313,14 +316,6 @@ self.check_valid_writeable() self.mmap.setslice(start, string) - def is_writable(self): - try: - self.mmap.check_writeable() - except RMMapError: - return False - else: - return True - def get_raw_address(self): self.check_valid() return self.mmap.data diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -560,14 +560,24 @@ m = mmap(f.fileno(), 6) m[5] = '?' b = buffer(m) - try: - b[:3] = "FOO" - except TypeError: # on CPython: "buffer is read-only" :-/ - skip("on CPython: buffer is read-only") + exc = raises(TypeError, 'b[:3] = "FOO"') + assert str(exc.value) == "buffer is read-only" m.close() f.seek(0) got = f.read() - assert got == "FOOba?" + assert got == "fooba?" + f.close() + + def test_memoryview(self): + from mmap import mmap + f = open(self.tmpname + "y", "w+") + f.write("foobar") + f.flush() + m = mmap(f.fileno(), 6) + m[5] = '?' 
+ exc = raises(TypeError, memoryview, m) + assert 'buffer interface' in str(exc.value) + m.close() f.close() def test_offset(self): diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -48,13 +48,13 @@ interpleveldefs = { 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', + 'pack_into': 'interp_struct.pack_into', 'unpack': 'interp_struct.unpack', + 'unpack_from': 'interp_struct.unpack_from', 'Struct': 'interp_struct.W_Struct', } appleveldefs = { 'error': 'app_struct.error', - 'pack_into': 'app_struct.pack_into', - 'unpack_from': 'app_struct.unpack_from', } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -2,23 +2,8 @@ """ Application-level definitions for the struct module. """ -import struct class error(Exception): """Exception raised on various occasions; argument is a string describing what is wrong.""" - -# XXX inefficient -def pack_into(fmt, buf, offset, *args): - data = struct.pack(fmt, *args) - buffer(buf)[offset:offset+len(data)] = data - -# XXX inefficient -def unpack_from(fmt, buf, offset=0): - size = struct.calcsize(fmt) - data = buffer(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return struct.unpack(fmt, data) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -5,7 +5,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.module.struct.formatiterator import ( PackFormatIterator, 
UnpackFormatIterator @@ -29,6 +29,7 @@ raise OperationError(w_error, space.wrap(e.msg)) return fmtiter.totalsize + @unwrap_spec(format=str) def pack(space, format, args_w): if jit.isconstant(format): @@ -47,6 +48,23 @@ return space.wrap(fmtiter.result.build()) +# XXX inefficient + at unwrap_spec(format=str, offset=int) +def pack_into(space, format, w_buf, offset, args_w): + res = pack(space, format, args_w).str_w(space) + buf = space.writebuf_w(w_buf) + if offset < 0: + offset += buf.getlength() + size = len(res) + if offset < 0 or (buf.getlength() - offset) < size: + w_module = space.getbuiltinmodule('struct') + w_error = space.getattr(w_module, space.wrap('error')) + raise oefmt(w_error, + "pack_into requires a buffer of at least %d bytes", + size) + buf.setslice(offset, res) + + @unwrap_spec(format=str, input='bufferstr') def unpack(space, format, input): fmtiter = UnpackFormatIterator(space, input) @@ -61,6 +79,27 @@ return space.newtuple(fmtiter.result_w[:]) +# XXX inefficient + at unwrap_spec(format=str, offset=int) +def unpack_from(space, format, w_buf, offset=0): + size = _calcsize(space, format) + buf = space.getarg_w('z*', w_buf) + if buf is None: + w_module = space.getbuiltinmodule('struct') + w_error = space.getattr(w_module, space.wrap('error')) + raise oefmt(w_error, "unpack_from requires a buffer argument") + if offset < 0: + offset += buf.getlength() + if offset < 0 or (buf.getlength() - offset) < size: + w_module = space.getbuiltinmodule('struct') + w_error = space.getattr(w_module, space.wrap('error')) + raise oefmt(w_error, + "unpack_from requires a buffer of at least %d bytes", + size) + data = buf.getslice(offset, offset + size, 1, size) + return unpack(space, format, data) + + class W_Struct(W_Root): _immutable_fields_ = ["format", "size"] diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -2,12 +2,11 @@ Tests 
for the struct module implemented at interp-level in pypy/module/struct. """ -import py from rpython.rlib.rstruct.nativefmttable import native_is_bigendian class AppTestStruct(object): - spaceconfig = dict(usemodules=['struct']) + spaceconfig = dict(usemodules=['struct', 'array']) def setup_class(cls): """ @@ -26,7 +25,6 @@ """ assert issubclass(self.struct.error, Exception) - def test_calcsize_standard(self): """ Check the standard size of the various format characters. @@ -52,14 +50,12 @@ # test with some repetitions and multiple format characters assert calcsize('=bQ3i') == 1 + 8 + 3*4 - def test_index(self): class X(object): def __index__(self): return 3 assert self.struct.unpack("i", self.struct.pack("i", X()))[0] == 3 - def test_deprecation_warning(self): import warnings for code in 'b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'Q': @@ -70,7 +66,6 @@ assert str(w[0].message) == "integer argument expected, got non-integer" assert w[0].category is DeprecationWarning - def test_pack_standard_little(self): """ Check packing with the '<' format specifier. @@ -84,7 +79,6 @@ assert pack("' format specifier. @@ -112,7 +105,6 @@ assert pack(">q", -0x41B2B3B4B5B6B7B8) == '\xbeMLKJIHH' assert pack(">Q", 0x8142434445464748) == '\x81BCDEFGH' - def test_unpack_standard_big(self): """ Check unpacking with the '>' format specifier. @@ -126,7 +118,6 @@ assert unpack(">q", '\xbeMLKJIHH') == (-0x41B2B3B4B5B6B7B8,) assert unpack(">Q", '\x81BCDEFGH') == (0x8142434445464748,) - def test_calcsize_native(self): """ Check that the size of the various format characters is reasonable. @@ -156,7 +147,6 @@ assert calcsize('ibb') == calcsize('i') + 2 * calcsize('b') assert calcsize('ih') == calcsize('i') + calcsize('h') - def test_pack_native(self): """ Check packing with the native format. @@ -174,7 +164,6 @@ assert res[sizeofi:] == '\x05' + '\x00' * (sizeofi-1) assert pack("q", -1) == '\xff' * calcsize("q") - def test_unpack_native(self): """ Check unpacking with the native format. 
@@ -185,7 +174,6 @@ assert unpack("bi", pack("bi", -2, 5)) == (-2, 5) assert unpack("q", '\xff' * calcsize("q")) == (-1,) - def test_string_format(self): """ Check the 's' format character. @@ -200,7 +188,6 @@ assert unpack("5s3s", "worldspa") == ("world", "spa") assert unpack("0s", "") == ("",) - def test_pascal_format(self): """ Check the 'p' format character. @@ -220,7 +207,6 @@ assert unpack("1p", "\x03") == ("",) assert unpack("300p", longpacked300) == (longstring[:255],) - def test_char_format(self): """ Check the 'c' format character. @@ -232,7 +218,6 @@ assert unpack("c", "?") == ("?",) assert unpack("5c", "a\xc0\x00\n-") == ("a", "\xc0", "\x00", "\n", "-") - def test_pad_format(self): """ Check the 'x' format character. @@ -244,7 +229,6 @@ assert unpack("x", "?") == () assert unpack("5x", "hello") == () - def test_native_floats(self): """ Check the 'd' and 'f' format characters on native packing. @@ -261,7 +245,6 @@ assert res != 12.34 # precision lost assert abs(res - 12.34) < 1E-6 - def test_standard_floats(self): """ Check the 'd' and 'f' format characters on standard packing. @@ -280,7 +263,6 @@ def test_bool(self): pack = self.struct.pack - unpack = self.struct.unpack assert pack("!?", True) == '\x01' assert pack(">?", True) == '\x01' assert pack("!?", False) == '\x00' @@ -343,15 +325,12 @@ raises(error, pack, "b", 150) # argument out of range # XXX the accepted ranges still differs between PyPy and CPython - def test_overflow_error(self): """ Check OverflowError cases. 
""" import sys calcsize = self.struct.calcsize - pack = self.struct.pack - unpack = self.struct.unpack someerror = (OverflowError, self.struct.error) raises(someerror, calcsize, "%dc" % (sys.maxint+1,)) raises(someerror, calcsize, "999999999999999999999999999c") @@ -360,7 +339,6 @@ raises(someerror, calcsize, "c%dc" % (sys.maxint,)) raises(someerror, calcsize, "%dci" % (sys.maxint,)) - def test_unicode(self): """ A PyPy extension: accepts the 'u' format character in native mode, @@ -374,7 +352,6 @@ assert data == str(buffer(u'XYZ')) assert self.struct.unpack("uuu", data) == (u'X', u'Y', u'Z') - def test_unpack_buffer(self): """ Buffer objects can be passed to struct.unpack(). @@ -383,6 +360,36 @@ assert self.struct.unpack("ii", b) == (62, 12) raises(self.struct.error, self.struct.unpack, "i", b) + def test_pack_unpack_buffer(self): + import array + b = array.array('c', '\x00' * 19) + sz = self.struct.calcsize("ii") + for offset in [2, -17]: + self.struct.pack_into("ii", b, offset, 17, 42) + assert str(buffer(b)) == ('\x00' * 2 + + self.struct.pack("ii", 17, 42) + + '\x00' * (19-sz-2)) + exc = raises(TypeError, self.struct.pack_into, "ii", buffer(b), 0, 17, 42) + assert str(exc.value) == "buffer is read-only" + exc = raises(TypeError, self.struct.pack_into, "ii", 'test', 0, 17, 42) + assert str(exc.value) == "Cannot use string as modifiable buffer" + exc = raises(self.struct.error, self.struct.pack_into, "ii", b[0:1], 0, 17, 42) + assert str(exc.value) == "pack_into requires a buffer of at least 8 bytes" + + assert self.struct.unpack_from("ii", b, 2) == (17, 42) + assert self.struct.unpack_from("ii", b, -17) == (17, 42) + assert self.struct.unpack_from("ii", buffer(b, 2)) == (17, 42) + assert self.struct.unpack_from("ii", buffer(b), 2) == (17, 42) + assert self.struct.unpack_from("ii", memoryview(buffer(b)), 2) == (17, 42) + exc = raises(TypeError, self.struct.unpack_from, "ii", 123) + assert 'must be string or buffer, not int' in str(exc.value) + exc = 
raises(self.struct.error, self.struct.unpack_from, "ii", None) + assert str(exc.value) == "unpack_from requires a buffer argument" + exc = raises(self.struct.error, self.struct.unpack_from, "ii", '') + assert str(exc.value) == "unpack_from requires a buffer of at least 8 bytes" + exc = raises(self.struct.error, self.struct.unpack_from, "ii", memoryview('')) + assert str(exc.value) == "unpack_from requires a buffer of at least 8 bytes" + def test___float__(self): class MyFloat(object): def __init__(self, x): diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -4,6 +4,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.sliceobject import W_SliceObject +from rpython.rlib.buffer import StringBuffer from rpython.rlib.objectmodel import instantiate, we_are_translated, specialize from rpython.rlib.nonconst import NonConstant from rpython.rlib.rarithmetic import r_uint, r_singlefloat @@ -39,6 +40,9 @@ def setclass(self, space, w_subtype): is_root(w_subtype) + def buffer_w(self, space, flags): + return StringBuffer("foobar") + def str_w(self, space): return NonConstant("foobar") @@ -69,6 +73,9 @@ def get_module(self): return w_some_obj() + def get_module_type_name(self): + return self.name + def w_some_obj(): if NonConstant(False): return W_Root() @@ -295,11 +302,6 @@ ec._py_repr = None return ec - def buffer_w(self, w_obj): - from pypy.interpreter.buffer import Buffer - is_root(w_obj) - return Buffer() - def unicode_from_object(self, w_obj): return w_some_obj() @@ -342,7 +344,7 @@ ObjSpace.ExceptionTable + ['int', 'str', 'float', 'long', 'tuple', 'list', 'dict', 'unicode', 'complex', 'slice', 'bool', - 'basestring', 'object', 'bytearray']): + 'basestring', 'object', 'bytearray', 'buffer']): setattr(space, 'w_' + name, w_some_obj()) space.w_type = w_some_type() # diff --git 
a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/bufferobject.py @@ -0,0 +1,159 @@ +""" +Implementation of the 'buffer' and 'memoryview' types. From noreply at buildbot.pypy.org Thu Apr 24 23:43:11 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 24 Apr 2014 23:43:11 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140424214311.4FE681C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70949:ba1169097696 Date: 2014-04-24 17:38 -0400 http://bitbucket.org/pypy/pypy/changeset/ba1169097696/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -9,3 +9,6 @@ Improve optimiziation of small allocation-heavy loops in the JIT .. branch: reflex-support + +.. branch: refactor-buffer-api +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage From noreply at buildbot.pypy.org Fri Apr 25 00:12:31 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 25 Apr 2014 00:12:31 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test Message-ID: <20140424221231.D9AB51C0E7C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70950:253828267285 Date: 2014-04-24 23:53 +0300 http://bitbucket.org/pypy/pypy/changeset/253828267285/ Log: fix test diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -104,9 +104,11 @@ def f(): if isinstance(udir.as_unicode(), str): _udir = udir.as_unicode() + _res = ', ' else: _udir = udir - return u', '.join(rposix.listdir(_udir)) + _res = u', ' + return _res.join(rposix.listdir(_udir)) result = interpret(f, []) assert os.path.basename(self.ufilename) in ll_to_string(result) else: From noreply at buildbot.pypy.org Fri Apr 25 01:38:56 2014 From: 
noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 01:38:56 +0200 (CEST) Subject: [pypy-commit] pypy default: array.fromstring accepts buffers Message-ID: <20140424233856.542B11D2C86@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70951:a6ca15aed189 Date: 2014-04-24 19:33 -0400 http://bitbucket.org/pypy/pypy/changeset/a6ca15aed189/ Log: array.fromstring accepts buffers diff --git a/lib-python/2.7/test/test_array.py b/lib-python/2.7/test/test_array.py --- a/lib-python/2.7/test/test_array.py +++ b/lib-python/2.7/test/test_array.py @@ -298,6 +298,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a + b + with self.assertRaises(TypeError): a + 'bad' @@ -320,6 +321,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a += b + with self.assertRaises(TypeError): a += 'bad' diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -42,7 +42,7 @@ if len(__args__.arguments_w) > 0: w_initializer = __args__.arguments_w[0] if space.type(w_initializer) is space.w_str: - a.descr_fromstring(space, space.str_w(w_initializer)) + a.descr_fromstring(space, w_initializer) elif space.type(w_initializer) is space.w_list: a.descr_fromlist(space, w_initializer) else: @@ -232,13 +232,13 @@ self._charbuf_stop() return self.space.wrap(s) - @unwrap_spec(s=str) - def descr_fromstring(self, space, s): + def descr_fromstring(self, space, w_s): """ fromstring(string) Appends items from the string, interpreting it as an array of machine values,as if it had been read from a file using the fromfile() method). 
""" + s = space.getarg_w('s#', w_s) if len(s) % self.itemsize != 0: msg = 'string length not a multiple of item size' raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) @@ -270,10 +270,10 @@ elems = max(0, len(item) - (len(item) % self.itemsize)) if n != 0: item = item[0:elems] - self.descr_fromstring(space, item) + self.descr_fromstring(space, space.wrap(item)) msg = "not enough items in file" raise OperationError(space.w_EOFError, space.wrap(msg)) - self.descr_fromstring(space, item) + self.descr_fromstring(space, w_item) @unwrap_spec(w_f=W_File) def descr_tofile(self, space, w_f): diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -155,6 +155,11 @@ a.fromstring('Hi!') assert a[0] == 'H' and a[1] == 'i' and a[2] == '!' and len(a) == 3 a = self.array('c') + a.fromstring(buffer('xyz')) + exc = raises(TypeError, a.fromstring, memoryview('xyz')) + assert str(exc.value) == "must be string or read-only buffer, not memoryview" + assert a[0] == 'x' and a[1] == 'y' and a[2] == 'z' and len(a) == 3 + a = self.array('c') a.fromstring('') assert not len(a) From noreply at buildbot.pypy.org Fri Apr 25 01:59:38 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Apr 2014 01:59:38 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default (ea86924e88fb) Message-ID: <20140424235938.6D9AA1C03FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70952:e0ce5503a026 Date: 2014-04-24 16:58 -0700 http://bitbucket.org/pypy/pypy/changeset/e0ce5503a026/ Log: merge default (ea86924e88fb) diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation 
detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -560,6 +560,12 @@ Fixing these bootstrap problems is on the TODO list. The global namespace is ``cppyy.gbl``. +* **NULL**: Is represented as ``cppyy.gbl.nullptr``. + In C++11, the keyword ``nullptr`` is used to represent ``NULL``. + For clarity of intent, it is recommended to use this instead of ``None`` + (or the integer ``0``, which can serve in some cases), as ``None`` is better + understood as ``void`` in C++. + * **operator conversions**: If defined in the C++ class and a python equivalent exists (i.e. all builtin integer and floating point types, as well as ``bool``), it will map onto that python conversion. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -66,58 +66,26 @@ Reflex ====== -This method is still experimental. It adds the `cppyy`_ module. -The method works by using the `Reflex package`_ to provide reflection -information of the C++ code, which is then used to automatically generate -bindings at runtime. 
-From a python standpoint, there is no difference between generating bindings -at runtime, or having them "statically" generated and available in scripts -or compiled into extension modules: python classes and functions are always -runtime structures, created when a script or module loads. +The builtin `cppyy`_ module uses reflection information, provided by +`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +automatically generate bindings at runtime. +In Python, classes and functions are always runtime structures, so when they +are generated matters not for performance. However, if the backend itself is capable of dynamic behavior, it is a much -better functional match to python, allowing tighter integration and more -natural language mappings. -Full details are `available here`_. +better functional match, allowing tighter integration and more natural +language mappings. + +The `cppyy`_ module is written in RPython, thus PyPy's JIT is able to remove +most cross-language call overhead. + +`Full details`_ are `available here`_. .. _`cppyy`: cppyy.html -.. _`reflex-support`: cppyy.html -.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex +.. _`installed separately`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 +.. _`Reflex`: http://root.cern.ch/drupal/content/reflex +.. _`Full details`: cppyy.html .. _`available here`: cppyy.html -Pros ----- - -The cppyy module is written in RPython, which makes it possible to keep the -code execution visible to the JIT all the way to the actual point of call into -C++, thus allowing for a very fast interface. -Reflex is currently in use in large software environments in High Energy -Physics (HEP), across many different projects and packages, and its use can be -virtually completely automated in a production environment. -One of its uses in HEP is in providing language bindings for CPython. -Thus, it is possible to use Reflex to have bound code work on both CPython and -on PyPy. 
-In the medium-term, Reflex will be replaced by `cling`_, which is based on -`llvm`_. -This will affect the backend only; the python-side interface is expected to -remain the same, except that cling adds a lot of dynamic behavior to C++, -enabling further language integration. - -.. _`cling`: http://root.cern.ch/drupal/content/cling -.. _`llvm`: http://llvm.org/ - -Cons ----- - -C++ is a large language, and cppyy is not yet feature-complete. -Still, the experience gained in developing the equivalent bindings for CPython -means that adding missing features is a simple matter of engineering, not a -question of research. -The module is written so that currently missing features should do no harm if -you don't use them, if you do need a particular feature, it may be necessary -to work around it in python or with a C++ helper function. -Although Reflex works on various platforms, the bindings with PyPy have only -been tested on Linux. - RPython Mixed Modules ===================== diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -38,6 +38,24 @@ typedef std::map Scopes_t; static Scopes_t s_scopes; +class PseudoExample01 { +public: + PseudoExample01() : m_somedata(-99) {} + PseudoExample01(int a) : m_somedata(a) {} + PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} + PseudoExample01& operator=(const PseudoExample01& e) { + if (this != &e) m_somedata = e.m_somedata; + return *this; + } + virtual ~PseudoExample01() {} + +public: + int m_somedata; +}; + +static int example01_last_static_method = 0; +static int example01_last_constructor = 0; + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- @@ -46,27 +64,62 @@ std::vector methods; - // static double staticAddToDouble(double a); + // ( 0) static double staticAddToDouble(double a) std::vector argtypes; 
argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); - // static int staticAddOneToInt(int a); - // static int staticAddOneToInt(int a, int b); + // ( 1) static int staticAddOneToInt(int a) + // ( 2) static int staticAddOneToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); - // static int staticAtoi(const char* str); + // ( 3) static int staticAtoi(const char* str) argtypes.clear(); argtypes.push_back("const char*"); methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); - // static char* staticStrcpy(const char* strin); + // ( 4) static char* staticStrcpy(const char* strin) methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // ( 5) static void staticSetPayload(payload* p, double d) + // ( 6) static payload* staticCyclePayload(payload* p, double d) + // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + argtypes.clear(); + argtypes.push_back("payload*"); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + + // ( 8) static int getCount() + // ( 9) static void setCount(int) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + + // cut-off is used in cppyy_is_static + example01_last_static_method = methods.size(); + + // (10) example01() + // (11) example01(int a) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, 
"constructor")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + + // cut-off is used in cppyy_is_constructor + example01_last_constructor = methods.size(); + + // (12) double addDataToDouble(double a) + argtypes.clear(); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; // -- class example01 @@ -98,47 +151,69 @@ } +/* memory management ------------------------------------------------------ */ +void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { + if (handle == s_handles["example01"]) + delete (PseudoExample01*)self; +} + + /* method/function dispatching -------------------------------------------- */ -template -static inline T cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - T result = T(); +int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + int result = 0; switch ((long)method) { - case 0: // double staticAddToDouble(double) - assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; - break; - case 1: // int staticAddOneToInt(int) + case 1: // static int staticAddOneToInt(int) assert(!self && nargs == 1); result = ((CPPYY_G__value*)args)[0].obj.in + 1; break; - case 2: // int staticAddOneToInt(int, int) + case 2: // static int staticAddOneToInt(int, int) assert(!self && nargs == 2); result = ((CPPYY_G__value*)args)[0].obj.in + ((CPPYY_G__value*)args)[1].obj.in + 1; break; - case 3: // int staticAtoi(const char* str) + case 3: // static int staticAtoi(const char* str) assert(!self && nargs == 1); result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); break; + case 8: // static int getCount() + assert(!self && nargs == 0); + // can't actually call this method (would need to resolve example01::count), but + // other than the memory 
tests, most tests just check for 0 at the end + result = 0; + break; default: + assert(!"method unknown in cppyy_call_i"); break; } return result; } -int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); -} - long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - // char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); - char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return (long)strout; + if ((long)method == 4) { // static char* staticStrcpy(const char* strin) + const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); + char* strout = (char*)malloc(::strlen(strin)+1); + ::strcpy(strout, strin); + return (long)strout; + } + assert(!"method unknown in cppyy_call_l"); + return 0; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); + double result = 0.; + switch ((long)method) { + case 0: // static double staticAddToDouble(double) + assert(!self && nargs == 1); + result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + break; + case 12: // double addDataToDouble(double a) + assert(self && nargs == 1); + result = ((PseudoExample01*)self)->m_somedata + ((CPPYY_G__value*)args)[0].obj.d; + break; + default: + assert(!"method unknown in cppyy_call_d"); + break; + } + return result; } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { @@ -149,10 +224,31 @@ return strout; } +cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { + void* result = 0; + if (handle == s_handles["example01"]) { + switch ((long)method) { + case 10: + assert(nargs == 0); + result = new PseudoExample01; + break; + case 11: + assert(nargs == 1); + result = new PseudoExample01(((CPPYY_G__value*)args)[0].obj.in); + break; 
+ default: + assert(!"method unknown in cppyy_constructor"); + break; + } + } + return (cppyy_object_t)result; +} + cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { return (cppyy_methptrgetter_t)0; } + /* handling of function argument buffer ----------------------------------- */ void* cppyy_allocate_function_args(size_t nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); @@ -200,7 +296,11 @@ } int cppyy_has_complex_hierarchy(cppyy_type_t /* handle */) { - return 1; + return 0; +} + +int cppyy_num_bases(cppyy_type_t /*handle*/) { + return 0; } @@ -252,11 +352,16 @@ /* method properties ----------------------------------------------------- */ -int cppyy_is_constructor(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return example01_last_static_method <= method_index + && method_index < example01_last_constructor; return 0; } -int cppyy_is_staticmethod(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return method_index < example01_last_static_method ? 
1 : 0; return 1; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -7,11 +7,12 @@ if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex - if not item.location[0] in ['test_helper.py', 'test_cppyy.py']: + if not ('test_helper.py' in item.location[0] or \ + 'test_cppyy.py' in item.location[0]): py.test.skip("genreflex is not installed") import re - if item.location[0] == 'test_cppyy.py' and \ - not re.search("test0[1-3]", item.location[2]): + if 'test_cppyy.py' in item.location[0] and \ + not re.search("test0[1-36]", item.location[2]): py.test.skip("genreflex is not installed") def pytest_configure(config): diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -330,21 +330,8 @@ def invalid_typecode(space, u, tc): - # %r not supported in rpython - #u.raise_exc('invalid typecode in unmarshal: %r' % tc) - c = ord(tc) - if c < 16: - s = '\\x0%x' % c - elif c < 32 or c > 126: - s = '\\x%x' % c - elif tc == '\\': - s = r'\\' - else: - s = tc - q = "'" - if s[0] == "'": - q = '"' - u.raise_exc('invalid typecode in unmarshal: ' + q + s + q) + u.raise_exc("bad marshal data (unknown type code)") + def register(codes, func): """NOT_RPYTHON""" diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -15,11 +15,14 @@ print(repr(s)) x = marshal.loads(s) assert x == case and type(x) is type(case) - f = BytesIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case and type(x) is type(case) + + import sys + if '__pypy__' in sys.builtin_module_names: + f = StringIO.StringIO() + marshal.dump(case, f) + 
f.seek(0) + x = marshal.load(f) + assert x == case and type(x) is type(case) return x def test_None(self): @@ -190,8 +193,8 @@ def test_bad_typecode(self): import marshal - exc = raises(ValueError, marshal.loads, b'\x01') - assert r"'\x01'" in str(exc.value) + exc = raises(ValueError, marshal.loads, chr(1)) + assert str(exc.value) == "bad marshal data (unknown type code)" def test_bad_data(self): import marshal From noreply at buildbot.pypy.org Fri Apr 25 02:41:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 02:41:32 +0200 (CEST) Subject: [pypy-commit] pypy default: fix usage of bufferstr_w in _winreg Message-ID: <20140425004132.E95A81C02F2@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70953:b78e00087934 Date: 2014-04-24 17:37 -0700 http://bitbucket.org/pypy/pypy/changeset/b78e00087934/ Log: fix usage of bufferstr_w in _winreg diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -2,7 +2,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError, wrap_windowserror +from pypy.interpreter.error import OperationError, wrap_windowserror, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rwinreg, rwin32 from rpython.rlib.rarithmetic import r_uint, intmask @@ -327,7 +327,14 @@ buf = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw') buf[0] = '\0' else: - value = space.bufferstr_w(w_value) + try: + value = w_value.readbuf_w(space) + except TypeError: + raise oefmt(space.w_TypeError, + "Objects of type '%T' can not be used as binary " + "registry values", w_value) + else: + value = value.as_str() buflen = len(value) buf = rffi.str2charp(value) diff --git a/pypy/module/_winreg/test/test_winreg.py 
b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -137,11 +137,15 @@ assert 0, "Did not raise" def test_SetValueEx(self): - from _winreg import CreateKey, SetValueEx + from _winreg import CreateKey, SetValueEx, REG_BINARY key = CreateKey(self.root_key, self.test_key_name) sub_key = CreateKey(key, "sub_key") for name, value, type in self.test_data: SetValueEx(sub_key, name, 0, type, value) + exc = raises(TypeError, SetValueEx, sub_key, 'test_name', None, + REG_BINARY, memoryview('abc')) + assert str(exc.value) == ("Objects of type 'memoryview' can not " + "be used as binary registry values") def test_readValues(self): from _winreg import OpenKey, EnumValue, QueryValueEx, EnumKey From noreply at buildbot.pypy.org Fri Apr 25 03:07:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 03:07:41 +0200 (CEST) Subject: [pypy-commit] pypy default: reduce diff with upstream Message-ID: <20140425010741.DFDCD1C0E7C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70954:ba11a9b9aab5 Date: 2014-04-24 21:03 -0400 http://bitbucket.org/pypy/pypy/changeset/ba11a9b9aab5/ Log: reduce diff with upstream diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -479,11 +479,10 @@ def _create_file(self): if self.use_buffering: - f = open(self.filename, "w+", buffering=1024*16) + self.f = open(self.filename, "w+", buffering=1024*16) else: - f = open(self.filename, "w+") - self.f = f - self.all_files.append(f) + self.f = open(self.filename, "w+") + self.all_files.append(self.f) oldf = self.all_files.pop(0) if oldf is not None: oldf.close() From noreply at buildbot.pypy.org Fri Apr 25 03:17:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 03:17:23 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_bytebuffer on win32 
Message-ID: <20140425011723.A45671C0E7C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70955:8a2e9e0c1676 Date: 2014-04-24 21:16 -0400 http://bitbucket.org/pypy/pypy/changeset/8a2e9e0c1676/ Log: fix test_bytebuffer on win32 diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -25,5 +25,6 @@ assert str(b) == "\x00xy" + "\x00" * 7 b[4:8:2] = 'zw' assert str(b) == "\x00xy\x00z\x00w" + "\x00" * 3 - b[6:10] = u'#' - assert str(b) == "\x00xy\x00z\x00#" + "\x00" * 3 + r = str(buffer(u'#')) + b[6:6+len(r)] = u'#' + assert str(b[:6+len(r)]) == "\x00xy\x00z\x00" + r From noreply at buildbot.pypy.org Fri Apr 25 03:37:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Apr 2014 03:37:12 +0200 (CEST) Subject: [pypy-commit] pypy default: close it when finished Message-ID: <20140425013712.577321C3512@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70956:308ded060ac8 Date: 2014-04-24 18:34 -0700 http://bitbucket.org/pypy/pypy/changeset/308ded060ac8/ Log: close it when finished diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,9 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) + try: + imp.load_module('_testcapi', fp, filename, description) + finally: + fp.close() except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) From noreply at buildbot.pypy.org Fri Apr 25 03:37:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Apr 2014 03:37:13 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: close it when finished Message-ID: 
<20140425013713.933941C3512@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70957:4cbca19dc221 Date: 2014-04-24 18:34 -0700 http://bitbucket.org/pypy/pypy/changeset/4cbca19dc221/ Log: close it when finished (grafted from 308ded060ac8ffbf1b1a16d88486558980b4bc1b) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,9 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) + try: + imp.load_module('_testcapi', fp, filename, description) + finally: + fp.close() except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) From noreply at buildbot.pypy.org Fri Apr 25 03:45:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Apr 2014 03:45:09 +0200 (CEST) Subject: [pypy-commit] pypy default: close it in _ctypes_test too Message-ID: <20140425014509.2AD7B1C03FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70958:b2c55136868a Date: 2014-04-24 18:44 -0700 http://bitbucket.org/pypy/pypy/changeset/b2c55136868a/ Log: close it in _ctypes_test too diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -17,7 +18,8 @@ output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) + with fp: + imp.load_module('_ctypes_test', fp, filename, description) except ImportError: print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_testcapi.py 
b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -13,9 +13,7 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - try: + with fp: imp.load_module('_testcapi', fp, filename, description) - finally: - fp.close() except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) From noreply at buildbot.pypy.org Fri Apr 25 04:04:32 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 25 Apr 2014 04:04:32 +0200 (CEST) Subject: [pypy-commit] pypy default: merge reflex-support: disable tests when -A and no genreflex Message-ID: <20140425020432.C63EB1C03FC@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r70960:377369757ddc Date: 2014-04-24 19:03 -0700 http://bitbucket.org/pypy/pypy/changeset/377369757ddc/ Log: merge reflex-support: disable tests when -A and no genreflex diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -15,6 +15,10 @@ not re.search("test0[1-36]", item.location[2]): py.test.skip("genreflex is not installed") +def pytest_ignore_collect(path, config): + if py.path.local.sysfind('genreflex') is None and config.option.runappdirect: + return True # "can't run dummy tests in -A" + def pytest_configure(config): if py.path.local.sysfind('genreflex') is None: import pypy.module.cppyy.capi.loadable_capi as lcapi @@ -22,6 +26,9 @@ import ctypes ctypes.CDLL(lcapi.reflection_library) except Exception, e: + if config.option.runappdirect: + return # "can't run dummy tests in -A" + # build dummy backend (which has reflex info and calls hard-wired) import os from rpython.translator.tool.cbuild import ExternalCompilationInfo From noreply at buildbot.pypy.org Fri Apr 25 04:04:31 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 25 Apr 2014 04:04:31 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: disable all tests if run with -A and 
genreflex not available Message-ID: <20140425020431.9F3141C03FC@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70959:534ad693bc66 Date: 2014-04-24 19:02 -0700 http://bitbucket.org/pypy/pypy/changeset/534ad693bc66/ Log: disable all tests if run with -A and genreflex not available diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -15,6 +15,10 @@ not re.search("test0[1-36]", item.location[2]): py.test.skip("genreflex is not installed") +def pytest_ignore_collect(path, config): + if py.path.local.sysfind('genreflex') is None and config.option.runappdirect: + return True # "can't run dummy tests in -A" + def pytest_configure(config): if py.path.local.sysfind('genreflex') is None: import pypy.module.cppyy.capi.loadable_capi as lcapi @@ -22,6 +26,9 @@ import ctypes ctypes.CDLL(lcapi.reflection_library) except Exception, e: + if config.option.runappdirect: + return # "can't run dummy tests in -A" + # build dummy backend (which has reflex info and calls hard-wired) import os from rpython.translator.tool.cbuild import ExternalCompilationInfo From noreply at buildbot.pypy.org Fri Apr 25 08:54:39 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 25 Apr 2014 08:54:39 +0200 (CEST) Subject: [pypy-commit] pypy default: fix typo in doc Message-ID: <20140425065439.D628C1C03FC@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r70961:61191b4a5eac Date: 2014-04-24 23:53 -0700 http://bitbucket.org/pypy/pypy/changeset/61191b4a5eac/ Log: fix typo in doc diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -583,7 +583,7 @@ Special care needs to be taken for global operator overloads in C++: first, make sure that they are actually reflected, especially for the global overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of 
gcc (note that they are not needed to iterator over a vector). + the case of gcc (note that they are not needed to iterate over a vector). Second, make sure that reflection info is loaded in the proper order. I.e. that these global overloads are available before use. From noreply at buildbot.pypy.org Fri Apr 25 09:32:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 25 Apr 2014 09:32:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue1742 resolved Message-ID: <20140425073215.A085D1C3448@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70962:a65f77b101c2 Date: 2014-04-25 09:31 +0200 http://bitbucket.org/pypy/pypy/changeset/a65f77b101c2/ Log: Issue1742 resolved Fix the path after the directory structure change (thanks peter). diff --git a/pypy/sandbox/pypy_interact.py b/pypy/sandbox/pypy_interact.py --- a/pypy/sandbox/pypy_interact.py +++ b/pypy/sandbox/pypy_interact.py @@ -21,7 +21,7 @@ """ import sys, os -sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))) from rpython.translator.sandbox.sandlib import SimpleIOSandboxedProc from rpython.translator.sandbox.sandlib import VirtualizedSandboxedProc from rpython.translator.sandbox.vfs import Dir, RealDir, RealFile From noreply at buildbot.pypy.org Fri Apr 25 09:52:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 25 Apr 2014 09:52:15 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify, shell=True makes this work as is Message-ID: <20140425075215.C7CD81C1008@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70964:a55bcb4345c9 Date: 2014-04-25 10:39 +0300 http://bitbucket.org/pypy/pypy/changeset/a55bcb4345c9/ Log: simplify, shell=True makes this work as is diff --git a/rpython/translator/platform/test/test_posix.py b/rpython/translator/platform/test/test_posix.py --- 
a/rpython/translator/platform/test/test_posix.py +++ b/rpython/translator/platform/test/test_posix.py @@ -9,13 +9,8 @@ res = host.execute('echo', '42 24') assert res.out == '42 24\n' - if sys.platform == 'win32': - # echo is a shell builtin on Windows - res = host.execute('cmd', ['/c', 'echo', '42', '24']) - assert res.out == '42 24\n' - else: - res = host.execute('echo', ['42', '24']) - assert res.out == '42 24\n' + res = host.execute('echo', ['42', '24']) + assert res.out == '42 24\n' class TestMakefile(object): platform = host From noreply at buildbot.pypy.org Fri Apr 25 09:52:14 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 25 Apr 2014 09:52:14 +0200 (CEST) Subject: [pypy-commit] pypy default: disable sandbox on windows, fix tests Message-ID: <20140425075214.925681C1008@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70963:6ef1921d8d68 Date: 2014-04-25 10:09 +0300 http://bitbucket.org/pypy/pypy/changeset/6ef1921d8d68/ Log: disable sandbox on windows, fix tests diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -3,6 +3,10 @@ trampolines that marshal their input arguments, dump them to STDOUT, and wait for an answer on STDIN. Enable with 'translate.py --sandbox'. 
""" +import sys +if sys.platform == 'win32': + raise TypeError("sandbox not supported on windows") + import py from rpython.rlib import rmarshal, types diff --git a/rpython/translator/sandbox/test/test_sandbox.py b/rpython/translator/sandbox/test/test_sandbox.py --- a/rpython/translator/sandbox/test/test_sandbox.py +++ b/rpython/translator/sandbox/test/test_sandbox.py @@ -25,7 +25,20 @@ check_str_without_nul=True) return str(t.compile()) +unsupported_platform = ('False', '') +if sys.platform == 'win32': + unsupported_platform = ('True', 'sandbox not supported on this platform') + def test_unavailable(): + def entry_point(argv): + fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) + os.close(fd) + return 0 + exc = py.test.raises(TypeError, compile, entry_point) + assert str(exc).find('not supported') >= 0 +supported = py.test.mark.skipif(unsupported_platform[0], reason=unsupported_platform[1]) + + at supported def test_open_dup(): def entry_point(argv): fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) @@ -43,6 +56,7 @@ f.close() assert tail == "" + at supported def test_read_write(): def entry_point(argv): fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) @@ -65,6 +79,7 @@ f.close() assert tail == "" + at supported def test_dup2_access(): def entry_point(argv): os.dup2(34, 56) @@ -80,6 +95,7 @@ f.close() assert tail == "" + at supported def test_stat_ftruncate(): from rpython.translator.sandbox.sandlib import RESULTTYPE_STATRESULT from rpython.rlib.rarithmetic import r_longlong @@ -101,6 +117,7 @@ f.close() assert tail == "" + at supported def test_time(): def entry_point(argv): t = time.time() @@ -116,6 +133,7 @@ f.close() assert tail == "" + at supported def test_getcwd(): def entry_point(argv): t = os.getcwd() @@ -131,6 +149,7 @@ f.close() assert tail == "" + at supported def test_oserror(): def entry_point(argv): try: @@ -148,6 +167,7 @@ f.close() assert tail == "" + at supported def test_hybrid_gc(): def entry_point(argv): l = [] @@ -172,6 +192,7 @@ rescode = 
pipe.wait() assert rescode == 0 + at supported def test_segfault_1(): class A: def __init__(self, m): @@ -194,6 +215,7 @@ e.close() assert 'Invalid RPython operation' in errors + at supported def test_segfault_2(): py.test.skip("hum, this is one example, but we need to be very careful") class Base: @@ -226,6 +248,7 @@ e.close() assert '...think what kind of errors to get...' in errors + at supported def test_safe_alloc(): from rpython.rlib.rmmap import alloc, free @@ -246,6 +269,7 @@ rescode = pipe.wait() assert rescode == 0 + at supported def test_unsafe_mmap(): py.test.skip("Since this stuff is unimplemented, it won't work anyway " "however, the day it starts working, it should pass test") @@ -271,6 +295,7 @@ rescode = pipe.wait() assert rescode == 0 + at supported class TestPrintedResults: def run(self, entry_point, args, expected): diff --git a/rpython/translator/sandbox/test/test_sandlib.py b/rpython/translator/sandbox/test/test_sandlib.py --- a/rpython/translator/sandbox/test/test_sandlib.py +++ b/rpython/translator/sandbox/test/test_sandlib.py @@ -6,10 +6,10 @@ from rpython.translator.sandbox.sandlib import SimpleIOSandboxedProc from rpython.translator.sandbox.sandlib import VirtualizedSandboxedProc from rpython.translator.sandbox.sandlib import VirtualizedSocketProc -from rpython.translator.sandbox.test.test_sandbox import compile +from rpython.translator.sandbox.test.test_sandbox import compile, supported from rpython.translator.sandbox.vfs import Dir, File, RealDir, RealFile - + at supported class MockSandboxedProc(SandboxedProc): """A sandbox process wrapper that replays expected syscalls.""" @@ -35,7 +35,7 @@ do_ll_os__ll_os_write = _make_method("write") do_ll_os__ll_os_close = _make_method("close") - + at supported def test_lib(): def entry_point(argv): fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) @@ -63,6 +63,7 @@ proc.handle_forever() assert proc.seen == len(proc.expected) + at supported def test_foobar(): py.test.skip("to be updated") foobar = 
rffi.llexternal("foobar", [rffi.CCHARP], rffi.LONG) @@ -79,6 +80,7 @@ proc.handle_forever() assert proc.seen == len(proc.expected) + at supported def test_simpleio(): def entry_point(argv): print "Please enter a number:" @@ -100,6 +102,7 @@ assert output == "Please enter a number:\nThe double is: 42\n" assert error == "" + at supported def test_socketio(): class SocketProc(VirtualizedSocketProc, SimpleIOSandboxedProc): def build_virtual_root(self): @@ -116,6 +119,7 @@ output, error = proc.communicate("") assert output.startswith('HTTP/1.0 503 Service Unavailable') + at supported def test_oserror(): def entry_point(argv): try: @@ -133,6 +137,7 @@ assert proc.seen == len(proc.expected) + at supported class SandboxedProcWithFiles(VirtualizedSandboxedProc, SimpleIOSandboxedProc): """A sandboxed process with a simple virtualized filesystem. @@ -145,6 +150,7 @@ 'this.pyc': RealFile(__file__), }) + at supported def test_too_many_opens(): def entry_point(argv): try: @@ -186,6 +192,7 @@ assert output == "All ok!\n" assert error == "" + at supported def test_fstat(): def compare(a, b, i): if a != b: @@ -219,6 +226,7 @@ assert output == "All ok!\n" assert error == "" + at supported def test_lseek(): def char_should_be(c, should): if c != should: @@ -248,6 +256,7 @@ assert output == "All ok!\n" assert error == "" + at supported def test_getuid(): def entry_point(argv): import os diff --git a/rpython/translator/sandbox/test/test_vfs.py b/rpython/translator/sandbox/test/test_vfs.py --- a/rpython/translator/sandbox/test/test_vfs.py +++ b/rpython/translator/sandbox/test/test_vfs.py @@ -2,10 +2,13 @@ import sys, stat, os from rpython.translator.sandbox.vfs import * from rpython.tool.udir import udir +from rpython.translator.sandbox.test.test_sandbox import unsupported_platform HASLINK = hasattr(os, 'symlink') def setup_module(mod): + if unsupported_platform[0] == 'True': + py.test.skip(unsupported_platform[1]) d = udir.ensure('test_vfs', dir=1) d.join('file1').write('somedata1') 
d.join('file2').write('somelongerdata2') From noreply at buildbot.pypy.org Fri Apr 25 10:15:21 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 25 Apr 2014 10:15:21 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into reflex-support branch Message-ID: <20140425081521.230411C11B1@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70965:846c0fc5d749 Date: 2014-04-24 23:55 -0700 http://bitbucket.org/pypy/pypy/changeset/846c0fc5d749/ Log: merge default into reflex-support branch diff too long, truncating to 2000 out of 4209 lines diff --git a/lib-python/2.7/test/test_array.py b/lib-python/2.7/test/test_array.py --- a/lib-python/2.7/test/test_array.py +++ b/lib-python/2.7/test/test_array.py @@ -298,6 +298,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a + b + with self.assertRaises(TypeError): a + 'bad' @@ -320,6 +321,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a += b + with self.assertRaises(TypeError): a += 'bad' diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -479,11 +479,10 @@ def _create_file(self): if self.use_buffering: - f = open(self.filename, "w+", buffering=1024*16) + self.f = open(self.filename, "w+", buffering=1024*16) else: - f = open(self.filename, "w+") - self.f = f - self.all_files.append(f) + self.f = open(self.filename, "w+") + self.all_files.append(self.f) oldf = self.all_files.pop(0) if oldf is not None: oldf.close() diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, 
combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -115,8 +115,8 @@ self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object - self.assertRaises((ValueError, TypeError), setitem, 0, b"") - self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") + self.assertRaises(ValueError, setitem, 0, b"") + self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") @@ -166,18 +166,11 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - if test_support.check_impl_detail(cpython=True): - # what is supported and what is not supported by memoryview is - # very inconsisten on CPython. In PyPy, memoryview supports - # the buffer interface, and thus the following comparison - # succeeds. 
See also the comment in - # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer - # - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef") + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -17,7 +18,8 @@ output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) + with fp: + imp.load_module('_ctypes_test', fp, filename, description) except ImportError: print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,7 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) + with fp: + imp.load_module('_testcapi', fp, filename, description) except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -560,6 +560,12 @@ Fixing these bootstrap problems is on the TODO list. The global namespace is ``cppyy.gbl``. 
+* **NULL**: Is represented as ``cppyy.gbl.nullptr``. + In C++11, the keyword ``nullptr`` is used to represent ``NULL``. + For clarity of intent, it is recommended to use this instead of ``None`` + (or the integer ``0``, which can serve in some cases), as ``None`` is better + understood as ``void`` in C++. + * **operator conversions**: If defined in the C++ class and a python equivalent exists (i.e. all builtin integer and floating point types, as well as ``bool``), it will map onto that python conversion. @@ -577,7 +583,7 @@ Special care needs to be taken for global operator overloads in C++: first, make sure that they are actually reflected, especially for the global overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterator over a vector). + the case of gcc (note that they are not needed to iterate over a vector). Second, make sure that reflection info is loaded in the proper order. I.e. that these global overloads are available before use. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -66,58 +66,26 @@ Reflex ====== -This method is still experimental. It adds the `cppyy`_ module. -The method works by using the `Reflex package`_ to provide reflection -information of the C++ code, which is then used to automatically generate -bindings at runtime. -From a python standpoint, there is no difference between generating bindings -at runtime, or having them "statically" generated and available in scripts -or compiled into extension modules: python classes and functions are always -runtime structures, created when a script or module loads. +The builtin `cppyy`_ module uses reflection information, provided by +`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +automatically generate bindings at runtime. 
+In Python, classes and functions are always runtime structures, so when they +are generated matters not for performance. However, if the backend itself is capable of dynamic behavior, it is a much -better functional match to python, allowing tighter integration and more -natural language mappings. -Full details are `available here`_. +better functional match, allowing tighter integration and more natural +language mappings. + +The `cppyy`_ module is written in RPython, thus PyPy's JIT is able to remove +most cross-language call overhead. + +`Full details`_ are `available here`_. .. _`cppyy`: cppyy.html -.. _`reflex-support`: cppyy.html -.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex +.. _`installed separately`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 +.. _`Reflex`: http://root.cern.ch/drupal/content/reflex +.. _`Full details`: cppyy.html .. _`available here`: cppyy.html -Pros ----- - -The cppyy module is written in RPython, which makes it possible to keep the -code execution visible to the JIT all the way to the actual point of call into -C++, thus allowing for a very fast interface. -Reflex is currently in use in large software environments in High Energy -Physics (HEP), across many different projects and packages, and its use can be -virtually completely automated in a production environment. -One of its uses in HEP is in providing language bindings for CPython. -Thus, it is possible to use Reflex to have bound code work on both CPython and -on PyPy. -In the medium-term, Reflex will be replaced by `cling`_, which is based on -`llvm`_. -This will affect the backend only; the python-side interface is expected to -remain the same, except that cling adds a lot of dynamic behavior to C++, -enabling further language integration. - -.. _`cling`: http://root.cern.ch/drupal/content/cling -.. _`llvm`: http://llvm.org/ - -Cons ----- - -C++ is a large language, and cppyy is not yet feature-complete. 
-Still, the experience gained in developing the equivalent bindings for CPython -means that adding missing features is a simple matter of engineering, not a -question of research. -The module is written so that currently missing features should do no harm if -you don't use them, if you do need a particular feature, it may be necessary -to work around it in python or with a C++ helper function. -Although Reflex works on various platforms, the bindings with PyPy have only -been tested on Linux. - RPython Mixed Modules ===================== diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,7 +5,10 @@ .. this is a revision shortly after release-2.3.x .. startrev: ba569fe1efdb - - .. branch: small-unroll-improvements Improve optimiziation of small allocation-heavy loops in the JIT + +.. branch: reflex-support + +.. branch: refactor-buffer-api +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -5,7 +5,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id) + compute_unique_id, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX @@ -194,13 +194,37 @@ def immutable_unique_id(self, space): return None - def buffer_w(self, space): + def buffer_w(self, space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): - return w_result.buffer_w(space) - self._typed_unwrap_error(space, "buffer") + return w_result.buffer_w(space, flags) + raise 
TypeError + + def readbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.readbuf_w(space) + raise TypeError + + def writebuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.writebuf_w(space) + raise TypeError + + def charbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.charbuf_w(space) + raise TypeError def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -1340,25 +1364,111 @@ self.wrap('cannot convert negative integer ' 'to unsigned int')) - def buffer_w(self, w_obj): - return w_obj.buffer_w(self) + BUF_SIMPLE = 0x0000 + BUF_WRITABLE = 0x0001 + BUF_FORMAT = 0x0004 + BUF_ND = 0x0008 + BUF_STRIDES = 0x0010 | BUF_ND + BUF_INDIRECT = 0x0100 | BUF_STRIDES - def rwbuffer_w(self, w_obj): - # returns a RWBuffer instance - from pypy.interpreter.buffer import RWBuffer - buffer = self.buffer_w(w_obj) - if not isinstance(buffer, RWBuffer): - raise OperationError(self.w_TypeError, - self.wrap('read-write buffer expected')) - return buffer + BUF_CONTIG_RO = BUF_ND + BUF_CONTIG = BUF_ND | BUF_WRITABLE - def bufferstr_new_w(self, w_obj): - # Implement the "new buffer interface" (new in Python 2.7) - # returning an unwrapped string. 
It doesn't accept unicode - # strings - buffer = self.buffer_w(w_obj) - return buffer.as_str() + BUF_FULL_RO = BUF_INDIRECT | BUF_FORMAT + BUF_FULL = BUF_INDIRECT | BUF_FORMAT | BUF_WRITABLE + def check_buf_flags(self, flags, readonly): + if readonly and flags & self.BUF_WRITABLE == self.BUF_WRITABLE: + raise oefmt(self.w_BufferError, "Object is not writable.") + + def buffer_w(self, w_obj, flags): + # New buffer interface, returns a buffer based on flags (PyObject_GetBuffer) + try: + return w_obj.buffer_w(self, flags) + except TypeError: + raise oefmt(self.w_TypeError, + "'%T' does not have the buffer interface", w_obj) + + def readbuf_w(self, w_obj): + # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) + try: + return w_obj.readbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a readable buffer object") + + def writebuf_w(self, w_obj): + # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) + try: + return w_obj.writebuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a writeable buffer object") + + def charbuf_w(self, w_obj): + # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) + try: + return w_obj.charbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a character buffer object") + + def _getarg_error(self, expected, w_obj): + if self.is_none(w_obj): + name = "None" + else: + name = self.type(w_obj).get_module_type_name() + raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + + @specialize.arg(1) + def getarg_w(self, code, w_obj): + if code == 'z*': + if self.is_none(w_obj): + return None + code = 's*' + if code == 's*': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.readbuf_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).readbuf_w(self) + try: + return w_obj.buffer_w(self, 0) + except TypeError: + pass + try: + return w_obj.readbuf_w(self) + except TypeError: 
+ self._getarg_error("string or buffer", w_obj) + elif code == 's#': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.str_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).str_w(self) + try: + return w_obj.readbuf_w(self).as_str() + except TypeError: + self._getarg_error("string or read-only buffer", w_obj) + elif code == 'w*': + try: + try: + return w_obj.buffer_w(self, self.BUF_WRITABLE) + except OperationError: + self._getarg_error("read-write buffer", w_obj) + except TypeError: + pass + try: + return w_obj.writebuf_w(self) + except TypeError: + self._getarg_error("read-write buffer", w_obj) + elif code == 't#': + try: + return w_obj.charbuf_w(self) + except TypeError: + self._getarg_error("string or read-only character buffer", w_obj) + else: + assert False + + # XXX rename/replace with code more like CPython getargs for buffers def bufferstr_w(self, w_obj): # Directly returns an interp-level str. Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): @@ -1373,8 +1483,18 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - buffer = self.buffer_w(w_obj) - return buffer.as_str() + try: + buf = w_obj.buffer_w(self, 0) + except TypeError: + pass + else: + return buf.as_str() + try: + buf = w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + else: + return buf.as_str() def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): @@ -1721,6 +1841,7 @@ 'AssertionError', 'AttributeError', 'BaseException', + 'BufferError', 'DeprecationWarning', 'EOFError', 'EnvironmentError', diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py deleted file mode 100644 --- a/pypy/interpreter/buffer.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Buffer protocol support. 
-""" -from rpython.rlib.objectmodel import import_from_mixin - - -class Buffer(object): - """Abstract base class for buffers.""" - __slots__ = [] - - def getlength(self): - raise NotImplementedError - - def as_str(self): - "Returns an interp-level string with the whole content of the buffer." - # May be overridden. - return self.getslice(0, self.getlength(), 1, self.getlength()) - - def getitem(self, index): - "Returns the index'th character in the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def getslice(self, start, stop, step, size): - # May be overridden. No bounds checks. - return ''.join([self.getitem(i) for i in range(start, stop, step)]) - - def get_raw_address(self): - raise ValueError("no raw buffer") - - def is_writable(self): - return False - - -class RWBuffer(Buffer): - """Abstract base class for read-write buffers.""" - __slots__ = [] - - def is_writable(self): - return True - - def setitem(self, index, char): - "Write a character into the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def setslice(self, start, string): - # May be overridden. No bounds checks. 
- for i in range(len(string)): - self.setitem(start + i, string[i]) - - -class StringBuffer(Buffer): - __slots__ = ['value'] - - def __init__(self, value): - self.value = value - - def getlength(self): - return len(self.value) - - def as_str(self): - return self.value - - def getitem(self, index): - return self.value[index] - - def getslice(self, start, stop, step, size): - if size == 0: - return "" - if step == 1: - assert 0 <= start <= stop - return self.value[start:stop] - return "".join([self.value[start + i*step] for i in xrange(size)]) -# ____________________________________________________________ - - -class SubBufferMixin(object): - _attrs_ = ['buffer', 'offset', 'size'] - - def __init__(self, buffer, offset, size): - self.buffer = buffer - self.offset = offset - self.size = size - - def getlength(self): - at_most = self.buffer.getlength() - self.offset - if 0 <= self.size <= at_most: - return self.size - elif at_most >= 0: - return at_most - else: - return 0 - - def getitem(self, index): - return self.buffer.getitem(self.offset + index) - - def getslice(self, start, stop, step, size): - if start == stop: - return '' # otherwise, adding self.offset might make them - # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, - step, size) - - -class SubBuffer(Buffer): - import_from_mixin(SubBufferMixin) - - -class RWSubBuffer(RWBuffer): - import_from_mixin(SubBufferMixin) - - def setitem(self, index, char): - self.buffer.setitem(self.offset + index, char) - - def setslice(self, start, string): - if len(string) == 0: - return # otherwise, adding self.offset might make 'start' - # out of bounds - self.buffer.setslice(self.offset + start, string) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -881,8 +881,8 @@ def LOAD_NAME(self, nameindex, next_instr): if self.w_locals is not self.w_globals: - w_varname = self.getname_w(nameindex) - 
w_value = self.space.finditem(self.w_locals, w_varname) + varname = self.getname_u(nameindex) + w_value = self.space.finditem_str(self.w_locals, varname) if w_value is not None: self.pushvalue(w_value) return diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py deleted file mode 100644 --- a/pypy/interpreter/test/test_buffer.py +++ /dev/null @@ -1,43 +0,0 @@ -import py -from rpython.tool.udir import udir - -testdir = udir.ensure('test_buffer', dir=1) - - -class TestBuffer: - def test_buffer_w(self): - space = self.space - w_hello = space.wrap('hello world') - buf = space.buffer_w(w_hello) - assert buf.getlength() == 11 - assert buf.as_str() == 'hello world' - assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.newbuffer(buf)) is buf - assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' - space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - - def test_file_write(self): - space = self.space - w_buffer = space.newbuffer(space.buffer_w(space.wrap('hello world'))) - filename = str(testdir.join('test_file_write')) - space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): - f = open(filename, 'wb') - f.write(buffer) - f.close() - """) - f = open(filename, 'rb') - data = f.read() - f.close() - assert data == 'hello world' - - def test_unicode(self): - space = self.space - s = space.bufferstr_w(space.wrap(u'hello')) - assert type(s) is str - assert s == 'hello' - space.raises_w(space.w_UnicodeEncodeError, - space.bufferstr_w, space.wrap(u'\xe9')) - - -# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -2,13 +2,16 @@ # A convenient read-write buffer. Located here for want of a better place. 
# -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from pypy.interpreter.gateway import unwrap_spec -class ByteBuffer(RWBuffer): +class ByteBuffer(Buffer): + _immutable_ = True + def __init__(self, len): self.data = ['\x00'] * len + self.readonly = False def getlength(self): return len(self.data) diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -13,3 +13,18 @@ assert b[-1] == '*' assert b[-2] == '-' assert b[-3] == '+' + exc = raises(TypeError, "b[3] = 'abc'") + assert str(exc.value) == "right operand must be a single byte" + exc = raises(TypeError, "b[3:5] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + exc = raises(TypeError, "b[3:7:2] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + + b = bytebuffer(10) + b[1:3] = 'xy' + assert str(b) == "\x00xy" + "\x00" * 7 + b[4:8:2] = 'zw' + assert str(b) == "\x00xy\x00z\x00w" + "\x00" * 3 + r = str(buffer(u'#')) + b[6:6+len(r)] = u'#' + assert str(b[:6+len(r)]) == "\x00xy\x00z\x00" + r diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,21 +1,22 @@ -from pypy.interpreter.buffer import RWBuffer -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer +from rpython.rlib.buffer import Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi from 
rpython.rtyper.lltypesystem.rstr import copy_string_to_raw -class LLBuffer(RWBuffer): +class LLBuffer(Buffer): _immutable_ = True def __init__(self, raw_cdata, size): self.raw_cdata = raw_cdata self.size = size + self.readonly = False def getlength(self): return self.size @@ -32,7 +33,7 @@ def getslice(self, start, stop, step, size): if step == 1: return rffi.charpsize2str(rffi.ptradd(self.raw_cdata, start), size) - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) @@ -46,6 +47,14 @@ W_Buffer.__init__(self, buffer) self.keepalive = keepalive + def descr_setitem(self, space, w_index, w_obj): + try: + W_Buffer.descr_setitem(self, space, w_index, w_obj) + except OperationError as e: + if e.match(space, space.w_TypeError): + e.w_type = space.w_ValueError + raise + MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -72,8 +72,8 @@ 'utf_32_le_decode' : 'interp_codecs.utf_32_le_decode', 'utf_32_le_encode' : 'interp_codecs.utf_32_le_encode', 'utf_32_ex_decode' : 'interp_codecs.utf_32_ex_decode', - 'charbuffer_encode': 'interp_codecs.buffer_encode', - 'readbuffer_encode': 'interp_codecs.buffer_encode', + 'charbuffer_encode': 'interp_codecs.charbuffer_encode', + 'readbuffer_encode': 'interp_codecs.readbuffer_encode', 'charmap_decode' : 'interp_codecs.charmap_decode', 'charmap_encode' : 'interp_codecs.charmap_encode', 'escape_encode' : 'interp_codecs.escape_encode', diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -321,8 +321,14 @@ w_res = space.call_function(w_encoder, w_obj, space.wrap(errors)) return space.getitem(w_res, 
space.wrap(0)) - at unwrap_spec(s='bufferstr', errors='str_or_None') -def buffer_encode(space, s, errors='strict'): + at unwrap_spec(errors='str_or_None') +def readbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('s#', w_data) + return space.newtuple([space.wrap(s), space.wrap(len(s))]) + + at unwrap_spec(errors='str_or_None') +def charbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('t#', w_data) return space.newtuple([space.wrap(s), space.wrap(len(s))]) @unwrap_spec(errors=str) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -420,9 +420,13 @@ for (i, line) in enumerate(reader): assert line == s[i] - def test_array(self): + def test_buffer_encode(self): import _codecs, array - _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + assert _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + exc = raises(TypeError, _codecs.charbuffer_encode, array.array('c', 'spam')) + assert str(exc.value) == "must be string or read-only character buffer, not array.array" + assert _codecs.readbuffer_encode(u"test") == ('test', 4) + assert _codecs.charbuffer_encode(u"test") == ('test', 4) def test_utf8sig(self): import codecs diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -267,9 +267,14 @@ def direct_write(self, w_data): space = self.space - if not self.binary and space.isinstance_w(w_data, space.w_unicode): - w_data = space.call_method(w_data, "encode", space.wrap(self.encoding), space.wrap(self.errors)) - data = space.bufferstr_w(w_data) + if self.binary: + data = space.getarg_w('s*', w_data).as_str() + else: + if space.isinstance_w(w_data, space.w_unicode): + w_data = space.call_method(w_data, "encode", + space.wrap(self.encoding), + space.wrap(self.errors)) + 
data = space.charbuf_w(w_data) self.do_direct_write(data) def do_direct_write(self, data): @@ -469,7 +474,7 @@ """readinto() -> Undocumented. Don't use this; it may go away.""" # XXX not the most efficient solution as it doesn't avoid the copying space = self.space - rwbuffer = space.rwbuffer_w(w_rwbuffer) + rwbuffer = space.writebuf_w(w_rwbuffer) w_data = self.file_read(rwbuffer.getlength()) data = space.str_w(w_data) rwbuffer.setslice(0, data) diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -80,7 +80,7 @@ self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() w_data = space.call_method(self, "read", space.wrap(length)) @@ -101,11 +101,14 @@ readinto = interp2app(W_BufferedIOBase.readinto_w), ) -class RawBuffer(RWBuffer): +class RawBuffer(Buffer): + _immutable_ = True + def __init__(self, buf, start, length): self.buf = buf self.start = start self.length = length + self.readonly = False def getlength(self): return self.length @@ -698,7 +701,7 @@ def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() size = len(data) with self.lock: diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py 
+++ b/pypy/module/_io/interp_bytesio.py @@ -41,7 +41,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) size = rwbuffer.getlength() output = self.read(size) @@ -50,10 +50,7 @@ def write_w(self, space, w_data): self._check_closed(space) - if space.isinstance_w(w_data, space.w_unicode): - raise OperationError(space.w_TypeError, space.wrap( - "bytes string of buffer expected")) - buf = space.bufferstr_w(w_data) + buf = space.buffer_w(w_data, space.BUF_CONTIG_RO).as_str() length = len(buf) if length <= 0: return space.wrap(0) diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -333,7 +333,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() try: n = os.write(self.fd, data) @@ -366,7 +366,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) self._check_readable(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() try: buf = os.read(self.fd, length) diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,6 +139,14 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, 
memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\ncxxxxx' @@ -235,7 +243,8 @@ import _io raw = _io.FileIO(self.tmpfile, 'w') f = _io.BufferedWriter(raw) - f.write("abcd") + f.write("ab") + f.write(u"cd") f.close() assert self.readfile() == "abcd" diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -38,6 +38,8 @@ f = _io.BytesIO() assert f.write("") == 0 assert f.write("hello") == 5 + exc = raises(TypeError, f.write, u"lo") + assert str(exc.value) == "'unicode' does not have the buffer interface" import gc; gc.collect() assert f.getvalue() == "hello" f.close() @@ -97,6 +99,14 @@ a2 = bytearray('testing') assert b.readinto(a1) == 1 assert b.readinto(a2) == 4 + exc = raises(TypeError, b.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, b.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" b.close() assert a1 == "h" assert a2 == "elloing" diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -82,7 +82,8 @@ import _io filename = self.tmpfile + '_w' f = _io.FileIO(filename, 'wb') - f.write("test") + f.write("te") + f.write(u"st") # try without flushing f2 = _io.FileIO(filename, 'rb') assert f2.read() == "test" @@ -135,6 +136,14 @@ a = bytearray('x' * 10) f = _io.FileIO(self.tmpfile, 'r+') assert f.readinto(a) == 10 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == 
"cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\nc\0\0\0\0\0' # diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -80,8 +80,9 @@ raise OperationError(space.w_IOError, space.wrap("connection is read-only")) - @unwrap_spec(buf='bufferstr', offset='index', size='index') - def send_bytes(self, space, buf, offset=0, size=PY_SSIZE_T_MIN): + @unwrap_spec(offset='index', size='index') + def send_bytes(self, space, w_buf, offset=0, size=PY_SSIZE_T_MIN): + buf = space.getarg_w('s*', w_buf).as_str() length = len(buf) self._check_writable(space) if offset < 0: @@ -122,7 +123,7 @@ @unwrap_spec(offset='index') def recv_bytes_into(self, space, w_buffer, offset=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.writebuf_w(w_buffer) length = rwbuffer.getlength() res, newbuf = self.do_recv_string( @@ -149,7 +150,7 @@ w_pickled = space.call_method( w_picklemodule, "dumps", w_obj, w_protocol) - buf = space.bufferstr_w(w_pickled) + buf = space.str_w(w_pickled) self.do_send_string(space, buf, 0, len(buf)) def recv(self, space): diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,12 +1,14 @@ -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer # XXX not the most efficient implementation -class RawFFIBuffer(RWBuffer): +class RawFFIBuffer(Buffer): + _immutable_ = True 
def __init__(self, datainstance): self.datainstance = datainstance + self.readonly = False def getlength(self): return self.datainstance.getrawsize() diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -16,6 +16,7 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.rarithmetic import intmask, r_uint +from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker TYPEMAP = { @@ -352,8 +353,13 @@ lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) - def buffer_w(self, space): - from pypy.module._rawffi.buffer import RawFFIBuffer + def buffer_w(self, space, flags): + return RawFFIBuffer(self) + + def readbuf_w(self, space): + return RawFFIBuffer(self) + + def writebuf_w(self, space): return RawFFIBuffer(self) def getrawsize(self): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1100,6 +1100,12 @@ assert a[3] == 'z' assert a[4] == 't' + b = memoryview(a) + assert len(b) == 10 + assert b[3] == 'z' + b[3] = 'x' + assert b[3] == 'x' + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -419,7 +419,7 @@ @unwrap_spec(nbytes=int, flags=int) def recv_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt @@ -430,7 +430,7 @@ @unwrap_spec(nbytes=int, flags=int) def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = 
space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -545,8 +545,12 @@ s.connect(("www.python.org", 80)) except _socket.gaierror, ex: skip("GAIError - probably no connection: %s" % str(ex.args)) + exc = raises(TypeError, s.send, None) + assert str(exc.value) == "must be string or buffer, not None" assert s.send(buffer('')) == 0 assert s.sendall(buffer('')) is None + assert s.send(memoryview('')) == 0 + assert s.sendall(memoryview('')) is None assert s.send(u'') == 0 assert s.sendall(u'') is None raises(UnicodeEncodeError, s.send, u'\xe9') @@ -678,6 +682,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes = cli.recv_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_recvfrom_into(self): import socket import array @@ -693,6 +704,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes, addr = cli.recvfrom_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_family(self): import socket cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -34,8 +34,8 @@ def slice_w(space, ctx, start, end, w_default): if 0 <= start <= end: - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrap(ctx._string[start:end]) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,7 +98,7 @@ space.wrap("cannot 
copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a StrMatchContext or a UnicodeMatchContext for searching + """Make a BufMatchContext or a UnicodeMatchContext for searching in the given w_string object.""" space = self.space if pos < 0: @@ -114,12 +114,14 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - str = space.bufferstr_w(w_string) - if pos > len(str): - pos = len(str) - if endpos > len(str): - endpos = len(str) - return rsre_core.StrMatchContext(self.code, str, + buf = space.readbuf_w(w_string) + size = buf.getlength() + assert size >= 0 + if pos > size: + pos = size + if endpos > size: + endpos = size + return rsre_core.BufMatchContext(self.code, buf, pos, endpos, self.flags) def getmatch(self, ctx, found): @@ -477,8 +479,8 @@ def fget_string(self, space): ctx = self.ctx - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrap(ctx._string) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.as_str()) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -2,7 +2,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError, wrap_windowserror +from pypy.interpreter.error import OperationError, wrap_windowserror, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rwinreg, rwin32 from rpython.rlib.rarithmetic import r_uint, intmask @@ -327,7 +327,14 @@ buf = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw') buf[0] = '\0' else: - value = space.bufferstr_w(w_value) + try: + value = w_value.readbuf_w(space) + except TypeError: + 
raise oefmt(space.w_TypeError, + "Objects of type '%T' can not be used as binary " + "registry values", w_value) + else: + value = value.as_str() buflen = len(value) buf = rffi.str2charp(value) diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -137,11 +137,15 @@ assert 0, "Did not raise" def test_SetValueEx(self): - from _winreg import CreateKey, SetValueEx + from _winreg import CreateKey, SetValueEx, REG_BINARY key = CreateKey(self.root_key, self.test_key_name) sub_key = CreateKey(key, "sub_key") for name, value, type in self.test_data: SetValueEx(sub_key, name, 0, type, value) + exc = raises(TypeError, SetValueEx, sub_key, 'test_name', None, + REG_BINARY, memoryview('abc')) + assert str(exc.value) == ("Objects of type 'memoryview' can not " + "be used as binary registry values") def test_readValues(self): from _winreg import OpenKey, EnumValue, QueryValueEx, EnumKey diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -1,6 +1,7 @@ from __future__ import with_statement from rpython.rlib import jit +from rpython.rlib.buffer import Buffer from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable @@ -9,7 +10,6 @@ from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( interp2app, interpindirect2app, unwrap_spec) @@ -42,7 +42,7 @@ if len(__args__.arguments_w) > 0: w_initializer = __args__.arguments_w[0] if space.type(w_initializer) is space.w_str: - a.descr_fromstring(space, space.str_w(w_initializer)) + 
a.descr_fromstring(space, w_initializer) elif space.type(w_initializer) is space.w_list: a.descr_fromlist(space, w_initializer) else: @@ -132,8 +132,11 @@ self.len = 0 self.allocated = 0 - def buffer_w(self, space): - return ArrayBuffer(self) + def readbuf_w(self, space): + return ArrayBuffer(self, True) + + def writebuf_w(self, space): + return ArrayBuffer(self, False) def descr_append(self, space, w_x): """ append(x) @@ -229,13 +232,13 @@ self._charbuf_stop() return self.space.wrap(s) - @unwrap_spec(s=str) - def descr_fromstring(self, space, s): + def descr_fromstring(self, space, w_s): """ fromstring(string) Appends items from the string, interpreting it as an array of machine values,as if it had been read from a file using the fromfile() method). """ + s = space.getarg_w('s#', w_s) if len(s) % self.itemsize != 0: msg = 'string length not a multiple of item size' raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) @@ -267,10 +270,10 @@ elems = max(0, len(item) - (len(item) % self.itemsize)) if n != 0: item = item[0:elems] - self.descr_fromstring(space, item) + self.descr_fromstring(space, space.wrap(item)) msg = "not enough items in file" raise OperationError(space.w_EOFError, space.wrap(msg)) - self.descr_fromstring(space, item) + self.descr_fromstring(space, w_item) @unwrap_spec(w_f=W_File) def descr_tofile(self, space, w_f): @@ -583,9 +586,12 @@ v.typecode = k unroll_typecodes = unrolling_iterable(types.keys()) -class ArrayBuffer(RWBuffer): - def __init__(self, array): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, array, readonly): self.array = array + self.readonly = readonly def getlength(self): return self.array.len * self.array.itemsize diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -155,6 +155,11 @@ a.fromstring('Hi!') assert a[0] == 'H' and a[1] == 'i' and a[2] == '!' 
and len(a) == 3 a = self.array('c') + a.fromstring(buffer('xyz')) + exc = raises(TypeError, a.fromstring, memoryview('xyz')) + assert str(exc.value) == "must be string or read-only buffer, not memoryview" + assert a[0] == 'x' and a[1] == 'y' and a[2] == 'z' and len(a) == 3 + a = self.array('c') a.fromstring('') assert not len(a) @@ -421,12 +426,8 @@ def test_buffer_write(self): a = self.array('c', 'hello') buf = buffer(a) - print repr(buf) - try: - buf[3] = 'L' - except TypeError: - skip("buffer(array) returns a read-only buffer on CPython") - assert a.tostring() == 'helLo' + exc = raises(TypeError, "buf[3] = 'L'") + assert str(exc.value) == "buffer is read-only" def test_buffer_keepalive(self): buf = buffer(self.array('c', 'text')) diff --git a/pypy/module/cStringIO/interp_stringio.py b/pypy/module/cStringIO/interp_stringio.py --- a/pypy/module/cStringIO/interp_stringio.py +++ b/pypy/module/cStringIO/interp_stringio.py @@ -160,10 +160,10 @@ raise OperationError(space.w_IOError, space.wrap("negative size")) self.truncate(size) - @unwrap_spec(buffer='bufferstr') - def descr_write(self, buffer): + def descr_write(self, space, w_buffer): + buffer = space.getarg_w('s*', w_buffer) self.check_closed() - self.write(buffer) + self.write(buffer.as_str()) def descr_writelines(self, w_lines): self.check_closed() @@ -236,5 +236,5 @@ if space.is_none(w_string): return space.wrap(W_OutputType(space)) else: - string = space.bufferstr_w(w_string) + string = space.getarg_w('s*', w_string).as_str() return space.wrap(W_InputType(space, string)) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.buffer_w(w_obj) + buf = space.buffer_w(w_obj, space.BUF_SIMPLE) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # 
copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.buffer_w(w_value) + buf = space.buffer_w(w_value, space.BUF_SIMPLE) # TODO: report if too many items given? for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.buffer_w(w_value) + buf = space.buffer_w(w_value, space.BUF_SIMPLE) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -22,7 +22,6 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.module import Module from pypy.interpreter.function import StaticMethod -from pypy.objspace.std.memoryview import W_MemoryView from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject @@ -474,7 +473,7 @@ "PyLong_Type": "space.w_long", "PyComplex_Type": "space.w_complex", "PyByteArray_Type": "space.w_bytearray", - "PyMemoryView_Type": "space.gettypeobject(W_MemoryView.typedef)", + "PyMemoryView_Type": "space.w_memoryview", "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", "PyBaseObject_Type": "space.w_object", 'PyNone_Type': 'space.type(space.w_None)', diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -1,12 +1,12 @@ +from rpython.rlib.buffer import StringBuffer, SubBuffer from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.buffer import StringBuffer, SubBuffer from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import ( 
cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref from pypy.module.array.interp_array import ArrayBuffer -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer PyBufferObjectStruct = lltype.ForwardReference() diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -15,8 +15,8 @@ from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments +from rpython.rlib.buffer import Buffer from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize from rpython.tool.sourcetools import func_renamer @@ -230,11 +230,13 @@ class CPyBuffer(Buffer): # Similar to Py_buffer + _immutable_ = True def __init__(self, ptr, size, w_obj): self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive + self.readonly = True def getlength(self): return self.size diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -53,8 +53,11 @@ def test_buffer(self): module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) + buf = buffer(arr) + exc = raises(TypeError, "buf[1] = '1'") + assert str(exc.value) == "buffer is read-only" # XXX big-endian - assert str(buffer(arr)) == ('\x01\0\0\0' - '\x02\0\0\0' - '\x03\0\0\0' - '\x04\0\0\0') + assert str(buf) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ 
b/pypy/module/fcntl/interp_fcntl.py @@ -1,6 +1,6 @@ from rpython.rtyper.tool import rffi_platform as platform from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, wrap_oserror, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib import rposix from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -92,33 +92,27 @@ op = rffi.cast(rffi.INT, op) # C long => C int try: - intarg = space.int_w(w_arg) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - intarg = rffi.cast(rffi.INT, intarg) # C long => C int - rv = fcntl_int(fd, op, intarg) - if rv < 0: - raise _get_error(space, "fcntl") - return space.wrap(rv) - - try: - arg = space.bufferstr_w(w_arg) + arg = space.getarg_w('s#', w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: ll_arg = rffi.str2charp(arg) - rv = fcntl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, "fcntl") - return space.wrap(arg) + try: + rv = fcntl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "fcntl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') - raise OperationError(space.w_TypeError, - space.wrap("int or string or buffer required")) + intarg = space.int_w(w_arg) + intarg = rffi.cast(rffi.INT, intarg) # C long => C int + rv = fcntl_int(fd, op, intarg) + if rv < 0: + raise _get_error(space, "fcntl") + return space.wrap(rv) @unwrap_spec(op=int) def flock(space, w_fd, op): @@ -207,50 +201,50 @@ fd = space.c_filedescriptor_w(w_fd) op = rffi.cast(rffi.INT, op) # C long => C int - if mutate_flag != 0: - try: - rwbuffer = space.rwbuffer_w(w_arg) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - if mutate_flag > 
0: - raise - else: - arg = rwbuffer.as_str() - ll_arg = rffi.str2charp(arg) - rv = ioctl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, "ioctl") - rwbuffer.setslice(0, arg) - return space.wrap(rv) - try: - intarg = space.int_w(w_arg) + rwbuffer = space.writebuf_w(w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: - intarg = rffi.cast(rffi.INT, intarg) # C long => C int - rv = ioctl_int(fd, op, intarg) - if rv < 0: - raise _get_error(space, "ioctl") - return space.wrap(rv) + arg = rwbuffer.as_str() + ll_arg = rffi.str2charp(arg) + try: + rv = ioctl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "ioctl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + if mutate_flag != 0: + rwbuffer.setslice(0, arg) + return space.wrap(rv) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') + + if mutate_flag != -1: + raise OperationError(space.w_TypeError, space.wrap( + "ioctl requires a file or file descriptor, an integer " + "and optionally an integer or buffer argument")) try: - arg = space.bufferstr_w(w_arg) + arg = space.getarg_w('s#', w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: ll_arg = rffi.str2charp(arg) - rv = ioctl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, "ioctl") - return space.wrap(arg) + try: + rv = ioctl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "ioctl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') - raise OperationError(space.w_TypeError, - space.wrap("int or string or buffer required")) + intarg = space.int_w(w_arg) + intarg = rffi.cast(rffi.INT, intarg) # C long => C int + rv = ioctl_int(fd, op, intarg) + if rv < 0: + raise _get_error(space, "ioctl") + return space.wrap(rv) diff 
--git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -51,6 +51,8 @@ assert fcntl.fcntl(f, 1, 0) == 0 assert fcntl.fcntl(f, 2, "foo") == "foo" assert fcntl.fcntl(f, 2, buffer("foo")) == "foo" + exc = raises(TypeError, fcntl.fcntl, f, 2, memoryview("foo")) + assert 'integer' in str(exc.value) try: os.O_LARGEFILE @@ -226,6 +228,18 @@ assert res == 0 assert buf.tostring() == expected + buf = array.array('i', [0]) + res = fcntl.ioctl(mfd, TIOCGPGRP, buffer(buf)) + assert res == expected + assert buf.tostring() == '\x00' * 4 + + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview('abc')) + assert 'integer' in str(exc.value) + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, buffer(buf), False) + assert str(exc.value) == "ioctl requires a file or file descriptor, an integer and optionally an integer or buffer argument" + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview('abc'), False) + assert str(exc.value) == "ioctl requires a file or file descriptor, an integer and optionally an integer or buffer argument" + res = fcntl.ioctl(mfd, TIOCGPGRP, buf, False) assert res == expected diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -220,7 +220,7 @@ space = self.space if space.type(w_obj).is_heaptype(): try: - buf = space.buffer_w(w_obj) + buf = space.readbuf_w(w_obj) except OperationError as e: if not e.match(space, space.w_TypeError): raise @@ -327,21 +327,8 @@ def invalid_typecode(space, u, tc): - # %r not supported in rpython - #u.raise_exc('invalid typecode in unmarshal: %r' % tc) - c = ord(tc) - if c < 16: - s = '\\x0%x' % c - elif c < 32 or c > 126: - s = '\\x%x' % c - elif tc == '\\': - s = r'\\' - else: - s = tc - q = "'" - if s[0] == "'": - q = '"' - u.raise_exc('invalid typecode in 
unmarshal: ' + q + s + q) + u.raise_exc("bad marshal data (unknown type code)") + def register(codes, func): """NOT_RPYTHON""" @@ -476,13 +463,7 @@ # Unmarshaller with inlined buffer string def __init__(self, space, w_str): Unmarshaller.__init__(self, space, None) - try: - self.bufstr = space.bufferstr_w(w_str) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_TypeError, space.wrap( - 'marshal.loads() arg must be string or buffer')) + self.bufstr = space.getarg_w('s#', w_str) self.bufpos = 0 self.limit = len(self.bufstr) diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -14,11 +14,17 @@ print(repr(s)) x = marshal.loads(s) assert x == case and type(x) is type(case) - f = StringIO.StringIO() - marshal.dump(case, f) - f.seek(0) - x = marshal.load(f) - assert x == case and type(x) is type(case) + + exc = raises(TypeError, marshal.loads, memoryview(s)) + assert str(exc.value) == "must be string or read-only buffer, not memoryview" + + import sys + if '__pypy__' in sys.builtin_module_names: + f = StringIO.StringIO() + marshal.dump(case, f) + f.seek(0) + x = marshal.load(f) + assert x == case and type(x) is type(case) return x def test_None(self): @@ -191,7 +197,7 @@ def test_bad_typecode(self): import marshal exc = raises(ValueError, marshal.loads, chr(1)) - assert r"'\x01'" in exc.value.message + assert str(exc.value) == "bad marshal data (unknown type code)" class AppTestSmallLong(AppTestMarshal): diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -43,6 +43,8 @@ s = marshal.dumps(array.array('c', 'asd')) t = marshal.loads(s) assert type(t) is str and t == 'asd' + exc = raises(ValueError, marshal.dumps, 
memoryview('asd')) + assert str(exc.value) == "unmarshallable object" def test_unmarshal_evil_long(self): import marshal diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -344,8 +344,14 @@ def descr_copy(self, space): return self.convert_to(space, self.get_dtype(space)) - def buffer_w(self, space): - return self.descr_ravel(space).buffer_w(space) + def buffer_w(self, space, flags): + return self.descr_ravel(space).buffer_w(space, flags) + + def readbuf_w(self, space): + return self.descr_ravel(space).readbuf_w(space) + + def charbuf_w(self, space): + return self.descr_ravel(space).charbuf_w(space) def descr_byteswap(self, space): return self.get_dtype(space).itemtype.byteswap(self) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,6 +1,6 @@ -from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit +from rpython.rlib.buffer import Buffer from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE @@ -316,8 +316,8 @@ def get_storage(self): return self.storage - def get_buffer(self, space): - return ArrayBuffer(self) + def get_buffer(self, space, readonly): + return ArrayBuffer(self, readonly) def astype(self, space, dtype): strides, backstrides = calc_strides(self.get_shape(), dtype, @@ -471,9 +471,12 @@ free_raw_storage(self.storage) -class ArrayBuffer(RWBuffer): - def __init__(self, impl): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, impl, readonly): self.impl = impl + self.readonly = readonly def getitem(self, item): return raw_storage_getitem(lltype.Char, self.impl.storage, item) diff --git 
a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -610,11 +610,20 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ctypes not implemented yet")) - def buffer_w(self, space): - return self.implementation.get_buffer(space) + def buffer_w(self, space, flags): + return self.implementation.get_buffer(space, True) + + def readbuf_w(self, space): + return self.implementation.get_buffer(space, True) + + def writebuf_w(self, space): + return self.implementation.get_buffer(space, False) + + def charbuf_w(self, space): + return self.implementation.get_buffer(space, True).as_str() def descr_get_data(self, space): - return space.newbuffer(self.buffer_w(space)) + return space.newbuffer(self.implementation.get_buffer(space, False)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): @@ -1188,7 +1197,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) - buf = space.buffer_w(w_buffer) + try: + buf = space.writebuf_w(w_buffer) + except OperationError: + buf = space.readbuf_w(w_buffer) try: raw_ptr = buf.get_raw_address() except ValueError: @@ -1206,7 +1218,7 @@ return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, w_subtype=w_subtype, w_base=w_buffer, - writable=buf.is_writable()) + writable=not buf.readonly) order = order_converter(space, w_order, NPY.CORDER) if order == NPY.CORDER: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -347,6 +347,9 @@ a = np.array([1,2,3]) b = buffer(a) assert type(b) is buffer + assert 'read-only buffer' in repr(b) + exc = raises(TypeError, "b[0] = '0'") + assert str(exc.value) == 'buffer is read-only' def test_type(self): from numpypy import array @@ -2242,6 +2245,7 
@@ a.data[4] = '\xff' assert a[1] == 0xff assert len(a.data) == 16 + assert type(a.data) is buffer def test_explicit_dtype_conversion(self): from numpypy import array diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -2,8 +2,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.buffer import RWBuffer from rpython.rlib import rmmap, rarithmetic +from rpython.rlib.buffer import Buffer from rpython.rlib.rmmap import RValueError, RTypeError, RMMapError if rmmap.HAVE_LARGEFILE_SUPPORT: @@ -17,9 +17,9 @@ self.space = space self.mmap = mmap_obj - def buffer_w(self, space): + def readbuf_w(self, space): self.check_valid() - return MMapBuffer(self.space, self.mmap) + return MMapBuffer(self.space, self.mmap, True) def close(self): self.mmap.close() @@ -286,10 +286,13 @@ mmap_error._dont_inline_ = True -class MMapBuffer(RWBuffer): - def __init__(self, space, mmap): +class MMapBuffer(Buffer): + _immutable_ = True + + def __init__(self, space, mmap, readonly): self.space = space self.mmap = mmap + self.readonly = readonly def getlength(self): return self.mmap.size @@ -303,7 +306,7 @@ if step == 1: return self.mmap.getslice(start, size) else: - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setitem(self, index, char): self.check_valid_writeable() @@ -313,14 +316,6 @@ self.check_valid_writeable() self.mmap.setslice(start, string) - def is_writable(self): - try: - self.mmap.check_writeable() - except RMMapError: - return False - else: - return True - def get_raw_address(self): self.check_valid() return self.mmap.data diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py 
@@ -560,14 +560,24 @@ m = mmap(f.fileno(), 6) m[5] = '?' b = buffer(m) - try: From noreply at buildbot.pypy.org Fri Apr 25 10:15:22 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 25 Apr 2014 10:15:22 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: adjust test to new buffer interface Message-ID: <20140425081522.50D941C11B1@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70966:1f6f6b1fac06 Date: 2014-04-25 00:04 -0700 http://bitbucket.org/pypy/pypy/changeset/1f6f6b1fac06/ Log: adjust test to new buffer interface diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -104,6 +104,7 @@ class dummy: pass self.config = dummy() self.config.translating = False + self.BUF_SIMPLE = 1 def issequence_w(self, w_obj): return True @@ -132,7 +133,7 @@ return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' - def buffer_w(self, w_obj): + def buffer_w(self, w_obj, flags): return FakeBuffer(w_obj) def exception_match(self, typ, sub): From noreply at buildbot.pypy.org Fri Apr 25 10:15:23 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 25 Apr 2014 10:15:23 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: fixups after recent changes to buffers Message-ID: <20140425081523.7C6181C11B1@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70967:7244dec161c4 Date: 2014-04-25 00:58 -0700 http://bitbucket.org/pypy/pypy/changeset/7244dec161c4/ Log: fixups after recent changes to buffers diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.buffer_w(w_obj, space.BUF_SIMPLE) + buf = space.readbuf_w(w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 
+163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.buffer_w(w_value, space.BUF_SIMPLE) + buf = space.readbuf_w(w_value) # TODO: report if too many items given? for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.buffer_w(w_value, space.BUF_SIMPLE) + buf = space.readbuf_w(w_value) try: byteptr[0] = buf.get_raw_address() except ValueError: From noreply at buildbot.pypy.org Fri Apr 25 10:15:25 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 25 Apr 2014 10:15:25 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: add readbuf_w Message-ID: <20140425081525.01A331C11B1@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70968:b95344d84ba8 Date: 2014-04-25 01:10 -0700 http://bitbucket.org/pypy/pypy/changeset/b95344d84ba8/ Log: add readbuf_w diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -136,6 +136,9 @@ def buffer_w(self, w_obj, flags): return FakeBuffer(w_obj) + def readbuf_w(self, w_obj): + return FakeBuffer(w_obj) + def exception_match(self, typ, sub): return typ is sub From noreply at buildbot.pypy.org Fri Apr 25 10:15:26 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 25 Apr 2014 10:15:26 +0200 (CEST) Subject: [pypy-commit] pypy default: merge reflex-support into default: fixes for new buffer interfaces Message-ID: <20140425081526.2D13C1C11B1@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r70969:4b806e538c61 Date: 2014-04-25 01:14 -0700 http://bitbucket.org/pypy/pypy/changeset/4b806e538c61/ Log: 
merge reflex-support into default: fixes for new buffer interfaces diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.buffer_w(w_obj, space.BUF_SIMPLE) + buf = space.readbuf_w(w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.buffer_w(w_value, space.BUF_SIMPLE) + buf = space.readbuf_w(w_value) # TODO: report if too many items given? for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.buffer_w(w_value, space.BUF_SIMPLE) + buf = space.readbuf_w(w_value) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -104,6 +104,7 @@ class dummy: pass self.config = dummy() self.config.translating = False + self.BUF_SIMPLE = 1 def issequence_w(self, w_obj): return True @@ -132,7 +133,10 @@ return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' - def buffer_w(self, w_obj): + def buffer_w(self, w_obj, flags): + return FakeBuffer(w_obj) + + def readbuf_w(self, w_obj): return FakeBuffer(w_obj) def exception_match(self, typ, sub): From noreply at buildbot.pypy.org Fri Apr 25 10:34:28 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 25 Apr 2014 10:34:28 +0200 (CEST) Subject: [pypy-commit] pypy default: respect PYPY_LOCALBASE in windows Message-ID: 
<20140425083428.298951C3434@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70970:3b9b82ae5747 Date: 2014-04-25 11:33 +0300 http://bitbucket.org/pypy/pypy/changeset/3b9b82ae5747/ Log: respect PYPY_LOCALBASE in windows diff --git a/rpython/translator/platform/test/test_posix.py b/rpython/translator/platform/test/test_posix.py --- a/rpython/translator/platform/test/test_posix.py +++ b/rpython/translator/platform/test/test_posix.py @@ -56,8 +56,13 @@ finally: del os.environ['PYPY_LOCALBASE'] Makefile = tmpdir.join('Makefile').read() - assert 'INCLUDEDIRS = -I/foo/baz/include' in Makefile - assert 'LIBDIRS = -L/foo/baz/lib' in Makefile + include_prefix = '-I' + lib_prefix = '-L' + if self.platform.name == 'msvc': + include_prefix = '/I' + lib_prefix = '/LIBPATH:' + assert 'INCLUDEDIRS = %s/foo/baz/include' % include_prefix in Makefile + assert 'LIBDIRS = %s/foo/baz/lib' % lib_prefix in Makefile class TestMaemo(TestMakefile): strict_on_stderr = False diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -292,7 +292,10 @@ rel_ofiles = [rel_cfile[:rel_cfile.rfind('.')]+'.obj' for rel_cfile in rel_cfiles] m.cfiles = rel_cfiles - rel_includedirs = [rpyrel(incldir) for incldir in eci.include_dirs] + rel_includedirs = [rpyrel(incldir) for incldir in + self.preprocess_include_dirs(eci.include_dirs)] + rel_libdirs = [rpyrel(libdir) for libdir in + self.preprocess_library_dirs(eci.library_dirs)] m.comment('automatically generated makefile') definitions = [ @@ -302,7 +305,7 @@ ('SOURCES', rel_cfiles), ('OBJECTS', rel_ofiles), ('LIBS', self._libs(eci.libraries)), - ('LIBDIRS', self._libdirs(eci.library_dirs)), + ('LIBDIRS', self._libdirs(rel_libdirs)), ('INCLUDEDIRS', self._includedirs(rel_includedirs)), ('CFLAGS', self.cflags), ('CFLAGSEXTRA', list(eci.compile_extra)), From noreply at buildbot.pypy.org Fri Apr 25 11:12:14 2014 
From: noreply at buildbot.pypy.org (arigo) Date: Fri, 25 Apr 2014 11:12:14 +0200 (CEST) Subject: [pypy-commit] pypy default: Check that string interpolation works, and is contant-folded if all Message-ID: <20140425091214.CD3221C33B0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70971:6e240b6b7d94 Date: 2014-04-25 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/6e240b6b7d94/ Log: Check that string interpolation works, and is contant-folded if all arguments are constant. diff --git a/rpython/jit/metainterp/test/test_string.py b/rpython/jit/metainterp/test/test_string.py --- a/rpython/jit/metainterp/test/test_string.py +++ b/rpython/jit/metainterp/test/test_string.py @@ -654,3 +654,23 @@ self.check_resops(call_pure=0, unicodesetitem=0, call=2, newunicode=0, unicodegetitem=0, copyunicodecontent=0) + + def test_string_interpolation(self): + def f(x, y): + return len('<%d %d>' % (x, y)) + res = self.interp_operations(f, [222, 3333]) + assert res == 10 + + def test_string_interpolation_constants(self): + jitdriver = JitDriver(greens=['x', 'y'], reds=['z']) + def f(x, y): + z = 0 + while z < 10: + jitdriver.jit_merge_point(x=x, y=y, z=z) + if len('<%d %d>' % (x, y)) != 10: + raise Exception + z += 1 + return 0 + self.meta_interp(f, [222, 3333]) + self.check_simple_loop({'guard_true': 1, 'int_add': 1, + 'int_lt': 1, 'jump': 1}) From noreply at buildbot.pypy.org Fri Apr 25 12:25:52 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 25 Apr 2014 12:25:52 +0200 (CEST) Subject: [pypy-commit] pypy default: write a passing test (because why not) Message-ID: <20140425102552.6D0911C340B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70972:cb98a5f2a5e9 Date: 2014-04-25 12:24 +0200 http://bitbucket.org/pypy/pypy/changeset/cb98a5f2a5e9/ Log: write a passing test (because why not) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- 
a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -8356,6 +8356,31 @@ """ self.optimize_loop(ops, ops) + def test_unroll_failargs(self): + ops = """ + [p0, i1] + p1 = getfield_gc(p0, descr=valuedescr) + i2 = int_add(i1, 1) + i3 = int_le(i2, 13) + guard_true(i3) [p1] + jump(p0, i2) + """ + expected = """ + [p0, i1, p1] + i2 = int_add(i1, 1) + i3 = int_le(i2, 13) + guard_true(i3) [p1] + jump(p0, i2, p1) + """ + preamble = """ + [p0, i1] + p1 = getfield_gc(p0, descr=valuedescr) + i2 = int_add(i1, 1) + i3 = int_le(i2, 13) + guard_true(i3) [p1] + jump(p0, i2, p1) + """ + self.optimize_loop(ops, expected, preamble) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass From noreply at buildbot.pypy.org Fri Apr 25 12:25:55 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 25 Apr 2014 12:25:55 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20140425102555.966621C340B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70973:d2ea3fe50e5e Date: 2014-04-25 12:24 +0200 http://bitbucket.org/pypy/pypy/changeset/d2ea3fe50e5e/ Log: merge diff too long, truncating to 2000 out of 5203 lines diff --git a/lib-python/2.7/test/test_array.py b/lib-python/2.7/test/test_array.py --- a/lib-python/2.7/test/test_array.py +++ b/lib-python/2.7/test/test_array.py @@ -298,6 +298,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a + b + with self.assertRaises(TypeError): a + 'bad' @@ -320,6 +321,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a += b + with self.assertRaises(TypeError): a += 'bad' diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -479,11 +479,10 @@ def _create_file(self): if self.use_buffering: - f = open(self.filename, "w+", buffering=1024*16) + self.f = open(self.filename, "w+", 
buffering=1024*16) else: - f = open(self.filename, "w+") - self.f = f - self.all_files.append(f) + self.f = open(self.filename, "w+") + self.all_files.append(self.f) oldf = self.all_files.pop(0) if oldf is not None: oldf.close() diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -115,8 +115,8 @@ self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object - self.assertRaises((ValueError, TypeError), setitem, 0, b"") - self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") + self.assertRaises(ValueError, setitem, 0, b"") + self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") 
self.assertRaises(ValueError, setitem, slice(0,2), b"a") @@ -166,18 +166,11 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - if test_support.check_impl_detail(cpython=True): - # what is supported and what is not supported by memoryview is - # very inconsisten on CPython. In PyPy, memoryview supports - # the buffer interface, and thus the following comparison - # succeeds. See also the comment in - # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer - # - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef") + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -17,7 +18,8 @@ output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) + with fp: + imp.load_module('_ctypes_test', fp, filename, description) except ImportError: print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,7 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', 
fp, filename, description) + with fp: + imp.load_module('_testcapi', fp, filename, description) except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -560,6 +560,12 @@ Fixing these bootstrap problems is on the TODO list. The global namespace is ``cppyy.gbl``. +* **NULL**: Is represented as ``cppyy.gbl.nullptr``. + In C++11, the keyword ``nullptr`` is used to represent ``NULL``. + For clarity of intent, it is recommended to use this instead of ``None`` + (or the integer ``0``, which can serve in some cases), as ``None`` is better + understood as ``void`` in C++. + * **operator conversions**: If defined in the C++ class and a python equivalent exists (i.e. all builtin integer and floating point types, as well as ``bool``), it will map onto that python conversion. @@ -577,7 +583,7 @@ Special care needs to be taken for global operator overloads in C++: first, make sure that they are actually reflected, especially for the global overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterator over a vector). + the case of gcc (note that they are not needed to iterate over a vector). Second, make sure that reflection info is loaded in the proper order. I.e. that these global overloads are available before use. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -66,58 +66,26 @@ Reflex ====== -This method is still experimental. It adds the `cppyy`_ module. -The method works by using the `Reflex package`_ to provide reflection -information of the C++ code, which is then used to automatically generate -bindings at runtime. 
-From a python standpoint, there is no difference between generating bindings -at runtime, or having them "statically" generated and available in scripts -or compiled into extension modules: python classes and functions are always -runtime structures, created when a script or module loads. +The builtin `cppyy`_ module uses reflection information, provided by +`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +automatically generate bindings at runtime. +In Python, classes and functions are always runtime structures, so when they +are generated matters not for performance. However, if the backend itself is capable of dynamic behavior, it is a much -better functional match to python, allowing tighter integration and more -natural language mappings. -Full details are `available here`_. +better functional match, allowing tighter integration and more natural +language mappings. + +The `cppyy`_ module is written in RPython, thus PyPy's JIT is able to remove +most cross-language call overhead. + +`Full details`_ are `available here`_. .. _`cppyy`: cppyy.html -.. _`reflex-support`: cppyy.html -.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex +.. _`installed separately`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 +.. _`Reflex`: http://root.cern.ch/drupal/content/reflex +.. _`Full details`: cppyy.html .. _`available here`: cppyy.html -Pros ----- - -The cppyy module is written in RPython, which makes it possible to keep the -code execution visible to the JIT all the way to the actual point of call into -C++, thus allowing for a very fast interface. -Reflex is currently in use in large software environments in High Energy -Physics (HEP), across many different projects and packages, and its use can be -virtually completely automated in a production environment. -One of its uses in HEP is in providing language bindings for CPython. -Thus, it is possible to use Reflex to have bound code work on both CPython and -on PyPy. 
-In the medium-term, Reflex will be replaced by `cling`_, which is based on -`llvm`_. -This will affect the backend only; the python-side interface is expected to -remain the same, except that cling adds a lot of dynamic behavior to C++, -enabling further language integration. - -.. _`cling`: http://root.cern.ch/drupal/content/cling -.. _`llvm`: http://llvm.org/ - -Cons ----- - -C++ is a large language, and cppyy is not yet feature-complete. -Still, the experience gained in developing the equivalent bindings for CPython -means that adding missing features is a simple matter of engineering, not a -question of research. -The module is written so that currently missing features should do no harm if -you don't use them, if you do need a particular feature, it may be necessary -to work around it in python or with a C++ helper function. -Although Reflex works on various platforms, the bindings with PyPy have only -been tested on Linux. - RPython Mixed Modules ===================== diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,88 @@ +======================================= +PyPy 2.3 - XXXX TODO +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. 
+ +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. 
+ You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. + +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. + +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,7 +5,10 @@ .. this is a revision shortly after release-2.3.x .. startrev: ba569fe1efdb - - .. branch: small-unroll-improvements Improve optimiziation of small allocation-heavy loops in the JIT + +.. branch: reflex-support + +.. 
branch: refactor-buffer-api +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -5,7 +5,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id) + compute_unique_id, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX @@ -194,13 +194,37 @@ def immutable_unique_id(self, space): return None - def buffer_w(self, space): + def buffer_w(self, space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): - return w_result.buffer_w(space) - self._typed_unwrap_error(space, "buffer") + return w_result.buffer_w(space, flags) + raise TypeError + + def readbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.readbuf_w(space) + raise TypeError + + def writebuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.writebuf_w(space) + raise TypeError + + def charbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.charbuf_w(space) + raise TypeError def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -1340,25 +1364,111 @@ self.wrap('cannot convert negative 
integer ' 'to unsigned int')) - def buffer_w(self, w_obj): - return w_obj.buffer_w(self) + BUF_SIMPLE = 0x0000 + BUF_WRITABLE = 0x0001 + BUF_FORMAT = 0x0004 + BUF_ND = 0x0008 + BUF_STRIDES = 0x0010 | BUF_ND + BUF_INDIRECT = 0x0100 | BUF_STRIDES - def rwbuffer_w(self, w_obj): - # returns a RWBuffer instance - from pypy.interpreter.buffer import RWBuffer - buffer = self.buffer_w(w_obj) - if not isinstance(buffer, RWBuffer): - raise OperationError(self.w_TypeError, - self.wrap('read-write buffer expected')) - return buffer + BUF_CONTIG_RO = BUF_ND + BUF_CONTIG = BUF_ND | BUF_WRITABLE - def bufferstr_new_w(self, w_obj): - # Implement the "new buffer interface" (new in Python 2.7) - # returning an unwrapped string. It doesn't accept unicode - # strings - buffer = self.buffer_w(w_obj) - return buffer.as_str() + BUF_FULL_RO = BUF_INDIRECT | BUF_FORMAT + BUF_FULL = BUF_INDIRECT | BUF_FORMAT | BUF_WRITABLE + def check_buf_flags(self, flags, readonly): + if readonly and flags & self.BUF_WRITABLE == self.BUF_WRITABLE: + raise oefmt(self.w_BufferError, "Object is not writable.") + + def buffer_w(self, w_obj, flags): + # New buffer interface, returns a buffer based on flags (PyObject_GetBuffer) + try: + return w_obj.buffer_w(self, flags) + except TypeError: + raise oefmt(self.w_TypeError, + "'%T' does not have the buffer interface", w_obj) + + def readbuf_w(self, w_obj): + # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) + try: + return w_obj.readbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a readable buffer object") + + def writebuf_w(self, w_obj): + # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) + try: + return w_obj.writebuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a writeable buffer object") + + def charbuf_w(self, w_obj): + # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) + try: + return w_obj.charbuf_w(self) + except TypeError: + 
raise oefmt(self.w_TypeError, + "expected a character buffer object") + + def _getarg_error(self, expected, w_obj): + if self.is_none(w_obj): + name = "None" + else: + name = self.type(w_obj).get_module_type_name() + raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + + @specialize.arg(1) + def getarg_w(self, code, w_obj): + if code == 'z*': + if self.is_none(w_obj): + return None + code = 's*' + if code == 's*': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.readbuf_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).readbuf_w(self) + try: + return w_obj.buffer_w(self, 0) + except TypeError: + pass + try: + return w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + elif code == 's#': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.str_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).str_w(self) + try: + return w_obj.readbuf_w(self).as_str() + except TypeError: + self._getarg_error("string or read-only buffer", w_obj) + elif code == 'w*': + try: + try: + return w_obj.buffer_w(self, self.BUF_WRITABLE) + except OperationError: + self._getarg_error("read-write buffer", w_obj) + except TypeError: + pass + try: + return w_obj.writebuf_w(self) + except TypeError: + self._getarg_error("read-write buffer", w_obj) + elif code == 't#': + try: + return w_obj.charbuf_w(self) + except TypeError: + self._getarg_error("string or read-only character buffer", w_obj) + else: + assert False + + # XXX rename/replace with code more like CPython getargs for buffers def bufferstr_w(self, w_obj): # Directly returns an interp-level str. 
Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): @@ -1373,8 +1483,18 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - buffer = self.buffer_w(w_obj) - return buffer.as_str() + try: + buf = w_obj.buffer_w(self, 0) + except TypeError: + pass + else: + return buf.as_str() + try: + buf = w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + else: + return buf.as_str() def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): @@ -1721,6 +1841,7 @@ 'AssertionError', 'AttributeError', 'BaseException', + 'BufferError', 'DeprecationWarning', 'EOFError', 'EnvironmentError', diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py deleted file mode 100644 --- a/pypy/interpreter/buffer.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Buffer protocol support. -""" -from rpython.rlib.objectmodel import import_from_mixin - - -class Buffer(object): - """Abstract base class for buffers.""" - __slots__ = [] - - def getlength(self): - raise NotImplementedError - - def as_str(self): - "Returns an interp-level string with the whole content of the buffer." - # May be overridden. - return self.getslice(0, self.getlength(), 1, self.getlength()) - - def getitem(self, index): - "Returns the index'th character in the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def getslice(self, start, stop, step, size): - # May be overridden. No bounds checks. - return ''.join([self.getitem(i) for i in range(start, stop, step)]) - - def get_raw_address(self): - raise ValueError("no raw buffer") - - def is_writable(self): - return False - - -class RWBuffer(Buffer): - """Abstract base class for read-write buffers.""" - __slots__ = [] - - def is_writable(self): - return True - - def setitem(self, index, char): - "Write a character into the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. 
- - def setslice(self, start, string): - # May be overridden. No bounds checks. - for i in range(len(string)): - self.setitem(start + i, string[i]) - - -class StringBuffer(Buffer): - __slots__ = ['value'] - - def __init__(self, value): - self.value = value - - def getlength(self): - return len(self.value) - - def as_str(self): - return self.value - - def getitem(self, index): - return self.value[index] - - def getslice(self, start, stop, step, size): - if size == 0: - return "" - if step == 1: - assert 0 <= start <= stop - return self.value[start:stop] - return "".join([self.value[start + i*step] for i in xrange(size)]) -# ____________________________________________________________ - - -class SubBufferMixin(object): - _attrs_ = ['buffer', 'offset', 'size'] - - def __init__(self, buffer, offset, size): - self.buffer = buffer - self.offset = offset - self.size = size - - def getlength(self): - at_most = self.buffer.getlength() - self.offset - if 0 <= self.size <= at_most: - return self.size - elif at_most >= 0: - return at_most - else: - return 0 - - def getitem(self, index): - return self.buffer.getitem(self.offset + index) - - def getslice(self, start, stop, step, size): - if start == stop: - return '' # otherwise, adding self.offset might make them - # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, - step, size) - - -class SubBuffer(Buffer): - import_from_mixin(SubBufferMixin) - - -class RWSubBuffer(RWBuffer): - import_from_mixin(SubBufferMixin) - - def setitem(self, index, char): - self.buffer.setitem(self.offset + index, char) - - def setslice(self, start, string): - if len(string) == 0: - return # otherwise, adding self.offset might make 'start' - # out of bounds - self.buffer.setslice(self.offset + start, string) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -881,8 +881,8 @@ def LOAD_NAME(self, nameindex, next_instr): if 
self.w_locals is not self.w_globals: - w_varname = self.getname_w(nameindex) - w_value = self.space.finditem(self.w_locals, w_varname) + varname = self.getname_u(nameindex) + w_value = self.space.finditem_str(self.w_locals, varname) if w_value is not None: self.pushvalue(w_value) return diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py deleted file mode 100644 --- a/pypy/interpreter/test/test_buffer.py +++ /dev/null @@ -1,43 +0,0 @@ -import py -from rpython.tool.udir import udir - -testdir = udir.ensure('test_buffer', dir=1) - - -class TestBuffer: - def test_buffer_w(self): - space = self.space - w_hello = space.wrap('hello world') - buf = space.buffer_w(w_hello) - assert buf.getlength() == 11 - assert buf.as_str() == 'hello world' - assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.newbuffer(buf)) is buf - assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' - space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - - def test_file_write(self): - space = self.space - w_buffer = space.newbuffer(space.buffer_w(space.wrap('hello world'))) - filename = str(testdir.join('test_file_write')) - space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): - f = open(filename, 'wb') - f.write(buffer) - f.close() - """) - f = open(filename, 'rb') - data = f.read() - f.close() - assert data == 'hello world' - - def test_unicode(self): - space = self.space - s = space.bufferstr_w(space.wrap(u'hello')) - assert type(s) is str - assert s == 'hello' - space.raises_w(space.w_UnicodeEncodeError, - space.bufferstr_w, space.wrap(u'\xe9')) - - -# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. 
diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -2,13 +2,16 @@ # A convenient read-write buffer. Located here for want of a better place. # -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from pypy.interpreter.gateway import unwrap_spec -class ByteBuffer(RWBuffer): +class ByteBuffer(Buffer): + _immutable_ = True + def __init__(self, len): self.data = ['\x00'] * len + self.readonly = False def getlength(self): return len(self.data) diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -13,3 +13,18 @@ assert b[-1] == '*' assert b[-2] == '-' assert b[-3] == '+' + exc = raises(TypeError, "b[3] = 'abc'") + assert str(exc.value) == "right operand must be a single byte" + exc = raises(TypeError, "b[3:5] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + exc = raises(TypeError, "b[3:7:2] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + + b = bytebuffer(10) + b[1:3] = 'xy' + assert str(b) == "\x00xy" + "\x00" * 7 + b[4:8:2] = 'zw' + assert str(b) == "\x00xy\x00z\x00w" + "\x00" * 3 + r = str(buffer(u'#')) + b[6:6+len(r)] = u'#' + assert str(b[:6+len(r)]) == "\x00xy\x00z\x00" + r diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,21 +1,22 @@ -from pypy.interpreter.buffer import RWBuffer -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, 
ctypearray -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer +from rpython.rlib.buffer import Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw -class LLBuffer(RWBuffer): +class LLBuffer(Buffer): _immutable_ = True def __init__(self, raw_cdata, size): self.raw_cdata = raw_cdata self.size = size + self.readonly = False def getlength(self): return self.size @@ -32,7 +33,7 @@ def getslice(self, start, stop, step, size): if step == 1: return rffi.charpsize2str(rffi.ptradd(self.raw_cdata, start), size) - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) @@ -46,6 +47,14 @@ W_Buffer.__init__(self, buffer) self.keepalive = keepalive + def descr_setitem(self, space, w_index, w_obj): + try: + W_Buffer.descr_setitem(self, space, w_index, w_obj) + except OperationError as e: + if e.match(space, space.w_TypeError): + e.w_type = space.w_ValueError + raise + MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -72,8 +72,8 @@ 'utf_32_le_decode' : 'interp_codecs.utf_32_le_decode', 'utf_32_le_encode' : 'interp_codecs.utf_32_le_encode', 'utf_32_ex_decode' : 'interp_codecs.utf_32_ex_decode', - 'charbuffer_encode': 'interp_codecs.buffer_encode', - 'readbuffer_encode': 'interp_codecs.buffer_encode', + 'charbuffer_encode': 'interp_codecs.charbuffer_encode', + 'readbuffer_encode': 'interp_codecs.readbuffer_encode', 'charmap_decode' : 'interp_codecs.charmap_decode', 'charmap_encode' : 'interp_codecs.charmap_encode', 'escape_encode' : 'interp_codecs.escape_encode', diff --git a/pypy/module/_codecs/interp_codecs.py 
b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -321,8 +321,14 @@ w_res = space.call_function(w_encoder, w_obj, space.wrap(errors)) return space.getitem(w_res, space.wrap(0)) - at unwrap_spec(s='bufferstr', errors='str_or_None') -def buffer_encode(space, s, errors='strict'): + at unwrap_spec(errors='str_or_None') +def readbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('s#', w_data) + return space.newtuple([space.wrap(s), space.wrap(len(s))]) + + at unwrap_spec(errors='str_or_None') +def charbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('t#', w_data) return space.newtuple([space.wrap(s), space.wrap(len(s))]) @unwrap_spec(errors=str) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -420,9 +420,13 @@ for (i, line) in enumerate(reader): assert line == s[i] - def test_array(self): + def test_buffer_encode(self): import _codecs, array - _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + assert _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + exc = raises(TypeError, _codecs.charbuffer_encode, array.array('c', 'spam')) + assert str(exc.value) == "must be string or read-only character buffer, not array.array" + assert _codecs.readbuffer_encode(u"test") == ('test', 4) + assert _codecs.charbuffer_encode(u"test") == ('test', 4) def test_utf8sig(self): import codecs diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -267,9 +267,14 @@ def direct_write(self, w_data): space = self.space - if not self.binary and space.isinstance_w(w_data, space.w_unicode): - w_data = space.call_method(w_data, "encode", space.wrap(self.encoding), space.wrap(self.errors)) - data = 
space.bufferstr_w(w_data) + if self.binary: + data = space.getarg_w('s*', w_data).as_str() + else: + if space.isinstance_w(w_data, space.w_unicode): + w_data = space.call_method(w_data, "encode", + space.wrap(self.encoding), + space.wrap(self.errors)) + data = space.charbuf_w(w_data) self.do_direct_write(data) def do_direct_write(self, data): @@ -469,7 +474,7 @@ """readinto() -> Undocumented. Don't use this; it may go away.""" # XXX not the most efficient solution as it doesn't avoid the copying space = self.space - rwbuffer = space.rwbuffer_w(w_rwbuffer) + rwbuffer = space.writebuf_w(w_rwbuffer) w_data = self.file_read(rwbuffer.getlength()) data = space.str_w(w_data) rwbuffer.setslice(0, data) diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -80,7 +80,7 @@ self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() w_data = space.call_method(self, "read", space.wrap(length)) @@ -101,11 +101,14 @@ readinto = interp2app(W_BufferedIOBase.readinto_w), ) -class RawBuffer(RWBuffer): +class RawBuffer(Buffer): + _immutable_ = True + def __init__(self, buf, start, length): self.buf = buf self.start = start self.length = length + self.readonly = False def getlength(self): return self.length @@ -698,7 +701,7 @@ def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write 
to closed file") - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() size = len(data) with self.lock: diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -41,7 +41,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) size = rwbuffer.getlength() output = self.read(size) @@ -50,10 +50,7 @@ def write_w(self, space, w_data): self._check_closed(space) - if space.isinstance_w(w_data, space.w_unicode): - raise OperationError(space.w_TypeError, space.wrap( - "bytes string of buffer expected")) - buf = space.bufferstr_w(w_data) + buf = space.buffer_w(w_data, space.BUF_CONTIG_RO).as_str() length = len(buf) if length <= 0: return space.wrap(0) diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -333,7 +333,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() try: n = os.write(self.fd, data) @@ -366,7 +366,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) self._check_readable(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() try: buf = os.read(self.fd, length) diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,6 +139,14 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + 
assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\ncxxxxx' @@ -235,7 +243,8 @@ import _io raw = _io.FileIO(self.tmpfile, 'w') f = _io.BufferedWriter(raw) - f.write("abcd") + f.write("ab") + f.write(u"cd") f.close() assert self.readfile() == "abcd" diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -38,6 +38,8 @@ f = _io.BytesIO() assert f.write("") == 0 assert f.write("hello") == 5 + exc = raises(TypeError, f.write, u"lo") + assert str(exc.value) == "'unicode' does not have the buffer interface" import gc; gc.collect() assert f.getvalue() == "hello" f.close() @@ -97,6 +99,14 @@ a2 = bytearray('testing') assert b.readinto(a1) == 1 assert b.readinto(a2) == 4 + exc = raises(TypeError, b.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, b.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" b.close() assert a1 == "h" assert a2 == "elloing" diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -82,7 +82,8 @@ import _io filename = self.tmpfile + '_w' f = _io.FileIO(filename, 'wb') - f.write("test") + f.write("te") + f.write(u"st") # try without flushing f2 = 
_io.FileIO(filename, 'rb') assert f2.read() == "test" @@ -135,6 +136,14 @@ a = bytearray('x' * 10) f = _io.FileIO(self.tmpfile, 'r+') assert f.readinto(a) == 10 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\nc\0\0\0\0\0' # diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -80,8 +80,9 @@ raise OperationError(space.w_IOError, space.wrap("connection is read-only")) - @unwrap_spec(buf='bufferstr', offset='index', size='index') - def send_bytes(self, space, buf, offset=0, size=PY_SSIZE_T_MIN): + @unwrap_spec(offset='index', size='index') + def send_bytes(self, space, w_buf, offset=0, size=PY_SSIZE_T_MIN): + buf = space.getarg_w('s*', w_buf).as_str() length = len(buf) self._check_writable(space) if offset < 0: @@ -122,7 +123,7 @@ @unwrap_spec(offset='index') def recv_bytes_into(self, space, w_buffer, offset=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.writebuf_w(w_buffer) length = rwbuffer.getlength() res, newbuf = self.do_recv_string( @@ -149,7 +150,7 @@ w_pickled = space.call_method( w_picklemodule, "dumps", w_obj, w_protocol) - buf = space.bufferstr_w(w_pickled) + buf = space.str_w(w_pickled) self.do_send_string(space, buf, 0, len(buf)) def recv(self, space): diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ 
b/pypy/module/_rawffi/buffer.py @@ -1,12 +1,14 @@ -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer # XXX not the most efficient implementation -class RawFFIBuffer(RWBuffer): +class RawFFIBuffer(Buffer): + _immutable_ = True def __init__(self, datainstance): self.datainstance = datainstance + self.readonly = False def getlength(self): return self.datainstance.getrawsize() diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -16,6 +16,7 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.rarithmetic import intmask, r_uint +from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker TYPEMAP = { @@ -352,8 +353,13 @@ lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) - def buffer_w(self, space): - from pypy.module._rawffi.buffer import RawFFIBuffer + def buffer_w(self, space, flags): + return RawFFIBuffer(self) + + def readbuf_w(self, space): + return RawFFIBuffer(self) + + def writebuf_w(self, space): return RawFFIBuffer(self) def getrawsize(self): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1100,6 +1100,12 @@ assert a[3] == 'z' assert a[4] == 't' + b = memoryview(a) + assert len(b) == 10 + assert b[3] == 'z' + b[3] = 'x' + assert b[3] == 'x' + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -419,7 +419,7 @@ @unwrap_spec(nbytes=int, flags=int) def recv_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = 
space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt @@ -430,7 +430,7 @@ @unwrap_spec(nbytes=int, flags=int) def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -545,8 +545,12 @@ s.connect(("www.python.org", 80)) except _socket.gaierror, ex: skip("GAIError - probably no connection: %s" % str(ex.args)) + exc = raises(TypeError, s.send, None) + assert str(exc.value) == "must be string or buffer, not None" assert s.send(buffer('')) == 0 assert s.sendall(buffer('')) is None + assert s.send(memoryview('')) == 0 + assert s.sendall(memoryview('')) is None assert s.send(u'') == 0 assert s.sendall(u'') is None raises(UnicodeEncodeError, s.send, u'\xe9') @@ -678,6 +682,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes = cli.recv_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_recvfrom_into(self): import socket import array @@ -693,6 +704,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes, addr = cli.recvfrom_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_family(self): import socket cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -34,8 +34,8 @@ def slice_w(space, ctx, start, end, w_default): if 0 <= start <= end: - if isinstance(ctx, rsre_core.StrMatchContext): - return 
space.wrap(ctx._string[start:end]) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,7 +98,7 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a StrMatchContext or a UnicodeMatchContext for searching + """Make a BufMatchContext or a UnicodeMatchContext for searching in the given w_string object.""" space = self.space if pos < 0: @@ -114,12 +114,14 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - str = space.bufferstr_w(w_string) - if pos > len(str): - pos = len(str) - if endpos > len(str): - endpos = len(str) - return rsre_core.StrMatchContext(self.code, str, + buf = space.readbuf_w(w_string) + size = buf.getlength() + assert size >= 0 + if pos > size: + pos = size + if endpos > size: + endpos = size + return rsre_core.BufMatchContext(self.code, buf, pos, endpos, self.flags) def getmatch(self, ctx, found): @@ -477,8 +479,8 @@ def fget_string(self, space): ctx = self.ctx - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrap(ctx._string) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.as_str()) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -2,7 +2,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError, wrap_windowserror +from pypy.interpreter.error import OperationError, wrap_windowserror, oefmt from rpython.rtyper.lltypesystem import rffi, 
lltype from rpython.rlib import rwinreg, rwin32 from rpython.rlib.rarithmetic import r_uint, intmask @@ -327,7 +327,14 @@ buf = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw') buf[0] = '\0' else: - value = space.bufferstr_w(w_value) + try: + value = w_value.readbuf_w(space) + except TypeError: + raise oefmt(space.w_TypeError, + "Objects of type '%T' can not be used as binary " + "registry values", w_value) + else: + value = value.as_str() buflen = len(value) buf = rffi.str2charp(value) diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -137,11 +137,15 @@ assert 0, "Did not raise" def test_SetValueEx(self): - from _winreg import CreateKey, SetValueEx + from _winreg import CreateKey, SetValueEx, REG_BINARY key = CreateKey(self.root_key, self.test_key_name) sub_key = CreateKey(key, "sub_key") for name, value, type in self.test_data: SetValueEx(sub_key, name, 0, type, value) + exc = raises(TypeError, SetValueEx, sub_key, 'test_name', None, + REG_BINARY, memoryview('abc')) + assert str(exc.value) == ("Objects of type 'memoryview' can not " + "be used as binary registry values") def test_readValues(self): from _winreg import OpenKey, EnumValue, QueryValueEx, EnumKey diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -1,6 +1,7 @@ from __future__ import with_statement from rpython.rlib import jit +from rpython.rlib.buffer import Buffer from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable @@ -9,7 +10,6 @@ from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt 
from pypy.interpreter.gateway import ( interp2app, interpindirect2app, unwrap_spec) @@ -42,7 +42,7 @@ if len(__args__.arguments_w) > 0: w_initializer = __args__.arguments_w[0] if space.type(w_initializer) is space.w_str: - a.descr_fromstring(space, space.str_w(w_initializer)) + a.descr_fromstring(space, w_initializer) elif space.type(w_initializer) is space.w_list: a.descr_fromlist(space, w_initializer) else: @@ -132,8 +132,11 @@ self.len = 0 self.allocated = 0 - def buffer_w(self, space): - return ArrayBuffer(self) + def readbuf_w(self, space): + return ArrayBuffer(self, True) + + def writebuf_w(self, space): + return ArrayBuffer(self, False) def descr_append(self, space, w_x): """ append(x) @@ -229,13 +232,13 @@ self._charbuf_stop() return self.space.wrap(s) - @unwrap_spec(s=str) - def descr_fromstring(self, space, s): + def descr_fromstring(self, space, w_s): """ fromstring(string) Appends items from the string, interpreting it as an array of machine values,as if it had been read from a file using the fromfile() method). 
""" + s = space.getarg_w('s#', w_s) if len(s) % self.itemsize != 0: msg = 'string length not a multiple of item size' raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) @@ -267,10 +270,10 @@ elems = max(0, len(item) - (len(item) % self.itemsize)) if n != 0: item = item[0:elems] - self.descr_fromstring(space, item) + self.descr_fromstring(space, space.wrap(item)) msg = "not enough items in file" raise OperationError(space.w_EOFError, space.wrap(msg)) - self.descr_fromstring(space, item) + self.descr_fromstring(space, w_item) @unwrap_spec(w_f=W_File) def descr_tofile(self, space, w_f): @@ -583,9 +586,12 @@ v.typecode = k unroll_typecodes = unrolling_iterable(types.keys()) -class ArrayBuffer(RWBuffer): - def __init__(self, array): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, array, readonly): self.array = array + self.readonly = readonly def getlength(self): return self.array.len * self.array.itemsize diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -155,6 +155,11 @@ a.fromstring('Hi!') assert a[0] == 'H' and a[1] == 'i' and a[2] == '!' 
and len(a) == 3 a = self.array('c') + a.fromstring(buffer('xyz')) + exc = raises(TypeError, a.fromstring, memoryview('xyz')) + assert str(exc.value) == "must be string or read-only buffer, not memoryview" + assert a[0] == 'x' and a[1] == 'y' and a[2] == 'z' and len(a) == 3 + a = self.array('c') a.fromstring('') assert not len(a) @@ -421,12 +426,8 @@ def test_buffer_write(self): a = self.array('c', 'hello') buf = buffer(a) - print repr(buf) - try: - buf[3] = 'L' - except TypeError: - skip("buffer(array) returns a read-only buffer on CPython") - assert a.tostring() == 'helLo' + exc = raises(TypeError, "buf[3] = 'L'") + assert str(exc.value) == "buffer is read-only" def test_buffer_keepalive(self): buf = buffer(self.array('c', 'text')) diff --git a/pypy/module/cStringIO/interp_stringio.py b/pypy/module/cStringIO/interp_stringio.py --- a/pypy/module/cStringIO/interp_stringio.py +++ b/pypy/module/cStringIO/interp_stringio.py @@ -160,10 +160,10 @@ raise OperationError(space.w_IOError, space.wrap("negative size")) self.truncate(size) - @unwrap_spec(buffer='bufferstr') - def descr_write(self, buffer): + def descr_write(self, space, w_buffer): + buffer = space.getarg_w('s*', w_buffer) self.check_closed() - self.write(buffer) + self.write(buffer.as_str()) def descr_writelines(self, w_lines): self.check_closed() @@ -236,5 +236,5 @@ if space.is_none(w_string): return space.wrap(W_OutputType(space)) else: - string = space.bufferstr_w(w_string) + string = space.getarg_w('s*', w_string).as_str() return space.wrap(W_InputType(space, string)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -216,11 +216,20 @@ 'stdstring2stdstring' : ([c_object], c_object), } + # size/offset are backend-specific but fixed after load + self.c_sizeof_farg = 0 + self.c_offset_farg = 0 + + def load_reflection_library(space): state = space.fromcache(State) if 
state.library is None: from pypy.module._cffi_backend.libraryobj import W_Library state.library = W_Library(space, reflection_library, rdynload.RTLD_LOCAL | rdynload.RTLD_LAZY) + if state.library: + # fix constants + state.c_sizeof_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) + state.c_offset_farg = _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) return state.library def verify_backend(space): @@ -340,12 +349,12 @@ return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) def c_deallocate_function_args(space, cargs): call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) - at jit.elidable def c_function_arg_sizeof(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) - at jit.elidable + state = space.fromcache(State) + return state.c_sizeof_farg def c_function_arg_typeoffset(space): - return _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) + state = space.fromcache(State) + return state.c_offset_farg # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): @@ -365,13 +374,12 @@ def c_base_name(space, cppclass, base_index): args = [_Arg(l=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) - at jit.elidable_promote('2') def c_is_subtype(space, derived, base): + jit.promote(base) if derived == base: return bool(1) return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) - at jit.elidable_promote('1,2,4') def _c_base_offset(space, derived_h, base_h, address, direction): args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] return _cdata_to_size_t(space, call_capi(space, 'base_offset', args)) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 
+63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.buffer_w(w_obj) + buf = space.readbuf_w(w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.buffer_w(w_value) + buf = space.readbuf_w(w_value) # TODO: report if too many items given? for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.buffer_w(w_value) + buf = space.readbuf_w(w_value) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -604,12 +604,10 @@ def get_returntype(self): return self.space.wrap(self.converter.name) - @jit.elidable_promote() def _get_offset(self, cppinstance): if cppinstance: assert lltype.typeOf(cppinstance.cppclass.handle) == lltype.typeOf(self.scope.handle) - offset = self.offset + capi.c_base_offset(self.space, - cppinstance.cppclass, self.scope, cppinstance.get_rawobject(), 1) + offset = self.offset + cppinstance.cppclass.get_base_offset(cppinstance, self.scope) else: offset = self.offset return offset @@ -739,7 +737,6 @@ self.datamembers[name] = new_dm return new_dm - @jit.elidable_promote() def dispatch(self, name, signature): overload = self.get_overload(name) sig = '(%s)' % signature @@ -908,6 +905,10 @@ def find_datamember(self, name): raise self.missing_attribute_error(name) + def get_base_offset(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + return 0 + def get_cppthis(self, cppinstance, calling_scope): 
assert self == cppinstance.cppclass return cppinstance.get_rawobject() @@ -939,10 +940,15 @@ class W_ComplexCPPClass(W_CPPClass): - def get_cppthis(self, cppinstance, calling_scope): + def get_base_offset(self, cppinstance, calling_scope): assert self == cppinstance.cppclass offset = capi.c_base_offset(self.space, self, calling_scope, cppinstance.get_rawobject(), 1) + return offset + + def get_cppthis(self, cppinstance, calling_scope): + assert self == cppinstance.cppclass + offset = self.get_base_offset(cppinstance, calling_scope) return capi.direct_ptradd(cppinstance.get_rawobject(), offset) W_ComplexCPPClass.typedef = TypeDef( diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -38,6 +38,24 @@ typedef std::map Scopes_t; static Scopes_t s_scopes; +class PseudoExample01 { +public: + PseudoExample01() : m_somedata(-99) {} + PseudoExample01(int a) : m_somedata(a) {} + PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} + PseudoExample01& operator=(const PseudoExample01& e) { + if (this != &e) m_somedata = e.m_somedata; + return *this; + } + virtual ~PseudoExample01() {} + +public: + int m_somedata; +}; + +static int example01_last_static_method = 0; +static int example01_last_constructor = 0; + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- @@ -46,27 +64,62 @@ std::vector methods; - // static double staticAddToDouble(double a); + // ( 0) static double staticAddToDouble(double a) std::vector argtypes; argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); - // static int staticAddOneToInt(int a); - // static int staticAddOneToInt(int a, int b); + // ( 1) static int staticAddOneToInt(int a) + // ( 2) static int staticAddOneToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); 
methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); - // static int staticAtoi(const char* str); + // ( 3) static int staticAtoi(const char* str) argtypes.clear(); argtypes.push_back("const char*"); methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); - // static char* staticStrcpy(const char* strin); + // ( 4) static char* staticStrcpy(const char* strin) methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // ( 5) static void staticSetPayload(payload* p, double d) + // ( 6) static payload* staticCyclePayload(payload* p, double d) + // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + argtypes.clear(); + argtypes.push_back("payload*"); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + + // ( 8) static int getCount() + // ( 9) static void setCount(int) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + + // cut-off is used in cppyy_is_static + example01_last_static_method = methods.size(); + + // (10) example01() + // (11) example01(int a) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + + // cut-off is used in cppyy_is_constructor + example01_last_constructor = methods.size(); + + // (12) double addDataToDouble(double a) + argtypes.clear(); + argtypes.push_back("double"); + 
methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; // -- class example01 @@ -98,47 +151,69 @@ } +/* memory management ------------------------------------------------------ */ +void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { + if (handle == s_handles["example01"]) + delete (PseudoExample01*)self; +} + + /* method/function dispatching -------------------------------------------- */ -template -static inline T cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - T result = T(); +int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + int result = 0; switch ((long)method) { - case 0: // double staticAddToDouble(double) - assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; - break; - case 1: // int staticAddOneToInt(int) + case 1: // static int staticAddOneToInt(int) assert(!self && nargs == 1); result = ((CPPYY_G__value*)args)[0].obj.in + 1; break; - case 2: // int staticAddOneToInt(int, int) + case 2: // static int staticAddOneToInt(int, int) assert(!self && nargs == 2); result = ((CPPYY_G__value*)args)[0].obj.in + ((CPPYY_G__value*)args)[1].obj.in + 1; break; - case 3: // int staticAtoi(const char* str) + case 3: // static int staticAtoi(const char* str) assert(!self && nargs == 1); result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); break; + case 8: // static int getCount() + assert(!self && nargs == 0); + // can't actually call this method (would need to resolve example01::count), but + // other than the memory tests, most tests just check for 0 at the end + result = 0; + break; default: + assert(!"method unknown in cppyy_call_i"); break; } return result; } -int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); -} - long cppyy_call_l(cppyy_method_t 
method, cppyy_object_t self, int nargs, void* args) { - // char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); - char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return (long)strout; + if ((long)method == 4) { // static char* staticStrcpy(const char* strin) + const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); + char* strout = (char*)malloc(::strlen(strin)+1); + ::strcpy(strout, strin); + return (long)strout; + } + assert(!"method unknown in cppyy_call_l"); + return 0; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - return cppyy_call_T(method, self, nargs, args); + double result = 0.; + switch ((long)method) { + case 0: // static double staticAddToDouble(double) + assert(!self && nargs == 1); + result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + break; + case 12: // double addDataToDouble(double a) + assert(self && nargs == 1); + result = ((PseudoExample01*)self)->m_somedata + ((CPPYY_G__value*)args)[0].obj.d; + break; + default: + assert(!"method unknown in cppyy_call_d"); + break; + } + return result; } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { @@ -149,10 +224,31 @@ return strout; } +cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { + void* result = 0; + if (handle == s_handles["example01"]) { + switch ((long)method) { + case 10: + assert(nargs == 0); + result = new PseudoExample01; + break; + case 11: + assert(nargs == 1); + result = new PseudoExample01(((CPPYY_G__value*)args)[0].obj.in); + break; + default: + assert(!"method unknown in cppyy_constructor"); + break; + } + } + return (cppyy_object_t)result; +} + cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { return (cppyy_methptrgetter_t)0; } + /* handling of function argument buffer 
----------------------------------- */ void* cppyy_allocate_function_args(size_t nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); @@ -200,7 +296,11 @@ } int cppyy_has_complex_hierarchy(cppyy_type_t /* handle */) { - return 1; + return 0; +} + +int cppyy_num_bases(cppyy_type_t /*handle*/) { + return 0; } @@ -252,11 +352,16 @@ /* method properties ----------------------------------------------------- */ -int cppyy_is_constructor(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return example01_last_static_method <= method_index + && method_index < example01_last_constructor; return 0; } -int cppyy_is_staticmethod(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { +int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return method_index < example01_last_static_method ? 
1 : 0; return 1; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -7,13 +7,18 @@ if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex - if not item.location[0] in ['test_helper.py', 'test_cppyy.py']: + if not ('test_helper.py' in item.location[0] or \ + 'test_cppyy.py' in item.location[0]): py.test.skip("genreflex is not installed") import re - if item.location[0] == 'test_cppyy.py' and \ - not re.search("test0[1-3]", item.location[2]): + if 'test_cppyy.py' in item.location[0] and \ + not re.search("test0[1-36]", item.location[2]): py.test.skip("genreflex is not installed") +def pytest_ignore_collect(path, config): + if py.path.local.sysfind('genreflex') is None and config.option.runappdirect: + return True # "can't run dummy tests in -A" + def pytest_configure(config): if py.path.local.sysfind('genreflex') is None: import pypy.module.cppyy.capi.loadable_capi as lcapi @@ -21,6 +26,9 @@ import ctypes ctypes.CDLL(lcapi.reflection_library) except Exception, e: + if config.option.runappdirect: + return # "can't run dummy tests in -A" + # build dummy backend (which has reflex info and calls hard-wired) import os from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -104,6 +104,7 @@ class dummy: pass self.config = dummy() self.config.translating = False + self.BUF_SIMPLE = 1 def issequence_w(self, w_obj): return True @@ -132,7 +133,10 @@ return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' - def buffer_w(self, w_obj): + def buffer_w(self, w_obj, flags): + return FakeBuffer(w_obj) + + def readbuf_w(self, w_obj): return FakeBuffer(w_obj) def exception_match(self, typ, sub): diff 
--git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -22,7 +22,6 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.module import Module from pypy.interpreter.function import StaticMethod -from pypy.objspace.std.memoryview import W_MemoryView from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject @@ -474,7 +473,7 @@ "PyLong_Type": "space.w_long", "PyComplex_Type": "space.w_complex", "PyByteArray_Type": "space.w_bytearray", - "PyMemoryView_Type": "space.gettypeobject(W_MemoryView.typedef)", + "PyMemoryView_Type": "space.w_memoryview", "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", "PyBaseObject_Type": "space.w_object", 'PyNone_Type': 'space.type(space.w_None)', diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -1,12 +1,12 @@ +from rpython.rlib.buffer import StringBuffer, SubBuffer from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.buffer import StringBuffer, SubBuffer from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref from pypy.module.array.interp_array import ArrayBuffer -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer PyBufferObjectStruct = lltype.ForwardReference() diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -15,8 +15,8 @@ from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from 
pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments From noreply at buildbot.pypy.org Fri Apr 25 18:55:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 18:55:07 +0200 (CEST) Subject: [pypy-commit] pypy default: seems str_w is sufficient here Message-ID: <20140425165507.95E991C11B1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70974:044a3faede8c Date: 2014-04-25 11:53 -0400 http://bitbucket.org/pypy/pypy/changeset/044a3faede8c/ Log: seems str_w is sufficient here diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -193,7 +193,7 @@ def setslice(self, space, w_slice, w_value): start, stop = self.decodeslice(space, w_slice) - value = space.bufferstr_w(w_value) + value = space.str_w(w_value) if start + len(value) != stop: raise OperationError(space.w_ValueError, space.wrap("cannot resize array")) From noreply at buildbot.pypy.org Fri Apr 25 18:55:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 18:55:08 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix file.writelines(buffer) behavior to match CPython Message-ID: <20140425165508.DD3A01C11B1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70975:f8870a4ed20a Date: 2014-04-25 12:54 -0400 http://bitbucket.org/pypy/pypy/changeset/f8870a4ed20a/ Log: test/fix file.writelines(buffer) behavior to match CPython diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -468,7 +468,12 @@ if not e.match(space, space.w_StopIteration): raise break # done - self.file_write(w_line) + try: + line = w_line.charbuf_w(space) + except TypeError: + raise OperationError(space.w_TypeError, space.wrap( + "writelines() argument must be a sequence of 
strings")) + self.file_write(space.wrap(line)) def file_readinto(self, w_rwbuffer): """readinto() -> Undocumented. Don't use this; it may go away.""" diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -386,6 +386,27 @@ assert len(somelines) > 200 assert somelines == lines[:len(somelines)] + def test_writelines(self): + import array + fn = self.temptestfile + with file(fn, 'w') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'r').readlines() == ['abcdef'] + + with file(fn, 'wb') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == ['abcdef'] + def test_nasty_writelines(self): # The stream lock should be released between writes fn = self.temptestfile From noreply at buildbot.pypy.org Fri Apr 25 19:08:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 19:08:44 +0200 (CEST) Subject: [pypy-commit] pypy default: check types in file.writelines before writing Message-ID: <20140425170844.8A1041C11B1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70976:82cb77058ff2 Date: 2014-04-25 13:07 -0400 http://bitbucket.org/pypy/pypy/changeset/82cb77058ff2/ Log: check types in file.writelines before writing diff --git a/pypy/module/_file/interp_file.py 
b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -460,20 +460,18 @@ space = self.space self.check_closed() - w_iterator = space.iter(w_lines) - while True: - try: - w_line = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - try: - line = w_line.charbuf_w(space) - except TypeError: - raise OperationError(space.w_TypeError, space.wrap( - "writelines() argument must be a sequence of strings")) - self.file_write(space.wrap(line)) + lines = space.fixedview(w_lines) + for i, w_line in enumerate(lines): + if not space.isinstance_w(w_line, space.w_str): + try: + line = w_line.charbuf_w(space) + except TypeError: + raise OperationError(space.w_TypeError, space.wrap( + "writelines() argument must be a sequence of strings")) + else: + lines[i] = space.wrap(line) + for w_line in lines: + self.file_write(w_line) def file_readinto(self, w_rwbuffer): """readinto() -> Undocumented. 
Don't use this; it may go away.""" diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -407,6 +407,11 @@ assert str(exc.value) == "writelines() argument must be a sequence of strings" assert open(fn, 'rb').readlines() == ['abcdef'] + with file(fn, 'wb') as f: + exc = raises(TypeError, f.writelines, ['abc', memoryview('def')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == [] + def test_nasty_writelines(self): # The stream lock should be released between writes fn = self.temptestfile From noreply at buildbot.pypy.org Fri Apr 25 19:16:24 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 19:16:24 +0200 (CEST) Subject: [pypy-commit] pypy default: unicode_internal_decode accepts readbuf Message-ID: <20140425171624.DC0081C11B1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70977:629da2d06746 Date: 2014-04-25 13:15 -0400 http://bitbucket.org/pypy/pypy/changeset/629da2d06746/ Log: unicode_internal_decode accepts readbuf diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -679,7 +679,7 @@ if space.isinstance_w(w_string, space.w_unicode): return space.newtuple([w_string, space.len(w_string)]) - string = space.str_w(w_string) + string = space.readbuf_w(w_string).as_str() if len(string) == 0: return space.newtuple([space.wrap(u''), space.wrap(0)]) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -276,7 +276,7 @@ assert enc == "a\x00\x00\x00" def test_unicode_internal_decode(self): - import sys + import sys, _codecs, array if sys.maxunicode == 65535: # 
UCS2 build if sys.byteorder == "big": bytes = "\x00a" @@ -291,6 +291,9 @@ bytes2 = "\x98\x00\x01\x00" assert bytes2.decode("unicode_internal") == u"\U00010098" assert bytes.decode("unicode_internal") == u"a" + assert _codecs.unicode_internal_decode(array.array('c', bytes))[0] == u"a" + exc = raises(TypeError, _codecs.unicode_internal_decode, memoryview(bytes)) + assert str(exc.value) == "expected a readable buffer object" def test_raw_unicode_escape(self): assert unicode("\u0663", "raw-unicode-escape") == u"\u0663" From noreply at buildbot.pypy.org Fri Apr 25 19:34:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 19:34:34 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix some compile() behaviors Message-ID: <20140425173434.3CD9E1C33B0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70978:0b124540057c Date: 2014-04-25 13:33 -0400 http://bitbucket.org/pypy/pypy/changeset/0b124540057c/ Log: test/fix some compile() behaviors diff --git a/lib-python/2.7/test/test_builtin.py b/lib-python/2.7/test/test_builtin.py --- a/lib-python/2.7/test/test_builtin.py +++ b/lib-python/2.7/test/test_builtin.py @@ -250,14 +250,12 @@ self.assertRaises(TypeError, compile) self.assertRaises(ValueError, compile, 'print 42\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print 42\n', '', 'single', 0xff) - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') if have_unicode: compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad') diff --git a/pypy/module/__builtin__/compiling.py 
b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -22,22 +22,6 @@ compile; if absent or zero these statements do influence the compilation, in addition to any features explicitly specified. """ - - ast_node = None - w_ast_type = space.gettypeobject(ast.AST.typedef) - str_ = None - if space.isinstance_w(w_source, w_ast_type): - ast_node = space.interp_w(ast.mod, w_source) - ast_node.sync_app_attrs(space) - elif space.isinstance_w(w_source, space.w_unicode): - w_utf_8_source = space.call_method(w_source, "encode", - space.wrap("utf-8")) - str_ = space.str_w(w_utf_8_source) - # This flag tells the parser to reject any coding cookies it sees. - flags |= consts.PyCF_SOURCE_IS_UTF8 - else: - str_ = space.str_w(w_source) - ec = space.getexecutioncontext() if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): @@ -53,14 +37,30 @@ space.wrap("compile() arg 3 must be 'exec' " "or 'eval' or 'single'")) - if ast_node is None: - if flags & consts.PyCF_ONLY_AST: - mod = ec.compiler.compile_to_ast(str_, filename, mode, flags) - return space.wrap(mod) - else: - code = ec.compiler.compile(str_, filename, mode, flags) + w_ast_type = space.gettypeobject(ast.AST.typedef) + if space.isinstance_w(w_source, w_ast_type): + ast_node = space.interp_w(ast.mod, w_source) + ast_node.sync_app_attrs(space) + code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + return space.wrap(code) + + if space.isinstance_w(w_source, space.w_unicode): + w_utf_8_source = space.call_method(w_source, "encode", + space.wrap("utf-8")) + str_ = space.str_w(w_utf_8_source) + # This flag tells the parser to reject any coding cookies it sees. 
+ flags |= consts.PyCF_SOURCE_IS_UTF8 else: - code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + str_ = space.readbuf_w(w_source).as_str() + + if '\x00' in str_: + raise OperationError(space.w_TypeError, space.wrap( + "compile() expected string without null bytes")) + + if flags & consts.PyCF_ONLY_AST: + code = ec.compiler.compile_to_ast(str_, filename, mode, flags) + else: + code = ec.compiler.compile(str_, filename, mode, flags) return space.wrap(code) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -490,6 +490,14 @@ def test_compile(self): co = compile('1+2', '?', 'eval') assert eval(co) == 3 + co = compile(buffer('1+2'), '?', 'eval') + assert eval(co) == 3 + exc = raises(TypeError, compile, chr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, unichr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, memoryview('1+2'), '?', 'eval') + assert str(exc.value) == "expected a readable buffer object" compile("from __future__ import with_statement", "", "exec") raises(SyntaxError, compile, '-', '?', 'eval') raises(ValueError, compile, '"\\xt"', '?', 'eval') From noreply at buildbot.pypy.org Fri Apr 25 21:43:48 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Apr 2014 21:43:48 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: provide the fromkeys fastpath, differently from default, since it's based Message-ID: <20140425194349.017F21C03FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70979:f44626e10d82 Date: 2014-04-25 11:45 -0700 http://bitbucket.org/pypy/pypy/changeset/f44626e10d82/ Log: provide the fromkeys fastpath, differently from default, since it's based around unicode 
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -113,16 +113,15 @@ if w_fill is None: w_fill = space.w_None if space.is_w(w_type, space.w_dict): - w_dict = W_DictMultiObject.allocate_and_init_instance(space, - w_type) - - byteslist = space.listview_bytes(w_keys) - # XXX: py3k could switch this to listview_unicode, but our - # setitem_str accepts utf-8 encoded strs, not unicode! - if False and byteslist is not None: - for key in byteslist: - w_dict.setitem_str(key, w_fill) + ulist = space.listview_unicode(w_keys) + if ulist is not None: + strategy = space.fromcache(UnicodeDictStrategy) + storage = strategy.get_storage_fromkeys(ulist, w_fill) + w_dict = space.allocate_instance(W_DictMultiObject, w_type) + W_DictMultiObject.__init__(w_dict, space, strategy, storage) else: + w_dict = W_DictMultiObject.allocate_and_init_instance(space, + w_type) for w_key in space.listview(w_keys): w_dict.setitem(w_key, w_fill) else: @@ -943,6 +942,14 @@ i += 1 return keys, values + def get_storage_fromkeys(self, keys_w, w_fill): + """Return an initialized storage with keys and fill values""" + storage = {} + mark_dict_non_null(storage) + for key in keys_w: + storage[key] = w_fill + return self.erase(storage) + create_iterator_classes(UnicodeDictStrategy) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -127,9 +127,8 @@ def test_fromkeys_fastpath(self): space = self.space w = space.wrap - wb = space.wrapbytes - w_l = self.space.newlist([wb("a"),wb("b")]) + w_l = space.newlist([w("a"),w("b")]) w_l.getitems = None w_d = space.call_method(space.w_dict, "fromkeys", w_l) From noreply at buildbot.pypy.org Fri Apr 25 21:43:50 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Apr 2014 
21:43:50 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: provide a listview_int for bytes Message-ID: <20140425194350.2EC661C03FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70980:8dabe468ecab Date: 2014-04-25 12:42 -0700 http://bitbucket.org/pypy/pypy/changeset/8dabe468ecab/ Log: provide a listview_int for bytes diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -401,9 +401,8 @@ def buffer_w(w_self, space): return StringBuffer(w_self._value) - # XXX: could provide listview_int? - #def listview_bytes(self): - # return _create_list_from_bytes(self._value) + def listview_int(self): + return _create_list_from_bytes(self._value) def ord(self, space): if len(self._value) != 1: @@ -646,8 +645,8 @@ def _create_list_from_bytes(value): # need this helper function to allow the jit to look inside and inline - # listview_bytes - return [s for s in value] + # listview_int + return [ord(s) for s in value] W_BytesObject.EMPTY = W_BytesObject('') W_BytesObject.PREBUILT = [W_BytesObject(chr(i)) for i in range(256)] diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -500,6 +500,9 @@ return w_obj.listview_int() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_int() + if type(w_obj) is W_BytesObject: + # Python3 considers bytes strings as a list of numbers. 
+ return w_obj.listview_int() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_int() return None diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -82,10 +82,11 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), wb('el')) - def test_listview_bytes(self): + def test_listview_bytes_int(self): w_bytes = self.space.wrapbytes('abcd') # list(b'abcd') is a list of numbers assert self.space.listview_bytes(w_bytes) == None + assert self.space.listview_int(w_bytes) == [97, 98, 99, 100] class AppTestBytesObject: diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -136,8 +136,8 @@ w_a = W_SetObject(self.space) _initialize_set(self.space, w_a, wb("abcdefg")) - assert sorted(self.space.listview_bytes(w_a)) == list("abcdefg") - assert self.space.listview_int(w_a) is None + assert sorted(self.space.listview_int(w_a)) == [97, 98, 99, 100, 101, 102, 103] + assert self.space.listview_bytes(w_a) is None w_b = W_SetObject(self.space) _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) From noreply at buildbot.pypy.org Fri Apr 25 21:58:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 25 Apr 2014 21:58:17 +0200 (CEST) Subject: [pypy-commit] pypy default: pypyjit test of regex match using buffer Message-ID: <20140425195817.536821C1008@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70981:28a72900dee3 Date: 2014-04-25 15:57 -0400 http://bitbucket.org/pypy/pypy/changeset/28a72900dee3/ Log: pypyjit test of regex match using buffer diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py new 
file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -0,0 +1,28 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestBuffers(BaseTestPyPyC): + def test_re_match(self): + def main(): + import re + import array + p = re.compile('.+') + a = array.array('c', 'test' * 1000) + i = 0 + while i < 5000: + i += 1 + p.match(a) # ID: match + log = self.run(main, []) + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('match', """ + guard_not_invalidated(descr=...) + i65 = getfield_gc(p18, descr=...) + i67 = int_gt(0, i65) + guard_false(i67, descr=...) + i69 = int_gt(., i65) + guard_true(i69, descr=...) + guard_not_invalidated(descr=...) + i74 = getfield_raw(., descr=...) + i75 = int_lt(i74, 0) + guard_false(i75, descr=...) + """) From noreply at buildbot.pypy.org Fri Apr 25 23:17:43 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Apr 2014 23:17:43 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: utilize decodekey_str Message-ID: <20140425211743.189071C03FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70982:c9d410e044b3 Date: 2014-04-25 13:04 -0700 http://bitbucket.org/pypy/pypy/changeset/c9d410e044b3/ Log: utilize decodekey_str diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -151,7 +151,7 @@ storage = strategy.get_empty_storage() d_new = strategy.unerase(storage) for i in range(len(keys)): - d_new[keys[i].decode('utf-8')] = values_w[i] + d_new[self.decodekey_str(keys[i])] = values_w[i] w_dict.strategy = strategy w_dict.dstorage = storage From noreply at buildbot.pypy.org Fri Apr 25 23:17:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Apr 2014 23:17:44 +0200 (CEST) Subject: [pypy-commit] pypy py3k-fix-strategies: close to be merged branch Message-ID: 
<20140425211744.4331F1C03FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-fix-strategies Changeset: r70983:25c582216b18 Date: 2014-04-25 14:16 -0700 http://bitbucket.org/pypy/pypy/changeset/25c582216b18/ Log: close to be merged branch From noreply at buildbot.pypy.org Fri Apr 25 23:17:46 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Apr 2014 23:17:46 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge py3k-fix-strategies: re-enables the disabled strategies (issue1471) Message-ID: <20140425211746.2D98A1C03FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70984:092f39d9ab1c Date: 2014-04-25 14:17 -0700 http://bitbucket.org/pypy/pypy/changeset/092f39d9ab1c/ Log: merge py3k-fix-strategies: re-enables the disabled strategies (issue1471) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,9 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) + try: + imp.load_module('_testcapi', fp, filename, description) + finally: + fp.close() except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -351,7 +351,7 @@ limit -= len(self.keyword_names_w) for i in range(len(self.keywords)): if i < limit: - w_key = space.wrap(self.keywords[i]) + w_key = space.wrap(self.keywords[i].decode('utf-8')) else: w_key = self.keyword_names_w[i - limit] space.setitem(w_kwds, w_key, self.keywords_w[i]) @@ -446,7 +446,7 @@ break else: if i < limit: - w_key = space.wrap(keywords[i]) + w_key = space.wrap(keywords[i].decode('utf-8')) else: w_key = keyword_names_w[i - limit] space.setitem(w_kwds, w_key, keywords_w[i]) diff --git 
a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -433,6 +433,9 @@ def getconstant_w(self, index): return self.getcode().co_consts_w[index] + def getname_u(self, index): + return self.space.identifier_w(self.getname_w(index)) + def getname_w(self, index): return self.getcode().co_names_w[index] @@ -753,9 +756,9 @@ self.pushvalue(w_build_class) def STORE_NAME(self, varindex, next_instr): - w_varname = self.getname_w(varindex) + varname = self.getname_u(varindex) w_newvalue = self.popvalue() - self.space.setitem(self.w_locals, w_varname, w_newvalue) + self.space.setitem_str(self.w_locals, varname, w_newvalue) def DELETE_NAME(self, varindex, next_instr): w_varname = self.getname_w(varindex) @@ -765,8 +768,8 @@ # catch KeyErrors and turn them into NameErrors if not e.match(self.space, self.space.w_KeyError): raise - raise oefmt(self.space.w_NameError, "name '%s' is not defined", - self.space.str_w(w_varname)) + raise oefmt(self.space.w_NameError, + "name %R is not defined", w_varname) def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() @@ -817,7 +820,7 @@ self.space.delattr(w_obj, w_attributename) def STORE_GLOBAL(self, nameindex, next_instr): - varname = self.space.str_w(self.getname_w(nameindex)) + varname = self.getname_u(nameindex) w_newvalue = self.popvalue() self.space.setitem_str(self.w_globals, varname, w_newvalue) @@ -827,24 +830,24 @@ def LOAD_NAME(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) + varname = self.space.identifier_w(w_varname) if self.w_locals is not self.w_globals: - w_value = self.space.finditem(self.w_locals, w_varname) + w_value = self.space.finditem_str(self.w_locals, varname) if w_value is not None: self.pushvalue(w_value) return # fall-back - w_value = self._load_global(w_varname) + w_value = self._load_global(varname) if w_value is None: raise oefmt(self.space.w_NameError, "name %R is not defined", 
w_varname) self.pushvalue(w_value) - def _load_global(self, w_varname): - w_value = self.space.finditem(self.w_globals, w_varname) + def _load_global(self, varname): + w_value = self.space.finditem_str(self.w_globals, varname) if w_value is None: # not in the globals, now look in the built-ins - w_value = self.get_builtin().getdictvalue( - self.space, self.space.identifier_w(w_varname)) + w_value = self.get_builtin().getdictvalue(self.space, varname) return w_value _load_global._always_inline_ = True @@ -855,7 +858,7 @@ def LOAD_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - w_value = self._load_global(w_varname) + w_value = self._load_global(self.space.identifier_w(w_varname)) if w_value is None: self._load_global_failed(w_varname) self.pushvalue(w_value) @@ -993,7 +996,7 @@ if not e.match(self.space, self.space.w_AttributeError): raise raise oefmt(self.space.w_ImportError, - "cannot import name '%s'", self.space.str_w(w_name)) + "cannot import name %R", w_name) self.pushvalue(w_obj) def YIELD_VALUE(self, oparg, next_instr): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -75,21 +75,18 @@ assert x == 42 def test_list_strategy(self): - py3k_skip("XXX: strategies are currently broken") from __pypy__ import list_strategy l = [1, 2, 3] assert list_strategy(l) == "int" + l = list(range(1, 2)) + assert list_strategy(l) == "int" l = [b"a", b"b", b"c"] assert list_strategy(l) == "bytes" l = ["a", "b", "c"] assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" - l = range(3) - assert list_strategy(l) == "simple_range" - l = range(1, 2) - assert list_strategy(l) == "range" l = [1, "b", 3] assert list_strategy(l) == "object" l = [] diff --git a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- 
a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -16,7 +16,6 @@ W_CType.pack_list_of_items = self._original def test_fast_init_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend LONG = _cffi_backend.new_primitive_type('long') P_LONG = _cffi_backend.new_pointer_type(LONG) @@ -37,7 +36,6 @@ assert buf[2] == 3.3 def test_fast_init_short_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend SHORT = _cffi_backend.new_primitive_type('short') P_SHORT = _cffi_backend.new_pointer_type(SHORT) @@ -50,7 +48,6 @@ raises(OverflowError, _cffi_backend.newp, SHORT_ARRAY, [-40000]) def test_fast_init_longlong_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend import sys large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) @@ -64,7 +61,6 @@ assert buf[3] == large_int def test_fast_init_ushort_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend USHORT = _cffi_backend.new_primitive_type('unsigned short') P_USHORT = _cffi_backend.new_pointer_type(USHORT) @@ -77,18 +73,17 @@ raises(OverflowError, _cffi_backend.newp, USHORT_ARRAY, [-1]) def test_fast_init_ulong_from_list(self): - py3k_skip('XXX: strategies are currently broken') import sys import _cffi_backend ULONG = _cffi_backend.new_primitive_type('unsigned long') P_ULONG = _cffi_backend.new_pointer_type(ULONG) ULONG_ARRAY = _cffi_backend.new_array_type(P_ULONG, None) - buf = _cffi_backend.newp(ULONG_ARRAY, [1, 2, sys.maxint]) + buf = _cffi_backend.newp(ULONG_ARRAY, [1, 2, sys.maxsize]) assert buf[0] == 1 assert buf[1] == 2 - assert buf[2] == sys.maxint + assert buf[2] == sys.maxsize raises(OverflowError, _cffi_backend.newp, ULONG_ARRAY, [-1]) - raises(OverflowError, _cffi_backend.newp, ULONG_ARRAY, [-sys.maxint]) + raises(OverflowError, _cffi_backend.newp, ULONG_ARRAY, [-sys.maxsize]) def 
test_fast_init_cfloat_from_list(self): import _cffi_backend @@ -109,7 +104,6 @@ assert float(buf[1]) == -3.5 def test_fast_init_bool_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend BOOL = _cffi_backend.new_primitive_type('_Bool') P_BOOL = _cffi_backend.new_pointer_type(BOOL) diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py --- a/pypy/module/_posixsubprocess/interp_subprocess.py +++ b/pypy/module/_posixsubprocess/interp_subprocess.py @@ -219,7 +219,8 @@ def cloexec_pipe(space): - """"cloexec_pipe() -> (read_end, write_end) + """cloexec_pipe() -> (read_end, write_end) + Create a pipe whose ends have the cloexec flag set.""" with lltype.scoped_alloc(rffi.CArrayPtr(rffi.INT).TO, 2) as fds: diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -401,7 +401,7 @@ def buffer_w(w_self, space): return StringBuffer(w_self._value) - def listview_bytes(self): + def listview_int(self): return _create_list_from_bytes(self._value) def ord(self, space): @@ -620,8 +620,8 @@ l = space.listview_bytes(w_list) if l is not None: if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) + return space.wrapbytes(l[0]) + return space.wrapbytes(self._val(space).join(l)) return self._StringMethods_descr_join(space, w_list) def _join_return_one(self, space, w_obj): @@ -645,8 +645,8 @@ def _create_list_from_bytes(value): # need this helper function to allow the jit to look inside and inline - # listview_bytes - return [s for s in value] + # listview_int + return [ord(s) for s in value] W_BytesObject.EMPTY = W_BytesObject('') W_BytesObject.PREBUILT = [W_BytesObject(chr(i)) for i in range(256)] diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -30,7 +30,7 @@ 
def _wrapkey(space, key): - return space.wrap(key) + return space.wrap(key.decode('utf-8')) class ModuleDictStrategy(DictStrategy): @@ -63,7 +63,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): self.setitem_str(w_dict, space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) @@ -85,7 +85,7 @@ def setdefault(self, w_dict, w_key, w_default): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): key = space.str_w(w_key) w_result = self.getitem_str(w_dict, key) if w_result is not None: @@ -99,7 +99,7 @@ def delitem(self, w_dict, w_key): space = self.space w_key_type = space.type(w_key) - if space.is_w(w_key_type, space.w_str): + if space.is_w(w_key_type, space.w_unicode): key = space.str_w(w_key) dict_w = self.unerase(w_dict.dstorage) try: @@ -120,7 +120,7 @@ def getitem(self, w_dict, w_key): space = self.space w_lookup_type = space.type(w_key) - if space.is_w(w_lookup_type, space.w_str): + if space.is_w(w_lookup_type, space.w_unicode): return self.getitem_str(w_dict, space.str_w(w_key)) elif _never_equal_to_string(space, w_lookup_type): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -55,10 +55,10 @@ elif space.config.objspace.std.withmapdict and instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) - #elif instance or strdict or module: - # assert w_type is None - # strategy = space.fromcache(BytesDictStrategy) - elif False and kwargs: + elif instance or strdict or module: + assert w_type is None + strategy = space.fromcache(UnicodeDictStrategy) + elif kwargs: assert w_type is None from pypy.objspace.std.kwargsdict import EmptyKwargsDictStrategy strategy = 
space.fromcache(EmptyKwargsDictStrategy) @@ -113,14 +113,15 @@ if w_fill is None: w_fill = space.w_None if space.is_w(w_type, space.w_dict): - w_dict = W_DictMultiObject.allocate_and_init_instance(space, - w_type) - - byteslist = space.listview_bytes(w_keys) - if byteslist is not None: - for key in byteslist: - w_dict.setitem_str(key, w_fill) + ulist = space.listview_unicode(w_keys) + if ulist is not None: + strategy = space.fromcache(UnicodeDictStrategy) + storage = strategy.get_storage_fromkeys(ulist, w_fill) + w_dict = space.allocate_instance(W_DictMultiObject, w_type) + W_DictMultiObject.__init__(w_dict, space, strategy, storage) else: + w_dict = W_DictMultiObject.allocate_and_init_instance(space, + w_type) for w_key in space.listview(w_keys): w_dict.setitem(w_key, w_fill) else: @@ -360,6 +361,9 @@ def get_empty_storage(self): raise NotImplementedError + def decodekey_str(self, key): + return key.decode('utf-8') + @jit.look_inside_iff(lambda self, w_dict: w_dict_unrolling_heuristic(w_dict)) def w_keys(self, w_dict): @@ -430,18 +434,18 @@ return self.erase(None) def switch_to_correct_strategy(self, w_dict, w_key): + from pypy.objspace.std.intobject import W_IntObject withidentitydict = self.space.config.objspace.std.withidentitydict - # if type(w_key) is self.space.StringObjectCls: - # self.switch_to_bytes_strategy(w_dict) - # return + if type(w_key) is self.space.StringObjectCls: + self.switch_to_bytes_strategy(w_dict) + return if type(w_key) is self.space.UnicodeObjectCls: self.switch_to_unicode_strategy(w_dict) return + if type(w_key) is W_IntObject: + self.switch_to_int_strategy(w_dict) + return w_type = self.space.type(w_key) - # XXX: disable IntDictStrategy for now, because in py3k ints are - # actually long - ## if self.space.is_w(w_type, self.space.w_int): - ## self.switch_to_int_strategy(w_dict) if withidentitydict and w_type.compares_by_identity(): self.switch_to_identity_strategy(w_dict) else: @@ -500,7 +504,7 @@ w_dict.setitem(w_key, w_value) def 
setitem_str(self, w_dict, key, w_value): - self.switch_to_bytes_strategy(w_dict) + self.switch_to_unicode_strategy(w_dict) w_dict.setitem_str(key, w_value) def delitem(self, w_dict, w_key): @@ -700,7 +704,7 @@ def setitem_str(self, w_dict, key, w_value): self.switch_to_object_strategy(w_dict) - w_dict.setitem(self.space.wrap(key), w_value) + w_dict.setitem(self.space.wrap(self.decodekey_str(key)), w_value) def setdefault(self, w_dict, w_key, w_default): if self.is_correct_type(w_key): @@ -722,7 +726,7 @@ return len(self.unerase(w_dict.dstorage)) def getitem_str(self, w_dict, key): - return self.getitem(w_dict, self.space.wrap(key)) + return self.getitem(w_dict, self.space.wrap(self.decodekey_str(key))) def getitem(self, w_dict, w_key): space = self.space @@ -802,7 +806,7 @@ return self.space.newlist(self.unerase(w_dict.dstorage).keys()) def setitem_str(self, w_dict, s, w_value): - self.setitem(w_dict, self.space.wrap(s), w_value) + self.setitem(w_dict, self.space.wrap(self.decodekey_str(s)), w_value) def switch_to_object_strategy(self, w_dict): assert 0, "should be unreachable" @@ -816,10 +820,10 @@ unerase = staticmethod(unerase) def wrap(self, unwrapped): - return self.space.wrap(unwrapped) + return self.space.wrapbytes(unwrapped) def unwrap(self, wrapped): - return self.space.str_w(wrapped) + return self.space.bytes_w(wrapped) def is_correct_type(self, w_obj): space = self.space @@ -833,21 +837,21 @@ def _never_equal_to(self, w_lookup_type): return _never_equal_to_string(self.space, w_lookup_type) - def setitem_str(self, w_dict, key, w_value): - assert key is not None - self.unerase(w_dict.dstorage)[key] = w_value + ##def setitem_str(self, w_dict, key, w_value): + ## assert key is not None + ## self.unerase(w_dict.dstorage)[key] = w_value - def getitem(self, w_dict, w_key): - space = self.space - # -- This is called extremely often. 
Hack for performance -- - if type(w_key) is space.StringObjectCls: - return self.getitem_str(w_dict, w_key.unwrap(space)) - # -- End of performance hack -- - return AbstractTypedStrategy.getitem(self, w_dict, w_key) + ##def getitem(self, w_dict, w_key): + ## space = self.space + ## # -- This is called extremely often. Hack for performance -- + ## if type(w_key) is space.StringObjectCls: + ## return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None) + ## # -- End of performance hack -- + ## return AbstractTypedStrategy.getitem(self, w_dict, w_key) - def getitem_str(self, w_dict, key): - assert key is not None - return self.unerase(w_dict.dstorage).get(key, None) + ##def getitem_str(self, w_dict, key): + ## assert key is not None + ## return self.unerase(w_dict.dstorage).get(key, None) def listview_bytes(self, w_dict): return self.unerase(w_dict.dstorage).keys() @@ -856,21 +860,21 @@ return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): - return space.wrap(key) + return space.wrapbytes(key) - @jit.look_inside_iff(lambda self, w_dict: - w_dict_unrolling_heuristic(w_dict)) - def view_as_kwargs(self, w_dict): - return (None, None) # XXX: fix me to return unicode keys - d = self.unerase(w_dict.dstorage) - l = len(d) - keys, values = [None] * l, [None] * l - i = 0 - for key, val in d.iteritems(): - keys[i] = key - values[i] = val - i += 1 - return keys, values + ##@jit.look_inside_iff(lambda self, w_dict: + ## w_dict_unrolling_heuristic(w_dict)) + ##def view_as_kwargs(self, w_dict): + ## return (None, None) # XXX: fix me to return unicode keys + ## d = self.unerase(w_dict.dstorage) + ## l = len(d) + ## keys, values = [None] * l, [None] * l + ## i = 0 + ## for key, val in d.iteritems(): + ## keys[i] = key + ## values[i] = val + ## i += 1 + ## return keys, values create_iterator_classes(BytesDictStrategy) @@ -900,43 +904,51 @@ # we should implement the same shortcuts as we do for BytesDictStrategy - ## def setitem_str(self, w_dict, 
key, w_value): - ## assert key is not None - ## self.unerase(w_dict.dstorage)[key] = w_value + def setitem_str(self, w_dict, key, w_value): + assert key is not None + self.unerase(w_dict.dstorage)[self.decodekey_str(key)] = w_value - ## def getitem(self, w_dict, w_key): - ## space = self.space - ## # -- This is called extremely often. Hack for performance -- - ## if type(w_key) is space.StringObjectCls: - ## return self.getitem_str(w_dict, w_key.unwrap(space)) - ## # -- End of performance hack -- - ## return AbstractTypedStrategy.getitem(self, w_dict, w_key) + def getitem(self, w_dict, w_key): + space = self.space + # -- This is called extremely often. Hack for performance -- + if type(w_key) is space.UnicodeObjectCls: + return self.unerase(w_dict.dstorage).get(w_key.unwrap(space), None) + # -- End of performance hack -- + return AbstractTypedStrategy.getitem(self, w_dict, w_key) - ## def getitem_str(self, w_dict, key): - ## assert key is not None - ## return self.unerase(w_dict.dstorage).get(key, None) + def getitem_str(self, w_dict, key): + assert key is not None + return self.unerase(w_dict.dstorage).get(self.decodekey_str(key), None) def listview_unicode(self, w_dict): return self.unerase(w_dict.dstorage).keys() - ## def w_keys(self, w_dict): - ## return self.space.newlist_bytes(self.listview_bytes(w_dict)) + def w_keys(self, w_dict): + return self.space.newlist_unicode(self.listview_unicode(w_dict)) def wrapkey(space, key): return space.wrap(key) - ## @jit.look_inside_iff(lambda self, w_dict: - ## w_dict_unrolling_heuristic(w_dict)) - ## def view_as_kwargs(self, w_dict): - ## d = self.unerase(w_dict.dstorage) - ## l = len(d) - ## keys, values = [None] * l, [None] * l - ## i = 0 - ## for key, val in d.iteritems(): - ## keys[i] = key - ## values[i] = val - ## i += 1 - ## return keys, values + @jit.look_inside_iff(lambda self, w_dict: + w_dict_unrolling_heuristic(w_dict)) + def view_as_kwargs(self, w_dict): + d = self.unerase(w_dict.dstorage) + l = len(d) + keys, 
values = [None] * l, [None] * l + i = 0 + for key, val in d.iteritems(): + keys[i] = key.encode('utf-8') + values[i] = val + i += 1 + return keys, values + + def get_storage_fromkeys(self, keys_w, w_fill): + """Return an initialized storage with keys and fill values""" + storage = {} + mark_dict_non_null(storage) + for key in keys_w: + storage[key] = w_fill + return self.erase(storage) create_iterator_classes(UnicodeDictStrategy) @@ -956,8 +968,8 @@ return self.erase({}) def is_correct_type(self, w_obj): - space = self.space - return space.is_w(space.type(w_obj), space.w_int) + from pypy.objspace.std.intobject import W_IntObject + return type(w_obj) is W_IntObject def _never_equal_to(self, w_lookup_type): space = self.space diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -6,16 +6,16 @@ from rpython.rlib import jit, rerased from pypy.objspace.std.dictmultiobject import ( - BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, + DictStrategy, EmptyDictStrategy, ObjectDictStrategy, UnicodeDictStrategy, create_iterator_classes) def _wrapkey(space, key): - return space.wrap(key) + return space.wrap(key.decode('utf-8')) class EmptyKwargsDictStrategy(EmptyDictStrategy): - def switch_to_bytes_strategy(self, w_dict): + def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy @@ -39,7 +39,7 @@ def is_correct_type(self, w_obj): space = self.space - return space.is_w(space.type(w_obj), space.w_str) + return space.is_w(space.type(w_obj), space.w_unicode) def _never_equal_to(self, w_lookup_type): return False @@ -66,7 +66,7 @@ else: # limit the size so that the linear searches don't become too long if len(keys) >= 16: - self.switch_to_bytes_strategy(w_dict) + self.switch_to_unicode_strategy(w_dict) w_dict.setitem_str(key, w_value) else: 
keys.append(key) @@ -116,7 +116,7 @@ def w_keys(self, w_dict): l = self.unerase(w_dict.dstorage)[0] - return self.space.newlist_bytes(l[:]) + return self.space.newlist_unicode(l[:]) def values(self, w_dict): return self.unerase(w_dict.dstorage)[1][:] # to make non-resizable @@ -145,13 +145,13 @@ w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) - def switch_to_bytes_strategy(self, w_dict): - strategy = self.space.fromcache(BytesDictStrategy) + def switch_to_unicode_strategy(self, w_dict): + strategy = self.space.fromcache(UnicodeDictStrategy) keys, values_w = self.unerase(w_dict.dstorage) storage = strategy.get_empty_storage() d_new = strategy.unerase(storage) for i in range(len(keys)): - d_new[keys[i]] = values_w[i] + d_new[self.decodekey_str(keys[i])] = values_w[i] w_dict.strategy = strategy w_dict.dstorage = storage diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -80,14 +80,11 @@ return space.fromcache(IntegerListStrategy) # check for strings - # XXX: StringListStrategy is currently broken - """ for w_obj in list_w: if not type(w_obj) is W_BytesObject: break else: return space.fromcache(BytesListStrategy) - """ # check for unicode for w_obj in list_w: @@ -166,12 +163,11 @@ self.switch_to_object_strategy() return self - # XXX: BytesListStrategy is currently broken - #@staticmethod - #def newlist_bytes(space, list_b): - # strategy = space.fromcache(BytesListStrategy) - # storage = strategy.erase(list_b) - # return W_ListObject.from_storage_and_strategy(space, storage, strategy) + @staticmethod + def newlist_bytes(space, list_b): + strategy = space.fromcache(BytesListStrategy) + storage = strategy.erase(list_b) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) @staticmethod def newlist_unicode(space, list_u): @@ -875,8 +871,8 @@ def switch_to_correct_strategy(self, w_list, w_item): if type(w_item) is W_IntObject: 
strategy = self.space.fromcache(IntegerListStrategy) - #elif type(w_item) is W_BytesObject: - # strategy = self.space.fromcache(BytesListStrategy) + elif type(w_item) is W_BytesObject: + strategy = self.space.fromcache(BytesListStrategy) elif type(w_item) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeListStrategy) elif type(w_item) is W_FloatObject: @@ -1662,7 +1658,7 @@ return self.space.wrapbytes(stringval) def unwrap(self, w_string): - return self.space.str_w(w_string) + return self.space.bytes_w(w_string) erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) @@ -1778,7 +1774,7 @@ def lt(self, a, b): return a < b -class StringSort(UnicodeBaseTimSort): +class StringSort(StringBaseTimSort): def lt(self, a, b): return a < b diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -640,7 +640,7 @@ def getitem(self, w_dict, w_key): space = self.space w_lookup_type = space.type(w_key) - if space.is_w(w_lookup_type, space.w_str): + if space.is_w(w_lookup_type, space.w_unicode): return self.getitem_str(w_dict, space.str_w(w_key)) elif _never_equal_to_string(space, w_lookup_type): return None @@ -659,7 +659,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): self.setitem_str(w_dict, self.space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) @@ -667,7 +667,7 @@ def setdefault(self, w_dict, w_key, w_default): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): key = space.str_w(w_key) w_result = self.getitem_str(w_dict, key) if w_result is not None: @@ -682,7 +682,7 @@ space = self.space w_key_type = space.type(w_key) w_obj = self.unerase(w_dict.dstorage) - if space.is_w(w_key_type, space.w_str): + if space.is_w(w_key_type, space.w_unicode): key 
= self.space.str_w(w_key) flag = w_obj.deldictvalue(space, key) if not flag: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -316,10 +316,8 @@ assert not list_w or sizehint == -1 return W_ListObject(self, list_w, sizehint) - # XXX: BytesListStrategy is currently broken use the default - # implementation, which simply wraps - #def newlist_bytes(self, list_s): - # return W_ListObject.newlist_bytes(self, list_s) + def newlist_bytes(self, list_s): + return W_ListObject.newlist_bytes(self, list_s) def newlist_unicode(self, list_u): return W_ListObject.newlist_unicode(self, list_u) @@ -502,6 +500,9 @@ return w_obj.listview_int() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_int() + if type(w_obj) is W_BytesObject: + # Python3 considers bytes strings as a list of numbers. + return w_obj.listview_int() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_int() return None diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1439,7 +1439,7 @@ def next_entry(self): for key in self.iterator: - return self.space.wrap(key) + return self.space.wrapbytes(key) else: return None diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -82,10 +82,11 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), wb('el')) - def test_listview_bytes(self): + def test_listview_bytes_int(self): w_bytes = self.space.wrapbytes('abcd') # list(b'abcd') is a list of numbers assert self.space.listview_bytes(w_bytes) == None + assert self.space.listview_int(w_bytes) == [97, 98, 99, 100] class AppTestBytesObject: diff --git 
a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,15 +1,16 @@ +# encoding: utf-8 import py from pypy.objspace.std.celldict import ModuleDictStrategy from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.test.test_dictmultiobject import ( BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, - FakeString) + FakeUnicode) space = FakeSpace() class TestCellDict(object): - FakeString = FakeString + FakeString = FakeUnicode def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) @@ -50,10 +51,10 @@ v1 = strategy.version x = object() - d.setitem("a", x) + d.setitem(u"a", x) v2 = strategy.version assert v1 is not v2 - d.setitem("a", x) + d.setitem(u"a", x) v3 = strategy.version assert v2 is v3 @@ -70,7 +71,6 @@ assert "ModuleDictStrategy" in __pypy__.internal_repr(obj) def test_check_module_uses_module_dict(self): - py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") m = type(__builtins__)("abc") self.impl_used(m.__dict__) @@ -133,9 +133,12 @@ def setup_class(cls): if cls.runappdirect: py.test.skip("__repr__ doesn't work on appdirect") - strategy = ModuleDictStrategy(cls.space) + + def setup_method(self, method): + space = self.space + strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - cls.w_d = W_DictMultiObject(cls.space, strategy, storage) + self.w_d = W_DictMultiObject(space, strategy, storage) def test_popitem(self): import __pypy__ @@ -148,7 +151,6 @@ assert x == ("a", 3) def test_degenerate(self): - py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") import __pypy__ d = self.d @@ -157,3 +159,23 @@ del d["a"] d[object()] = 5 assert list(d.values()) == [5] + + def test_unicode(self): + import __pypy__ + + d = self.d + assert 
"ModuleDict" in __pypy__.internal_repr(d) + d['λ'] = True + assert "ModuleDict" in __pypy__.internal_repr(d) + assert list(d) == ['λ'] + assert next(iter(d)) == 'λ' + assert "ModuleDict" in __pypy__.internal_repr(d) + + d['foo'] = 'bar' + assert sorted(d) == ['foo', 'λ'] + assert "ModuleDict" in __pypy__.internal_repr(d) + + o = object() + d[o] = 'baz' + assert set(d) == set(['foo', 'λ', o]) + assert "ObjectDictStrategy" in __pypy__.internal_repr(d) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -3,7 +3,7 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - BytesDictStrategy, ObjectDictStrategy) + BytesDictStrategy, ObjectDictStrategy, UnicodeDictStrategy) class TestW_DictObject(object): @@ -125,12 +125,10 @@ assert self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) def test_fromkeys_fastpath(self): - py.test.py3k_skip("XXX: strategies are currently broken") space = self.space w = space.wrap - wb = space.wrapbytes - w_l = self.space.newlist([wb("a"),wb("b")]) + w_l = space.newlist([w("a"),w("b")]) w_l.getitems = None w_d = space.call_method(space.w_dict, "fromkeys", w_l) @@ -138,7 +136,6 @@ assert space.eq_w(w_d.getitem_str("b"), space.w_None) def test_listview_bytes_dict(self): - py.test.py3k_skip("XXX: strategies are currently broken") w = self.space.wrap wb = self.space.wrapbytes w_d = self.space.newdict() @@ -152,30 +149,30 @@ assert self.space.listview_unicode(w_d) == [u"a", u"b"] def test_listview_int_dict(self): - py.test.py3k_skip("IntDictStrategy not supported yet") w = self.space.wrap w_d = self.space.newdict() w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) assert self.space.listview_int(w_d) == [1, 2] def test_keys_on_string_unicode_int_dict(self, monkeypatch): - py.test.py3k_skip("XXX: strategies are currently broken") w = 
self.space.wrap wb = self.space.wrapbytes w_d = self.space.newdict() w_d.initialize_content([(w(1), wb("a")), (w(2), wb("b"))]) - w_l = self.space.call_method(w_d, "keys") + w_k = self.space.call_method(w_d, "keys") + w_l = self.space.call_function(self.space.w_list, w_k) assert sorted(self.space.listview_int(w_l)) == [1,2] - # make sure that .keys() calls newlist_bytes for string dicts + # make sure that list(d.keys()) calls newlist_bytes for byte dicts def not_allowed(*args): assert False, 'should not be called' monkeypatch.setattr(self.space, 'newlist', not_allowed) # w_d = self.space.newdict() - w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) - w_l = self.space.call_method(w_d, "keys") + w_d.initialize_content([(wb("a"), w(1)), (wb("b"), w(6))]) + w_k = self.space.call_method(w_d, "keys") + w_l = self.space.call_function(self.space.w_list, w_k) assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] # XXX: it would be nice if the test passed without monkeypatch.undo(), @@ -183,7 +180,8 @@ monkeypatch.undo() w_d = self.space.newdict() w_d.initialize_content([(w(u"a"), w(1)), (w(u"b"), w(6))]) - w_l = self.space.call_method(w_d, "keys") + w_k = self.space.call_method(w_d, "keys") + w_l = self.space.call_function(self.space.w_list, w_k) assert sorted(self.space.listview_unicode(w_l)) == [u"a", u"b"] class AppTest_DictObject: @@ -952,10 +950,9 @@ return r[r.find("(") + 1: r.find(")")] def test_empty_to_string(self): - py3k_skip("StringDictStrategy not supported yet") d = {} assert "EmptyDictStrategy" in self.get_strategy(d) - d["a"] = 1 + d[b"a"] = 1 assert "BytesDictStrategy" in self.get_strategy(d) class O(object): @@ -964,7 +961,7 @@ d = o.__dict__ = {} assert "EmptyDictStrategy" in self.get_strategy(d) o.a = 1 - assert "BytesDictStrategy" in self.get_strategy(d) + assert "UnicodeDictStrategy" in self.get_strategy(d) def test_empty_to_unicode(self): d = {} @@ -1017,9 +1014,16 @@ # gives us (1, 2), but 1 is not in the dict any longer. 
#raises(RuntimeError, list, it) + def test_bytes_to_object(self): + d = {b'a': 'b'} + d[object()] = None + assert b'a' in list(d) -class FakeWrapper(object): + +class FakeString(str): + hash_count = 0 + def unwrap(self, space): self.unwrapped = True return str(self) @@ -1028,11 +1032,18 @@ self.hash_count += 1 return str.__hash__(self) -class FakeString(FakeWrapper, str): - pass +class FakeUnicode(unicode): -class FakeUnicode(FakeWrapper, unicode): - pass + hash_count = 0 + + def unwrap(self, space): + self.unwrapped = True + return unicode(self) + + def __hash__(self): + self.hash_count += 1 + return unicode.__hash__(self) + # the minimal 'space' needed to use a W_DictMultiObject class FakeSpace: @@ -1054,22 +1065,42 @@ return l def newlist_bytes(self, l): return l + def newlist_unicode(self, l): + return l DictObjectCls = W_DictMultiObject def type(self, w_obj): if isinstance(w_obj, FakeString): return str + if isinstance(w_obj, FakeUnicode): + return unicode return type(w_obj) w_str = str + w_unicode = unicode def str_w(self, string): + if isinstance(string, unicode): + return string.encode('utf-8') assert isinstance(string, str) return string + def bytes_w(self, string): + assert isinstance(string, str) + return string + + def unicode_w(self, string): + assert isinstance(string, unicode) + return string + def int_w(self, integer, allow_conversion=True): assert isinstance(integer, int) return integer def wrap(self, obj): + if isinstance(obj, str): + return obj.decode('ascii') + return obj + + def wrapbytes(self, obj): return obj def isinstance_w(self, obj, klass): @@ -1144,13 +1175,18 @@ assert value == d.descr_getitem(self.space, key) class BaseTestRDictImplementation: + FakeString = FakeUnicode + _str_devolves = False def setup_method(self,method): self.fakespace = FakeSpace() - self.string = self.fakespace.wrap("fish") - self.string2 = self.fakespace.wrap("fish2") + self.string = self.wrapstrorunicode("fish") + self.string2 = self.wrapstrorunicode("fish2") 
self.impl = self.get_impl() + def wrapstrorunicode(self, obj): + return self.fakespace.wrap(obj) + def get_impl(self): strategy = self.StrategyClass(self.fakespace) storage = strategy.get_empty_storage() @@ -1178,21 +1214,22 @@ else: assert a == self.string2 assert b == 2000 - assert self.impl.getitem_str(self.string) == 1000 + if not self._str_devolves: + result = self.impl.getitem_str(self.string) + else: + result = self.impl.getitem(self.string) + assert result == 1000 self.check_not_devolved() def test_setitem(self): self.impl.setitem(self.string, 1000) assert self.impl.length() == 1 assert self.impl.getitem(self.string) == 1000 - assert self.impl.getitem_str(self.string) == 1000 - self.check_not_devolved() - - def test_setitem_str(self): - self.impl.setitem_str(self.fakespace.str_w(self.string), 1000) - assert self.impl.length() == 1 - assert self.impl.getitem(self.string) == 1000 - assert self.impl.getitem_str(self.string) == 1000 + if not self._str_devolves: + result = self.impl.getitem_str(self.string) + else: + result = self.impl.getitem(self.string) + assert result == 1000 self.check_not_devolved() def test_delitem(self): @@ -1256,14 +1293,14 @@ def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names impl = self.impl - key = FakeString(self.string) + key = self.FakeString(self.string) x = impl.setdefault(key, 1) assert x == 1 - if on_pypy: + if on_pypy and self.FakeString is FakeString: assert key.hash_count == 1 x = impl.setdefault(key, 2) assert x == 1 - if on_pypy: + if on_pypy and self.FakeString is FakeString: assert key.hash_count == 2 def test_fallback_evil_key(self): @@ -1296,20 +1333,34 @@ assert w_key not in d.w_keys() assert F() not in d.w_keys() -class TestBytesDictImplementation(BaseTestRDictImplementation): - StrategyClass = BytesDictStrategy +class TestUnicodeDictImplementation(BaseTestRDictImplementation): + StrategyClass = UnicodeDictStrategy def test_str_shortcut(self): self.fill_impl() - s = 
FakeString(self.string) + s = self.FakeString(self.string) assert self.impl.getitem(s) == 1000 assert s.unwrapped def test_view_as_kwargs(self): - py.test.py3k_skip("XXX: strategies are currently broken") self.fill_impl() assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) + def test_setitem_str(self): + self.impl.setitem_str(self.fakespace.str_w(self.string), 1000) + assert self.impl.length() == 1 + assert self.impl.getitem(self.string) == 1000 + assert self.impl.getitem_str(self.string) == 1000 + self.check_not_devolved() + +class TestBytesDictImplementation(BaseTestRDictImplementation): + StrategyClass = BytesDictStrategy + FakeString = FakeString + _str_devolves = True + + def wrapstrorunicode(self, obj): + return self.fakespace.wrapbytes(obj) + class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): @@ -1319,13 +1370,12 @@ def check_not_devolved(self): pass -class TestDevolvedBytesDictImplementation(BaseTestDevolvedDictImplementation): - StrategyClass = BytesDictStrategy +class TestDevolvedUnicodeDictImplementation(BaseTestDevolvedDictImplementation): + StrategyClass = UnicodeDictStrategy def test_module_uses_strdict(): - py.test.py3k_skip("XXX: strategies are currently broken") fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is BytesDictStrategy + assert type(d.strategy) is UnicodeDictStrategy diff --git a/pypy/objspace/std/test/test_identitydict.py b/pypy/objspace/std/test/test_identitydict.py --- a/pypy/objspace/std/test/test_identitydict.py +++ b/pypy/objspace/std/test/test_identitydict.py @@ -1,8 +1,6 @@ import py from pypy.interpreter.gateway import interp2app -py.test.py3k_skip("XXX: strategies are currently broken") - class AppTestComparesByIdentity: spaceconfig = {"objspace.std.withidentitydict": True} diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py 
+++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -1,3 +1,4 @@ +# encoding: utf-8 import py from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject from pypy.objspace.std.kwargsdict import * @@ -73,7 +74,7 @@ for i in range(100): assert d.setitem_str("d%s" % i, 4) is None assert d.strategy is not strategy - assert "BytesDictStrategy" == d.strategy.__class__.__name__ + assert "UnicodeDictStrategy" == d.strategy.__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() @@ -133,7 +134,6 @@ return r[r.find("(") + 1: r.find(")")] def test_create(self): - py3k_skip("need UnicodeDictStrategy to work in py3k") def f(**args): return args d = f(a=1) @@ -149,7 +149,6 @@ assert sorted(f(a=2, b=3).values()) == [2, 3] def test_setdefault(self): - py3k_skip("XXX: strategies are currently broken") def f(**args): return args d = f(a=1, b=2) @@ -161,3 +160,23 @@ assert a == 3 assert "KwargsDictStrategy" in self.get_strategy(d) + def test_unicode(self): + """ + def f(**kwargs): + return kwargs + + d = f(λ=True) + assert list(d) == ['λ'] + assert next(iter(d)) == 'λ' + assert "KwargsDictStrategy" in self.get_strategy(d) + + d['foo'] = 'bar' + assert sorted(d) == ['foo', 'λ'] + assert "KwargsDictStrategy" in self.get_strategy(d) + + d = f(λ=True) + o = object() + d[o] = 'baz' + assert set(d) == set(['λ', o]) + assert "ObjectDictStrategy" in self.get_strategy(d) + """ diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -402,7 +402,6 @@ self.space.w_True) def test_sizehint(self): - py.test.py3k_skip("XXX: strategies are currently broken") space = self.space w_l = space.newlist([], sizehint=10) assert isinstance(w_l.strategy, SizeListStrategy) @@ -419,7 +418,6 @@ assert w_lst.strategy.sizehint == 13 def test_find_fast_on_intlist(self, monkeypatch): - py.test.py3k_skip("XXX: strategies are currently 
broken") monkeypatch.setattr(self.space, "eq_w", None) w = self.space.wrap intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)]) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,3 @@ -import py import sys from pypy.objspace.std.listobject import ( W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, @@ -7,7 +6,6 @@ from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject -py.test.py3k_skip("XXX: strategies are currently broken") class TestW_ListStrategies(TestW_ListObject): def test_check_strategy(self): @@ -186,6 +184,7 @@ def test_setslice(self): space = self.space w = space.wrap + wb = space.wrapbytes l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -579,9 +578,11 @@ assert not self.space.eq_w(l1, l2) def test_weird_rangelist_bug(self): - l = make_range_list(self.space, 1, 1, 3) + space = self.space + l = make_range_list(space, 1, 1, 3) # should not raise - assert l.descr_getslice(self.space, self.space.wrap(15), self.space.wrap(2222)).strategy == self.space.fromcache(EmptyListStrategy) + w_slice = space.newslice(space.wrap(15), space.wrap(2222), space.wrap(1)) + assert l.descr_getitem(space, w_slice).strategy == space.fromcache(EmptyListStrategy) def test_add_to_rangelist(self): l1 = make_range_list(self.space, 1, 1, 3) @@ -642,13 +643,13 @@ def test_string_uses_newlist_bytes(self): space = self.space - w_s = space.wrap("a b c") + w_s = space.wrapbytes("a b c") space.newlist = None try: w_l = space.call_method(w_s, "split") - w_l2 = space.call_method(w_s, "split", space.wrap(" ")) + w_l2 = space.call_method(w_s, "split", space.wrapbytes(" ")) w_l3 = space.call_method(w_s, "rsplit") - w_l4 = space.call_method(w_s, "rsplit", space.wrap(" ")) + w_l4 = space.call_method(w_s, 
"rsplit", space.wrapbytes(" ")) finally: del space.newlist assert space.listview_bytes(w_l) == ["a", "b", "c"] @@ -680,8 +681,6 @@ assert space.unwrap(w_res) == 3 def test_create_list_from_set(self): - # this test fails because of the "w_set.iter = None" line below - py.test.py3k_skip("missing the correct list strategy") from pypy.objspace.std.setobject import W_SetObject from pypy.objspace.std.setobject import _initialize_set diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -84,12 +84,12 @@ assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("")))) def test_create_set_from_list(self): - py.test.py3k_skip("XXX: strategies are currently broken") from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.setobject import BytesSetStrategy, ObjectSetStrategy, UnicodeSetStrategy from pypy.objspace.std.floatobject import W_FloatObject w = self.space.wrap + wb = self.space.wrapbytes intstr = self.space.fromcache(IntegerSetStrategy) tmp_func = intstr.get_storage_from_list # test if get_storage_from_list is no longer used @@ -101,10 +101,10 @@ assert w_set.strategy is intstr assert intstr.unerase(w_set.sstorage) == {1:None, 2:None, 3:None} - w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) + w_list = W_ListObject(self.space, [wb("1"), wb("2"), wb("3")]) w_set = W_SetObject(self.space) _initialize_set(self.space, w_set, w_list) - assert w_set.strategy is self.space.fromcache(UnicodeSetStrategy) + assert w_set.strategy is self.space.fromcache(BytesSetStrategy) assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} w_list = self.space.iter(W_ListObject(self.space, [w(u"1"), w(u"2"), w(u"3")])) @@ -131,13 +131,13 @@ intstr.get_storage_from_list = tmp_func def test_listview_bytes_int_on_set(self): - py.test.py3k_skip("XXX: strategies are currently broken") w = 
self.space.wrap + wb = self.space.wrapbytes w_a = W_SetObject(self.space) - _initialize_set(self.space, w_a, w("abcdefg")) - assert sorted(self.space.listview_bytes(w_a)) == list("abcdefg") - assert self.space.listview_int(w_a) is None + _initialize_set(self.space, w_a, wb("abcdefg")) + assert sorted(self.space.listview_int(w_a)) == [97, 98, 99, 100, 101, 102, 103] + assert self.space.listview_bytes(w_a) is None w_b = W_SetObject(self.space) _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) @@ -1006,6 +1006,13 @@ # gives us 1, but 1 is not in the set any longer. raises(RuntimeError, list, it) + def test_iter_bytes_strategy(self): + l = [b'a', b'b'] + s = set(l) + n = next(iter(s)) + assert type(n) is bytes + assert n in l + def test_unicodestrategy(self): s = 'àèìòù' myset = set([s]) diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py --- a/pypy/objspace/std/test/test_setstrategies.py +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -5,9 +5,6 @@ UnicodeIteratorImplementation, UnicodeSetStrategy) from pypy.objspace.std.listobject import W_ListObject -import py -py.test.py3k_skip("XXX: strategies are currently broken") - class TestW_SetStrategies: def wrapped(self, l, bytes=False): From noreply at buildbot.pypy.org Sat Apr 26 02:10:53 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 26 Apr 2014 02:10:53 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20140426001053.1185A1C023E@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70985:b81d95fa41f2 Date: 2014-04-25 10:53 -0700 http://bitbucket.org/pypy/pypy/changeset/b81d95fa41f2/ Log: merge default into branch diff --git a/lib-python/2.7/test/test_builtin.py b/lib-python/2.7/test/test_builtin.py --- a/lib-python/2.7/test/test_builtin.py +++ b/lib-python/2.7/test/test_builtin.py @@ -250,14 +250,12 @@ self.assertRaises(TypeError, compile) 
self.assertRaises(ValueError, compile, 'print 42\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print 42\n', '', 'single', 0xff) - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') if have_unicode: compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad') diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -22,22 +22,6 @@ compile; if absent or zero these statements do influence the compilation, in addition to any features explicitly specified. """ - - ast_node = None - w_ast_type = space.gettypeobject(ast.AST.typedef) - str_ = None - if space.isinstance_w(w_source, w_ast_type): - ast_node = space.interp_w(ast.mod, w_source) - ast_node.sync_app_attrs(space) - elif space.isinstance_w(w_source, space.w_unicode): - w_utf_8_source = space.call_method(w_source, "encode", - space.wrap("utf-8")) - str_ = space.str_w(w_utf_8_source) - # This flag tells the parser to reject any coding cookies it sees. 
- flags |= consts.PyCF_SOURCE_IS_UTF8 - else: - str_ = space.str_w(w_source) - ec = space.getexecutioncontext() if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): @@ -53,14 +37,30 @@ space.wrap("compile() arg 3 must be 'exec' " "or 'eval' or 'single'")) - if ast_node is None: - if flags & consts.PyCF_ONLY_AST: - mod = ec.compiler.compile_to_ast(str_, filename, mode, flags) - return space.wrap(mod) - else: - code = ec.compiler.compile(str_, filename, mode, flags) + w_ast_type = space.gettypeobject(ast.AST.typedef) + if space.isinstance_w(w_source, w_ast_type): + ast_node = space.interp_w(ast.mod, w_source) + ast_node.sync_app_attrs(space) + code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + return space.wrap(code) + + if space.isinstance_w(w_source, space.w_unicode): + w_utf_8_source = space.call_method(w_source, "encode", + space.wrap("utf-8")) + str_ = space.str_w(w_utf_8_source) + # This flag tells the parser to reject any coding cookies it sees. 
+ flags |= consts.PyCF_SOURCE_IS_UTF8 else: - code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + str_ = space.readbuf_w(w_source).as_str() + + if '\x00' in str_: + raise OperationError(space.w_TypeError, space.wrap( + "compile() expected string without null bytes")) + + if flags & consts.PyCF_ONLY_AST: + code = ec.compiler.compile_to_ast(str_, filename, mode, flags) + else: + code = ec.compiler.compile(str_, filename, mode, flags) return space.wrap(code) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -490,6 +490,14 @@ def test_compile(self): co = compile('1+2', '?', 'eval') assert eval(co) == 3 + co = compile(buffer('1+2'), '?', 'eval') + assert eval(co) == 3 + exc = raises(TypeError, compile, chr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, unichr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, memoryview('1+2'), '?', 'eval') + assert str(exc.value) == "expected a readable buffer object" compile("from __future__ import with_statement", "", "exec") raises(SyntaxError, compile, '-', '?', 'eval') raises(ValueError, compile, '"\\xt"', '?', 'eval') diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -679,7 +679,7 @@ if space.isinstance_w(w_string, space.w_unicode): return space.newtuple([w_string, space.len(w_string)]) - string = space.str_w(w_string) + string = space.readbuf_w(w_string).as_str() if len(string) == 0: return space.newtuple([space.wrap(u''), space.wrap(0)]) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ 
b/pypy/module/_codecs/test/test_codecs.py @@ -276,7 +276,7 @@ assert enc == "a\x00\x00\x00" def test_unicode_internal_decode(self): - import sys + import sys, _codecs, array if sys.maxunicode == 65535: # UCS2 build if sys.byteorder == "big": bytes = "\x00a" @@ -291,6 +291,9 @@ bytes2 = "\x98\x00\x01\x00" assert bytes2.decode("unicode_internal") == u"\U00010098" assert bytes.decode("unicode_internal") == u"a" + assert _codecs.unicode_internal_decode(array.array('c', bytes))[0] == u"a" + exc = raises(TypeError, _codecs.unicode_internal_decode, memoryview(bytes)) + assert str(exc.value) == "expected a readable buffer object" def test_raw_unicode_escape(self): assert unicode("\u0663", "raw-unicode-escape") == u"\u0663" diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -460,14 +460,17 @@ space = self.space self.check_closed() - w_iterator = space.iter(w_lines) - while True: - try: - w_line = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done + lines = space.fixedview(w_lines) + for i, w_line in enumerate(lines): + if not space.isinstance_w(w_line, space.w_str): + try: + line = w_line.charbuf_w(space) + except TypeError: + raise OperationError(space.w_TypeError, space.wrap( + "writelines() argument must be a sequence of strings")) + else: + lines[i] = space.wrap(line) + for w_line in lines: self.file_write(w_line) def file_readinto(self, w_rwbuffer): diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -386,6 +386,32 @@ assert len(somelines) > 200 assert somelines == lines[:len(somelines)] + def test_writelines(self): + import array + fn = self.temptestfile + with file(fn, 'w') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = 
raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'r').readlines() == ['abcdef'] + + with file(fn, 'wb') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == ['abcdef'] + + with file(fn, 'wb') as f: + exc = raises(TypeError, f.writelines, ['abc', memoryview('def')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == [] + def test_nasty_writelines(self): # The stream lock should be released between writes fn = self.temptestfile diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -193,7 +193,7 @@ def setslice(self, space, w_slice, w_value): start, stop = self.decodeslice(space, w_slice) - value = space.bufferstr_w(w_value) + value = space.str_w(w_value) if start + len(value) != stop: raise OperationError(space.w_ValueError, space.wrap("cannot resize array")) diff --git a/pypy/sandbox/pypy_interact.py b/pypy/sandbox/pypy_interact.py --- a/pypy/sandbox/pypy_interact.py +++ b/pypy/sandbox/pypy_interact.py @@ -21,7 +21,7 @@ """ import sys, os -sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))) from rpython.translator.sandbox.sandlib import SimpleIOSandboxedProc from rpython.translator.sandbox.sandlib import 
VirtualizedSandboxedProc from rpython.translator.sandbox.vfs import Dir, RealDir, RealFile diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -8356,6 +8356,31 @@ """ self.optimize_loop(ops, ops) + def test_unroll_failargs(self): + ops = """ + [p0, i1] + p1 = getfield_gc(p0, descr=valuedescr) + i2 = int_add(i1, 1) + i3 = int_le(i2, 13) + guard_true(i3) [p1] + jump(p0, i2) + """ + expected = """ + [p0, i1, p1] + i2 = int_add(i1, 1) + i3 = int_le(i2, 13) + guard_true(i3) [p1] + jump(p0, i2, p1) + """ + preamble = """ + [p0, i1] + p1 = getfield_gc(p0, descr=valuedescr) + i2 = int_add(i1, 1) + i3 = int_le(i2, 13) + guard_true(i3) [p1] + jump(p0, i2, p1) + """ + self.optimize_loop(ops, expected, preamble) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/test/test_string.py b/rpython/jit/metainterp/test/test_string.py --- a/rpython/jit/metainterp/test/test_string.py +++ b/rpython/jit/metainterp/test/test_string.py @@ -654,3 +654,23 @@ self.check_resops(call_pure=0, unicodesetitem=0, call=2, newunicode=0, unicodegetitem=0, copyunicodecontent=0) + + def test_string_interpolation(self): + def f(x, y): + return len('<%d %d>' % (x, y)) + res = self.interp_operations(f, [222, 3333]) + assert res == 10 + + def test_string_interpolation_constants(self): + jitdriver = JitDriver(greens=['x', 'y'], reds=['z']) + def f(x, y): + z = 0 + while z < 10: + jitdriver.jit_merge_point(x=x, y=y, z=z) + if len('<%d %d>' % (x, y)) != 10: + raise Exception + z += 1 + return 0 + self.meta_interp(f, [222, 3333]) + self.check_simple_loop({'guard_true': 1, 'int_add': 1, + 'int_lt': 1, 'jump': 1}) diff --git a/rpython/translator/platform/test/test_posix.py b/rpython/translator/platform/test/test_posix.py --- 
a/rpython/translator/platform/test/test_posix.py +++ b/rpython/translator/platform/test/test_posix.py @@ -9,13 +9,8 @@ res = host.execute('echo', '42 24') assert res.out == '42 24\n' - if sys.platform == 'win32': - # echo is a shell builtin on Windows - res = host.execute('cmd', ['/c', 'echo', '42', '24']) - assert res.out == '42 24\n' - else: - res = host.execute('echo', ['42', '24']) - assert res.out == '42 24\n' + res = host.execute('echo', ['42', '24']) + assert res.out == '42 24\n' class TestMakefile(object): platform = host @@ -61,8 +56,13 @@ finally: del os.environ['PYPY_LOCALBASE'] Makefile = tmpdir.join('Makefile').read() - assert 'INCLUDEDIRS = -I/foo/baz/include' in Makefile - assert 'LIBDIRS = -L/foo/baz/lib' in Makefile + include_prefix = '-I' + lib_prefix = '-L' + if self.platform.name == 'msvc': + include_prefix = '/I' + lib_prefix = '/LIBPATH:' + assert 'INCLUDEDIRS = %s/foo/baz/include' % include_prefix in Makefile + assert 'LIBDIRS = %s/foo/baz/lib' % lib_prefix in Makefile class TestMaemo(TestMakefile): strict_on_stderr = False diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -292,7 +292,10 @@ rel_ofiles = [rel_cfile[:rel_cfile.rfind('.')]+'.obj' for rel_cfile in rel_cfiles] m.cfiles = rel_cfiles - rel_includedirs = [rpyrel(incldir) for incldir in eci.include_dirs] + rel_includedirs = [rpyrel(incldir) for incldir in + self.preprocess_include_dirs(eci.include_dirs)] + rel_libdirs = [rpyrel(libdir) for libdir in + self.preprocess_library_dirs(eci.library_dirs)] m.comment('automatically generated makefile') definitions = [ @@ -302,7 +305,7 @@ ('SOURCES', rel_cfiles), ('OBJECTS', rel_ofiles), ('LIBS', self._libs(eci.libraries)), - ('LIBDIRS', self._libdirs(eci.library_dirs)), + ('LIBDIRS', self._libdirs(rel_libdirs)), ('INCLUDEDIRS', self._includedirs(rel_includedirs)), ('CFLAGS', self.cflags), ('CFLAGSEXTRA', 
list(eci.compile_extra)), diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -3,6 +3,10 @@ trampolines that marshal their input arguments, dump them to STDOUT, and wait for an answer on STDIN. Enable with 'translate.py --sandbox'. """ +import sys +if sys.platform == 'win32': + raise TypeError("sandbox not supported on windows") + import py from rpython.rlib import rmarshal, types diff --git a/rpython/translator/sandbox/test/test_sandbox.py b/rpython/translator/sandbox/test/test_sandbox.py --- a/rpython/translator/sandbox/test/test_sandbox.py +++ b/rpython/translator/sandbox/test/test_sandbox.py @@ -25,7 +25,20 @@ check_str_without_nul=True) return str(t.compile()) +unsupported_platform = ('False', '') +if sys.platform == 'win32': + unsupported_platform = ('True', 'sandbox not supported on this platform') + def test_unavailable(): + def entry_point(argv): + fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) + os.close(fd) + return 0 + exc = py.test.raises(TypeError, compile, entry_point) + assert str(exc).find('not supported') >= 0 +supported = py.test.mark.skipif(unsupported_platform[0], reason=unsupported_platform[1]) + + at supported def test_open_dup(): def entry_point(argv): fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) @@ -43,6 +56,7 @@ f.close() assert tail == "" + at supported def test_read_write(): def entry_point(argv): fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) @@ -65,6 +79,7 @@ f.close() assert tail == "" + at supported def test_dup2_access(): def entry_point(argv): os.dup2(34, 56) @@ -80,6 +95,7 @@ f.close() assert tail == "" + at supported def test_stat_ftruncate(): from rpython.translator.sandbox.sandlib import RESULTTYPE_STATRESULT from rpython.rlib.rarithmetic import r_longlong @@ -101,6 +117,7 @@ f.close() assert tail == "" + at supported def test_time(): def entry_point(argv): t = time.time() @@ -116,6 +133,7 @@ 
f.close() assert tail == "" + at supported def test_getcwd(): def entry_point(argv): t = os.getcwd() @@ -131,6 +149,7 @@ f.close() assert tail == "" + at supported def test_oserror(): def entry_point(argv): try: @@ -148,6 +167,7 @@ f.close() assert tail == "" + at supported def test_hybrid_gc(): def entry_point(argv): l = [] @@ -172,6 +192,7 @@ rescode = pipe.wait() assert rescode == 0 + at supported def test_segfault_1(): class A: def __init__(self, m): @@ -194,6 +215,7 @@ e.close() assert 'Invalid RPython operation' in errors + at supported def test_segfault_2(): py.test.skip("hum, this is one example, but we need to be very careful") class Base: @@ -226,6 +248,7 @@ e.close() assert '...think what kind of errors to get...' in errors + at supported def test_safe_alloc(): from rpython.rlib.rmmap import alloc, free @@ -246,6 +269,7 @@ rescode = pipe.wait() assert rescode == 0 + at supported def test_unsafe_mmap(): py.test.skip("Since this stuff is unimplemented, it won't work anyway " "however, the day it starts working, it should pass test") @@ -271,6 +295,7 @@ rescode = pipe.wait() assert rescode == 0 + at supported class TestPrintedResults: def run(self, entry_point, args, expected): diff --git a/rpython/translator/sandbox/test/test_sandlib.py b/rpython/translator/sandbox/test/test_sandlib.py --- a/rpython/translator/sandbox/test/test_sandlib.py +++ b/rpython/translator/sandbox/test/test_sandlib.py @@ -6,10 +6,10 @@ from rpython.translator.sandbox.sandlib import SimpleIOSandboxedProc from rpython.translator.sandbox.sandlib import VirtualizedSandboxedProc from rpython.translator.sandbox.sandlib import VirtualizedSocketProc -from rpython.translator.sandbox.test.test_sandbox import compile +from rpython.translator.sandbox.test.test_sandbox import compile, supported from rpython.translator.sandbox.vfs import Dir, File, RealDir, RealFile - + at supported class MockSandboxedProc(SandboxedProc): """A sandbox process wrapper that replays expected syscalls.""" @@ -35,7 
+35,7 @@ do_ll_os__ll_os_write = _make_method("write") do_ll_os__ll_os_close = _make_method("close") - + at supported def test_lib(): def entry_point(argv): fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) @@ -63,6 +63,7 @@ proc.handle_forever() assert proc.seen == len(proc.expected) + at supported def test_foobar(): py.test.skip("to be updated") foobar = rffi.llexternal("foobar", [rffi.CCHARP], rffi.LONG) @@ -79,6 +80,7 @@ proc.handle_forever() assert proc.seen == len(proc.expected) + at supported def test_simpleio(): def entry_point(argv): print "Please enter a number:" @@ -100,6 +102,7 @@ assert output == "Please enter a number:\nThe double is: 42\n" assert error == "" + at supported def test_socketio(): class SocketProc(VirtualizedSocketProc, SimpleIOSandboxedProc): def build_virtual_root(self): @@ -116,6 +119,7 @@ output, error = proc.communicate("") assert output.startswith('HTTP/1.0 503 Service Unavailable') + at supported def test_oserror(): def entry_point(argv): try: @@ -133,6 +137,7 @@ assert proc.seen == len(proc.expected) + at supported class SandboxedProcWithFiles(VirtualizedSandboxedProc, SimpleIOSandboxedProc): """A sandboxed process with a simple virtualized filesystem. 
@@ -145,6 +150,7 @@ 'this.pyc': RealFile(__file__), }) + at supported def test_too_many_opens(): def entry_point(argv): try: @@ -186,6 +192,7 @@ assert output == "All ok!\n" assert error == "" + at supported def test_fstat(): def compare(a, b, i): if a != b: @@ -219,6 +226,7 @@ assert output == "All ok!\n" assert error == "" + at supported def test_lseek(): def char_should_be(c, should): if c != should: @@ -248,6 +256,7 @@ assert output == "All ok!\n" assert error == "" + at supported def test_getuid(): def entry_point(argv): import os diff --git a/rpython/translator/sandbox/test/test_vfs.py b/rpython/translator/sandbox/test/test_vfs.py --- a/rpython/translator/sandbox/test/test_vfs.py +++ b/rpython/translator/sandbox/test/test_vfs.py @@ -2,10 +2,13 @@ import sys, stat, os from rpython.translator.sandbox.vfs import * from rpython.tool.udir import udir +from rpython.translator.sandbox.test.test_sandbox import unsupported_platform HASLINK = hasattr(os, 'symlink') def setup_module(mod): + if unsupported_platform[0] == 'True': + py.test.skip(unsupported_platform[1]) d = udir.ensure('test_vfs', dir=1) d.join('file1').write('somedata1') d.join('file2').write('somelongerdata2') From noreply at buildbot.pypy.org Sat Apr 26 02:10:54 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 26 Apr 2014 02:10:54 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: open up more tests to the dummy backend Message-ID: <20140426001054.7FD601C023E@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70986:0403fe95c260 Date: 2014-04-25 13:01 -0700 http://bitbucket.org/pypy/pypy/changeset/0403fe95c260/ Log: open up more tests to the dummy backend diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -3,12 +3,23 @@ #include #include +#include #include #include #include #include +// add example01.cxx code +int 
globalAddOneToInt(int a); + +namespace dummy { +#include "example01.cxx" +} + +int globalAddOneToInt(int a) { + return dummy::globalAddOneToInt(a); +} /* pseudo-reflection data ------------------------------------------------- */ namespace { @@ -38,28 +49,16 @@ typedef std::map Scopes_t; static Scopes_t s_scopes; -class PseudoExample01 { -public: - PseudoExample01() : m_somedata(-99) {} - PseudoExample01(int a) : m_somedata(a) {} - PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} - PseudoExample01& operator=(const PseudoExample01& e) { - if (this != &e) m_somedata = e.m_somedata; - return *this; - } - virtual ~PseudoExample01() {} - -public: - int m_somedata; -}; - static int example01_last_static_method = 0; static int example01_last_constructor = 0; +static int payload_methods_offset = 0; struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- static long s_scope_id = 0; + + { // class example01 -- s_handles["example01"] = (cppyy_scope_t)++s_scope_id; std::vector methods; @@ -115,14 +114,81 @@ // cut-off is used in cppyy_is_constructor example01_last_constructor = methods.size(); - // (12) double addDataToDouble(double a) + // (12) int addDataToInt(int a) + argtypes.clear(); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToInt", argtypes, "int")); + + // (13) int addDataToIntConstRef(const int& a) + argtypes.clear(); + argtypes.push_back("const int&"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToIntConstRef", argtypes, "int")); + + // (14) int overloadedAddDataToInt(int a, int b) + argtypes.clear(); + argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + // (15) int overloadedAddDataToInt(int a) + // (16) int overloadedAddDataToInt(int a, int b, int c) + argtypes.clear(); + argtypes.push_back("int"); + 
methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + // (17) double addDataToDouble(double a) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + // (18) int addDataToAtoi(const char* str) + // (19) char* addToStringValue(const char* str) + argtypes.clear(); + argtypes.push_back("const char*"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToAtoi", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("addToStringValue", argtypes, "char*")); + + // (20) void setPayload(payload* p) + // (21) payload* cyclePayload(payload* p) + // (22) payload copyCyclePayload(payload* p) + argtypes.clear(); + argtypes.push_back("payload*"); + methods.push_back(Cppyy_PseudoMethodInfo("setPayload", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("cyclePayload", argtypes, "payload*")); + methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", argtypes, "payload")); + + payload_methods_offset = methods.size(); + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; - // -- class example01 + } // -- class example01 + + { // class payload -- + s_handles["payload"] = (cppyy_scope_t)++s_scope_id; + + std::vector methods; + + // (23) payload(double d = 0.) 
+ std::vector argtypes; + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor")); + + // (24) double getData() + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("getData", argtypes, "double")); + + // (25) void setData(double d) + argtypes.clear(); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); + + Cppyy_PseudoClassInfo info(methods); + s_scopes[(cppyy_scope_t)s_scope_id] = info; + } // -- class payload } } _init; @@ -150,36 +216,69 @@ return s_handles[scope_name]; // lookup failure will return 0 (== error) } +cppyy_type_t cppyy_actual_class(cppyy_type_t klass, cppyy_object_t /* obj */) { + return klass; +} + /* memory management ------------------------------------------------------ */ void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { if (handle == s_handles["example01"]) - delete (PseudoExample01*)self; + delete (dummy::example01*)self; } /* method/function dispatching -------------------------------------------- */ +void cppyy_call_v(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + switch ((long)method) { + case 5: // static void example01:;staticSetPayload(payload* p, double d) + assert(!self && nargs == 2); + dummy::example01::staticSetPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), + ((CPPYY_G__value*)args)[1].obj.d); + break; + case 9: // static void example01::setCount(int) + assert(!self && nargs == 1); + dummy::example01::setCount(((CPPYY_G__value*)args)[0].obj.in); + break; + case 20: // void example01::setPayload(payload* p); + assert(self && nargs == 1); + ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_v"); + break; + } +} + int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { int result = 0; switch ((long)method) { - case 1: // static 
int staticAddOneToInt(int) + case 1: // static int example01::staticAddOneToInt(int) assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.in + 1; + result = dummy::example01::staticAddOneToInt(((CPPYY_G__value*)args)[0].obj.in); break; - case 2: // static int staticAddOneToInt(int, int) + case 2: // static int example01::staticAddOneToInt(int, int) assert(!self && nargs == 2); - result = ((CPPYY_G__value*)args)[0].obj.in + ((CPPYY_G__value*)args)[1].obj.in + 1; + result = dummy::example01::staticAddOneToInt( + ((CPPYY_G__value*)args)[0].obj.in, ((CPPYY_G__value*)args)[1].obj.in); break; - case 3: // static int staticAtoi(const char* str) + case 3: // static int example01::staticAtoi(const char* str) assert(!self && nargs == 1); - result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + result = dummy::example01::staticAtoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); break; - case 8: // static int getCount() - assert(!self && nargs == 0); - // can't actually call this method (would need to resolve example01::count), but - // other than the memory tests, most tests just check for 0 at the end - result = 0; - break; + case 8: // static int example01::getCount() + assert(!self && nargs == 0); + result = dummy::example01::getCount(); + break; + case 12: // int example01::addDataToInt(int a) + assert(self && nargs == 1); + result = ((dummy::example01*)self)->addDataToInt(((CPPYY_G__value*)args)[0].obj.in); + break; + case 18: // int example01::addDataToAtoi(const char* str) + assert(self && nargs == 1); + result = ((dummy::example01*)self)->addDataToAtoi( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; default: assert(!"method unknown in cppyy_call_i"); break; @@ -188,26 +287,50 @@ } long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - if ((long)method == 4) { // static char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); 
- char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return (long)strout; + long result = 0; + switch ((long)method) { + case 4: // static char* example01::staticStrcpy(const char* strin) + assert(!self && nargs == 1); + result = (long)dummy::example01::staticStrcpy( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + case 6: // static payload* example01::staticCyclePayload(payload* p, double d) + assert(!self && nargs == 2); + result = (long)dummy::example01::staticCyclePayload( + (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), + ((CPPYY_G__value*)args)[1].obj.d); + break; + case 19: // char* example01::addToStringValue(const char* str) + assert(self && nargs == 1); + result = (long)((dummy::example01*)self)->addToStringValue( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + case 21: // payload* example01::cyclePayload(payload* p) + assert(self && nargs == 1); + result = (long)((dummy::example01*)self)->cyclePayload( + (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_l"); + break; } - assert(!"method unknown in cppyy_call_l"); - return 0; + return result; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { double result = 0.; switch ((long)method) { - case 0: // static double staticAddToDouble(double) + case 0: // static double example01::staticAddToDouble(double) assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + result = dummy::example01::staticAddToDouble(((CPPYY_G__value*)args)[0].obj.d); break; - case 12: // double addDataToDouble(double a) + case 17: // double example01::addDataToDouble(double a) assert(self && nargs == 1); - result = ((PseudoExample01*)self)->m_somedata + ((CPPYY_G__value*)args)[0].obj.d; + result = ((dummy::example01*)self)->addDataToDouble(((CPPYY_G__value*)args)[0].obj.d); + break; + case 24: // double payload::getData() + assert(self 
&& nargs == 0); + result = ((dummy::payload*)self)->getData(); break; default: assert(!"method unknown in cppyy_call_d"); @@ -217,11 +340,17 @@ } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - // char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); - char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return strout; + char* result = 0; + switch ((long)method) { + case 4: // static char* example01::staticStrcpy(const char* strin) + assert(!self && nargs == 1); + result = dummy::example01::staticStrcpy((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_s"); + break; + } + return result; } cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { @@ -230,17 +359,27 @@ switch ((long)method) { case 10: assert(nargs == 0); - result = new PseudoExample01; + result = new dummy::example01; break; case 11: assert(nargs == 1); - result = new PseudoExample01(((CPPYY_G__value*)args)[0].obj.in); + result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); break; default: - assert(!"method unknown in cppyy_constructor"); + assert(!"method of example01 unknown in cppyy_constructor"); break; } - } + } else if (handle == s_handles["payload"]) { + switch ((long)method) { + case 23: + if (nargs == 0) result = new dummy::payload; + else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); + break; + default: + assert(!"method payload unknown in cppyy_constructor"); + break; + } + } return (cppyy_object_t)result; } @@ -346,8 +485,13 @@ return 0; } -cppyy_method_t cppyy_get_method(cppyy_scope_t /* handle */, cppyy_index_t method_index) { - return (cppyy_method_t)method_index; +cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return 
(cppyy_method_t)method_index; + else if (handle == s_handles["payload"]) + return (cppyy_method_t)((long)method_index + payload_methods_offset); + assert(!"unknown class in cppyy_get_method"); + return (cppyy_method_t)0; } @@ -356,12 +500,16 @@ if (handle == s_handles["example01"]) return example01_last_static_method <= method_index && method_index < example01_last_constructor; + else if (handle == s_handles["payload"]) + return (long)method_index == 0; return 0; } int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { if (handle == s_handles["example01"]) return method_index < example01_last_static_method ? 1 : 0; + if (handle == s_handles["payload"]) + return 0; return 1; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -8,11 +8,12 @@ # run only tests that are covered by the dummy backend and tests # that do not rely on reflex if not ('test_helper.py' in item.location[0] or \ - 'test_cppyy.py' in item.location[0]): + 'test_cppyy.py' in item.location[0] or \ + 'test_pythonify.py' in item.location[0]): py.test.skip("genreflex is not installed") import re - if 'test_cppyy.py' in item.location[0] and \ - not re.search("test0[1-36]", item.location[2]): + if 'test_pythonify.py' in item.location[0] and \ + not re.search("AppTestPYTHONIFY.test0[1-6]", item.location[2]): py.test.skip("genreflex is not installed") def pytest_ignore_collect(path, config): @@ -39,10 +40,11 @@ pkgpath = py.path.local(__file__).dirpath().join(os.pardir) srcpath = pkgpath.join('src') incpath = pkgpath.join('include') + tstpath = pkgpath.join('test') eci = ExternalCompilationInfo( separate_module_files=[srcpath.join('dummy_backend.cxx')], - include_dirs=[incpath], + include_dirs=[incpath, tstpath], use_cpp_linker=True, ) diff --git a/pypy/module/cppyy/test/example01.cxx b/pypy/module/cppyy/test/example01.cxx --- a/pypy/module/cppyy/test/example01.cxx 
+++ b/pypy/module/cppyy/test/example01.cxx @@ -1,4 +1,3 @@ -#include #include #include #include diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -321,7 +321,7 @@ e = cppyy.gbl.example01(2) assert 5 == meth(e, 3) - def test01_installable_function(self): + def test15_installable_function(self): """Test installing and calling global C++ function as python method""" import cppyy From noreply at buildbot.pypy.org Sat Apr 26 02:10:56 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 26 Apr 2014 02:10:56 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: use getarg_w instead of readbuf_w (thanks Brian!) Message-ID: <20140426001056.1AEA21C023E@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70987:154db8349f24 Date: 2014-04-25 13:32 -0700 http://bitbucket.org/pypy/pypy/changeset/154db8349f24/ Log: use getarg_w instead of readbuf_w (thanks Brian!) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.readbuf_w(w_obj) + buf = space.getarg_w('s*', w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.readbuf_w(w_value) + buf = space.getarg_w('s*', w_value) # TODO: report if too many items given? 
for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.readbuf_w(w_value) + buf = space.getarg_w('s*', w_value) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -133,10 +133,7 @@ return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' - def buffer_w(self, w_obj, flags): - return FakeBuffer(w_obj) - - def readbuf_w(self, w_obj): + def getarg_w(self, code, w_obj): # for retrieving buffers return FakeBuffer(w_obj) def exception_match(self, typ, sub): From noreply at buildbot.pypy.org Sat Apr 26 02:10:57 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 26 Apr 2014 02:10:57 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: expose cppyy.Template for end-user use Message-ID: <20140426001057.92BFB1C023E@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70988:5ebbaf3f1099 Date: 2014-04-25 13:56 -0700 http://bitbucket.org/pypy/pypy/changeset/5ebbaf3f1099/ Log: expose cppyy.Template for end-user use diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -25,6 +25,7 @@ '_init_pythonify' : 'pythonify._init_pythonify', 'load_reflection_info' : 'pythonify.load_reflection_info', 'add_pythonization' : 'pythonify.add_pythonization', + 'Template' : 'pythonify.CppyyTemplateType', } def __init__(self, space, *args): diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -25,9 +25,12 @@ # class CppyyClass defined in _init_pythonify() class 
CppyyTemplateType(object): - def __init__(self, scope, name): - self._scope = scope + def __init__(self, name, scope=None): self._name = name + if scope is None: + self._scope = gbl + else: + self._scope = scope def _arg_to_str(self, arg): if arg == str: @@ -206,7 +209,7 @@ return pycppclass def make_cpptemplatetype(scope, template_name): - return CppyyTemplateType(scope, template_name) + return CppyyTemplateType(template_name, scope) def get_pycppitem(scope, name): diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -477,3 +477,29 @@ assert b1 == e2 assert b1 != b2 assert b1 == e2 + + +class AppTestTEMPLATE_UI: + spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + + def setup_class(cls): + cls.w_test_dct = cls.space.wrap(test_dct) + cls.w_stlstring = cls.space.appexec([], """(): + import cppyy, sys + return cppyy.load_reflection_info(%r)""" % (test_dct, )) + + def test01_explicit_templates(self): + """Explicit use of Template class""" + + import cppyy + + vector = cppyy.Template('vector', cppyy.gbl.std) + assert vector[int] == vector(int) + + v = vector[int]() + + N = 10 + v += range(N) + assert len(v) == N + for i in range(N): + assert v[i] == i From noreply at buildbot.pypy.org Sat Apr 26 02:10:59 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 26 Apr 2014 02:10:59 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: fix for issue 1676 Message-ID: <20140426001059.016221C023E@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70989:866f101100fc Date: 2014-04-25 16:23 -0700 http://bitbucket.org/pypy/pypy/changeset/866f101100fc/ Log: fix for issue 1676 diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -450,8 +450,8 @@ class CPPConstructor(CPPMethod): 
"""Method dispatcher that constructs new objects. This method can not have - a fast path, a the allocation of the object is currently left to the - reflection layer only, b/c the C++ class may have an overloaded operator + a fast path, as the allocation of the object is currently left to the + reflection layer only, since the C++ class may have an overloaded operator new, disallowing malloc here.""" _immutable_ = True @@ -460,8 +460,18 @@ # TODO: these casts are very, very un-pretty; need to find a way of # re-using CPPMethod's features w/o these roundabouts vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - w_result = CPPMethod.call(self, vscope, args_w) + cppinstance = None + try: + cppinstance = self.space.interp_w(W_CPPInstance, args_w[0], can_be_None=False) + use_args_w = args_w[1:] + except (OperationError, TypeError), e: + use_args_w = args_w + w_result = CPPMethod.call(self, vscope, use_args_w) newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + if cppinstance: + cppinstance._rawobject = newthis + memory_regulator.register(cppinstance) + return args_w[0] return wrap_cppobject(self.space, newthis, self.scope, do_cast=False, python_owns=True, fresh=True) @@ -1141,10 +1151,14 @@ self.objects = rweakref.RWeakValueDictionary(int, W_CPPInstance) def register(self, obj): + if not obj._rawobject: + return int_address = int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, obj) def unregister(self, obj): + if not obj._rawobject: + return int_address = int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, None) @@ -1194,7 +1208,7 @@ w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) # try to recycle existing object if this one is not newly created - if not fresh: + if not fresh and rawobject: obj = memory_regulator.retrieve(rawobject) if obj is not None and obj.cppclass is cppclass: return obj diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- 
a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -146,7 +146,12 @@ raise TypeError(msg) else: def __new__(cls, *args): - return constructor_overload.call(None, *args) + # create a place-holder only as there may be a derived class defined + import cppyy + instance = cppyy.bind_object(0, class_name, True) + if not instance.__class__ is cls: + instance.__class__ = cls # happens for derived class + return instance return __new__ def make_pycppclass(scope, class_name, final_class_name, cppclass): @@ -427,7 +432,9 @@ __metaclass__ = CppyyClassMeta def __init__(self, *args, **kwds): - pass # ignored, for the C++ backend, ctor == __new__ + __init__ + # self is only a placeholder; now create the actual C++ object + args = (self,) + args + self._cpp_proxy.get_overload(self._cpp_proxy.type_name).call(None, *args) # class generator callback cppyy._set_class_generator(clgen_callback) diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -332,6 +332,33 @@ assert 2 == e.fresh(1) assert 3 == e.fresh(2) + def test16_subclassing(self): + """A sub-class on the python side should have that class as type""" + + import cppyy + example01 = cppyy.gbl.example01 + + o = example01() + assert type(o) == example01 + + class MyClass1(example01): + def myfunc(self): + return 1 + + o = MyClass1() + assert type(o) == MyClass1 + assert isinstance(o, example01) + assert o.myfunc() == 1 + + class MyClass2(example01): + def __init__(self, what): + example01.__init__(self) + self.what = what + + o = MyClass2('hi') + assert type(o) == MyClass2 + assert o.what == 'hi' + class AppTestPYTHONIFY_UI: spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) From noreply at buildbot.pypy.org Sat Apr 26 02:11:00 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 26 Apr 2014 02:11:00 +0200 (CEST) Subject: [pypy-commit] pypy 
reflex-support: merge default into branch Message-ID: <20140426001100.437BF1C023E@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r70990:812b9524c822 Date: 2014-04-25 16:23 -0700 http://bitbucket.org/pypy/pypy/changeset/812b9524c822/ Log: merge default into branch diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -0,0 +1,28 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestBuffers(BaseTestPyPyC): + def test_re_match(self): + def main(): + import re + import array + p = re.compile('.+') + a = array.array('c', 'test' * 1000) + i = 0 + while i < 5000: + i += 1 + p.match(a) # ID: match + log = self.run(main, []) + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('match', """ + guard_not_invalidated(descr=...) + i65 = getfield_gc(p18, descr=...) + i67 = int_gt(0, i65) + guard_false(i67, descr=...) + i69 = int_gt(., i65) + guard_true(i69, descr=...) + guard_not_invalidated(descr=...) + i74 = getfield_raw(., descr=...) + i75 = int_lt(i74, 0) + guard_false(i75, descr=...) 
+ """) From noreply at buildbot.pypy.org Sat Apr 26 02:11:01 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 26 Apr 2014 02:11:01 +0200 (CEST) Subject: [pypy-commit] pypy default: merge reflex-support into branch: more tests opened, issue 1676 fixed Message-ID: <20140426001101.96EBC1C023E@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r70991:73f0d418fb69 Date: 2014-04-25 17:10 -0700 http://bitbucket.org/pypy/pypy/changeset/73f0d418fb69/ Log: merge reflex-support into branch: more tests opened, issue 1676 fixed diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -25,6 +25,7 @@ '_init_pythonify' : 'pythonify._init_pythonify', 'load_reflection_info' : 'pythonify.load_reflection_info', 'add_pythonization' : 'pythonify.add_pythonization', + 'Template' : 'pythonify.CppyyTemplateType', } def __init__(self, space, *args): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.readbuf_w(w_obj) + buf = space.getarg_w('s*', w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.readbuf_w(w_value) + buf = space.getarg_w('s*', w_value) # TODO: report if too many items given? 
for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.readbuf_w(w_value) + buf = space.getarg_w('s*', w_value) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -450,8 +450,8 @@ class CPPConstructor(CPPMethod): """Method dispatcher that constructs new objects. This method can not have - a fast path, a the allocation of the object is currently left to the - reflection layer only, b/c the C++ class may have an overloaded operator + a fast path, as the allocation of the object is currently left to the + reflection layer only, since the C++ class may have an overloaded operator new, disallowing malloc here.""" _immutable_ = True @@ -460,8 +460,18 @@ # TODO: these casts are very, very un-pretty; need to find a way of # re-using CPPMethod's features w/o these roundabouts vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - w_result = CPPMethod.call(self, vscope, args_w) + cppinstance = None + try: + cppinstance = self.space.interp_w(W_CPPInstance, args_w[0], can_be_None=False) + use_args_w = args_w[1:] + except (OperationError, TypeError), e: + use_args_w = args_w + w_result = CPPMethod.call(self, vscope, use_args_w) newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + if cppinstance: + cppinstance._rawobject = newthis + memory_regulator.register(cppinstance) + return args_w[0] return wrap_cppobject(self.space, newthis, self.scope, do_cast=False, python_owns=True, fresh=True) @@ -1141,10 +1151,14 @@ self.objects = rweakref.RWeakValueDictionary(int, W_CPPInstance) def register(self, obj): + if not obj._rawobject: + return int_address = int(rffi.cast(rffi.LONG, 
obj._rawobject)) self.objects.set(int_address, obj) def unregister(self, obj): + if not obj._rawobject: + return int_address = int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, None) @@ -1194,7 +1208,7 @@ w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) # try to recycle existing object if this one is not newly created - if not fresh: + if not fresh and rawobject: obj = memory_regulator.retrieve(rawobject) if obj is not None and obj.cppclass is cppclass: return obj diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -25,9 +25,12 @@ # class CppyyClass defined in _init_pythonify() class CppyyTemplateType(object): - def __init__(self, scope, name): - self._scope = scope + def __init__(self, name, scope=None): self._name = name + if scope is None: + self._scope = gbl + else: + self._scope = scope def _arg_to_str(self, arg): if arg == str: @@ -143,7 +146,12 @@ raise TypeError(msg) else: def __new__(cls, *args): - return constructor_overload.call(None, *args) + # create a place-holder only as there may be a derived class defined + import cppyy + instance = cppyy.bind_object(0, class_name, True) + if not instance.__class__ is cls: + instance.__class__ = cls # happens for derived class + return instance return __new__ def make_pycppclass(scope, class_name, final_class_name, cppclass): @@ -206,7 +214,7 @@ return pycppclass def make_cpptemplatetype(scope, template_name): - return CppyyTemplateType(scope, template_name) + return CppyyTemplateType(template_name, scope) def get_pycppitem(scope, name): @@ -424,7 +432,9 @@ __metaclass__ = CppyyClassMeta def __init__(self, *args, **kwds): - pass # ignored, for the C++ backend, ctor == __new__ + __init__ + # self is only a placeholder; now create the actual C++ object + args = (self,) + args + self._cpp_proxy.get_overload(self._cpp_proxy.type_name).call(None, *args) # class generator callback 
cppyy._set_class_generator(clgen_callback) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -3,12 +3,23 @@ #include #include +#include #include #include #include #include +// add example01.cxx code +int globalAddOneToInt(int a); + +namespace dummy { +#include "example01.cxx" +} + +int globalAddOneToInt(int a) { + return dummy::globalAddOneToInt(a); +} /* pseudo-reflection data ------------------------------------------------- */ namespace { @@ -38,28 +49,16 @@ typedef std::map Scopes_t; static Scopes_t s_scopes; -class PseudoExample01 { -public: - PseudoExample01() : m_somedata(-99) {} - PseudoExample01(int a) : m_somedata(a) {} - PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} - PseudoExample01& operator=(const PseudoExample01& e) { - if (this != &e) m_somedata = e.m_somedata; - return *this; - } - virtual ~PseudoExample01() {} - -public: - int m_somedata; -}; - static int example01_last_static_method = 0; static int example01_last_constructor = 0; +static int payload_methods_offset = 0; struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- static long s_scope_id = 0; + + { // class example01 -- s_handles["example01"] = (cppyy_scope_t)++s_scope_id; std::vector methods; @@ -115,14 +114,81 @@ // cut-off is used in cppyy_is_constructor example01_last_constructor = methods.size(); - // (12) double addDataToDouble(double a) + // (12) int addDataToInt(int a) + argtypes.clear(); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToInt", argtypes, "int")); + + // (13) int addDataToIntConstRef(const int& a) + argtypes.clear(); + argtypes.push_back("const int&"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToIntConstRef", argtypes, "int")); + + // (14) int overloadedAddDataToInt(int a, int b) + argtypes.clear(); + 
argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + // (15) int overloadedAddDataToInt(int a) + // (16) int overloadedAddDataToInt(int a, int b, int c) + argtypes.clear(); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + // (17) double addDataToDouble(double a) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + // (18) int addDataToAtoi(const char* str) + // (19) char* addToStringValue(const char* str) + argtypes.clear(); + argtypes.push_back("const char*"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToAtoi", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("addToStringValue", argtypes, "char*")); + + // (20) void setPayload(payload* p) + // (21) payload* cyclePayload(payload* p) + // (22) payload copyCyclePayload(payload* p) + argtypes.clear(); + argtypes.push_back("payload*"); + methods.push_back(Cppyy_PseudoMethodInfo("setPayload", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("cyclePayload", argtypes, "payload*")); + methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", argtypes, "payload")); + + payload_methods_offset = methods.size(); + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; - // -- class example01 + } // -- class example01 + + { // class payload -- + s_handles["payload"] = (cppyy_scope_t)++s_scope_id; + + std::vector methods; + + // (23) payload(double d = 0.) 
+ std::vector argtypes; + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor")); + + // (24) double getData() + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("getData", argtypes, "double")); + + // (25) void setData(double d) + argtypes.clear(); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); + + Cppyy_PseudoClassInfo info(methods); + s_scopes[(cppyy_scope_t)s_scope_id] = info; + } // -- class payload } } _init; @@ -150,36 +216,69 @@ return s_handles[scope_name]; // lookup failure will return 0 (== error) } +cppyy_type_t cppyy_actual_class(cppyy_type_t klass, cppyy_object_t /* obj */) { + return klass; +} + /* memory management ------------------------------------------------------ */ void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { if (handle == s_handles["example01"]) - delete (PseudoExample01*)self; + delete (dummy::example01*)self; } /* method/function dispatching -------------------------------------------- */ +void cppyy_call_v(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + switch ((long)method) { + case 5: // static void example01:;staticSetPayload(payload* p, double d) + assert(!self && nargs == 2); + dummy::example01::staticSetPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), + ((CPPYY_G__value*)args)[1].obj.d); + break; + case 9: // static void example01::setCount(int) + assert(!self && nargs == 1); + dummy::example01::setCount(((CPPYY_G__value*)args)[0].obj.in); + break; + case 20: // void example01::setPayload(payload* p); + assert(self && nargs == 1); + ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_v"); + break; + } +} + int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { int result = 0; switch ((long)method) { - case 1: // static 
int staticAddOneToInt(int) + case 1: // static int example01::staticAddOneToInt(int) assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.in + 1; + result = dummy::example01::staticAddOneToInt(((CPPYY_G__value*)args)[0].obj.in); break; - case 2: // static int staticAddOneToInt(int, int) + case 2: // static int example01::staticAddOneToInt(int, int) assert(!self && nargs == 2); - result = ((CPPYY_G__value*)args)[0].obj.in + ((CPPYY_G__value*)args)[1].obj.in + 1; + result = dummy::example01::staticAddOneToInt( + ((CPPYY_G__value*)args)[0].obj.in, ((CPPYY_G__value*)args)[1].obj.in); break; - case 3: // static int staticAtoi(const char* str) + case 3: // static int example01::staticAtoi(const char* str) assert(!self && nargs == 1); - result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + result = dummy::example01::staticAtoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); break; - case 8: // static int getCount() - assert(!self && nargs == 0); - // can't actually call this method (would need to resolve example01::count), but - // other than the memory tests, most tests just check for 0 at the end - result = 0; - break; + case 8: // static int example01::getCount() + assert(!self && nargs == 0); + result = dummy::example01::getCount(); + break; + case 12: // int example01::addDataToInt(int a) + assert(self && nargs == 1); + result = ((dummy::example01*)self)->addDataToInt(((CPPYY_G__value*)args)[0].obj.in); + break; + case 18: // int example01::addDataToAtoi(const char* str) + assert(self && nargs == 1); + result = ((dummy::example01*)self)->addDataToAtoi( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; default: assert(!"method unknown in cppyy_call_i"); break; @@ -188,26 +287,50 @@ } long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - if ((long)method == 4) { // static char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); 
- char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return (long)strout; + long result = 0; + switch ((long)method) { + case 4: // static char* example01::staticStrcpy(const char* strin) + assert(!self && nargs == 1); + result = (long)dummy::example01::staticStrcpy( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + case 6: // static payload* example01::staticCyclePayload(payload* p, double d) + assert(!self && nargs == 2); + result = (long)dummy::example01::staticCyclePayload( + (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), + ((CPPYY_G__value*)args)[1].obj.d); + break; + case 19: // char* example01::addToStringValue(const char* str) + assert(self && nargs == 1); + result = (long)((dummy::example01*)self)->addToStringValue( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + case 21: // payload* example01::cyclePayload(payload* p) + assert(self && nargs == 1); + result = (long)((dummy::example01*)self)->cyclePayload( + (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_l"); + break; } - assert(!"method unknown in cppyy_call_l"); - return 0; + return result; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { double result = 0.; switch ((long)method) { - case 0: // static double staticAddToDouble(double) + case 0: // static double example01::staticAddToDouble(double) assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + result = dummy::example01::staticAddToDouble(((CPPYY_G__value*)args)[0].obj.d); break; - case 12: // double addDataToDouble(double a) + case 17: // double example01::addDataToDouble(double a) assert(self && nargs == 1); - result = ((PseudoExample01*)self)->m_somedata + ((CPPYY_G__value*)args)[0].obj.d; + result = ((dummy::example01*)self)->addDataToDouble(((CPPYY_G__value*)args)[0].obj.d); + break; + case 24: // double payload::getData() + assert(self 
&& nargs == 0); + result = ((dummy::payload*)self)->getData(); break; default: assert(!"method unknown in cppyy_call_d"); @@ -217,11 +340,17 @@ } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - // char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); - char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return strout; + char* result = 0; + switch ((long)method) { + case 4: // static char* example01::staticStrcpy(const char* strin) + assert(!self && nargs == 1); + result = dummy::example01::staticStrcpy((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_s"); + break; + } + return result; } cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { @@ -230,17 +359,27 @@ switch ((long)method) { case 10: assert(nargs == 0); - result = new PseudoExample01; + result = new dummy::example01; break; case 11: assert(nargs == 1); - result = new PseudoExample01(((CPPYY_G__value*)args)[0].obj.in); + result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); break; default: - assert(!"method unknown in cppyy_constructor"); + assert(!"method of example01 unknown in cppyy_constructor"); break; } - } + } else if (handle == s_handles["payload"]) { + switch ((long)method) { + case 23: + if (nargs == 0) result = new dummy::payload; + else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); + break; + default: + assert(!"method payload unknown in cppyy_constructor"); + break; + } + } return (cppyy_object_t)result; } @@ -346,8 +485,13 @@ return 0; } -cppyy_method_t cppyy_get_method(cppyy_scope_t /* handle */, cppyy_index_t method_index) { - return (cppyy_method_t)method_index; +cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return 
(cppyy_method_t)method_index; + else if (handle == s_handles["payload"]) + return (cppyy_method_t)((long)method_index + payload_methods_offset); + assert(!"unknown class in cppyy_get_method"); + return (cppyy_method_t)0; } @@ -356,12 +500,16 @@ if (handle == s_handles["example01"]) return example01_last_static_method <= method_index && method_index < example01_last_constructor; + else if (handle == s_handles["payload"]) + return (long)method_index == 0; return 0; } int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { if (handle == s_handles["example01"]) return method_index < example01_last_static_method ? 1 : 0; + if (handle == s_handles["payload"]) + return 0; return 1; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -8,11 +8,12 @@ # run only tests that are covered by the dummy backend and tests # that do not rely on reflex if not ('test_helper.py' in item.location[0] or \ - 'test_cppyy.py' in item.location[0]): + 'test_cppyy.py' in item.location[0] or \ + 'test_pythonify.py' in item.location[0]): py.test.skip("genreflex is not installed") import re - if 'test_cppyy.py' in item.location[0] and \ - not re.search("test0[1-36]", item.location[2]): + if 'test_pythonify.py' in item.location[0] and \ + not re.search("AppTestPYTHONIFY.test0[1-6]", item.location[2]): py.test.skip("genreflex is not installed") def pytest_ignore_collect(path, config): @@ -39,10 +40,11 @@ pkgpath = py.path.local(__file__).dirpath().join(os.pardir) srcpath = pkgpath.join('src') incpath = pkgpath.join('include') + tstpath = pkgpath.join('test') eci = ExternalCompilationInfo( separate_module_files=[srcpath.join('dummy_backend.cxx')], - include_dirs=[incpath], + include_dirs=[incpath, tstpath], use_cpp_linker=True, ) diff --git a/pypy/module/cppyy/test/example01.cxx b/pypy/module/cppyy/test/example01.cxx --- a/pypy/module/cppyy/test/example01.cxx 
+++ b/pypy/module/cppyy/test/example01.cxx @@ -1,4 +1,3 @@ -#include #include #include #include diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -321,7 +321,7 @@ e = cppyy.gbl.example01(2) assert 5 == meth(e, 3) - def test01_installable_function(self): + def test15_installable_function(self): """Test installing and calling global C++ function as python method""" import cppyy @@ -332,6 +332,33 @@ assert 2 == e.fresh(1) assert 3 == e.fresh(2) + def test16_subclassing(self): + """A sub-class on the python side should have that class as type""" + + import cppyy + example01 = cppyy.gbl.example01 + + o = example01() + assert type(o) == example01 + + class MyClass1(example01): + def myfunc(self): + return 1 + + o = MyClass1() + assert type(o) == MyClass1 + assert isinstance(o, example01) + assert o.myfunc() == 1 + + class MyClass2(example01): + def __init__(self, what): + example01.__init__(self) + self.what = what + + o = MyClass2('hi') + assert type(o) == MyClass2 + assert o.what == 'hi' + class AppTestPYTHONIFY_UI: spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -477,3 +477,29 @@ assert b1 == e2 assert b1 != b2 assert b1 == e2 + + +class AppTestTEMPLATE_UI: + spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + + def setup_class(cls): + cls.w_test_dct = cls.space.wrap(test_dct) + cls.w_stlstring = cls.space.appexec([], """(): + import cppyy, sys + return cppyy.load_reflection_info(%r)""" % (test_dct, )) + + def test01_explicit_templates(self): + """Explicit use of Template class""" + + import cppyy + + vector = cppyy.Template('vector', cppyy.gbl.std) + assert vector[int] == vector(int) + + v = vector[int]() + 
+ N = 10 + v += range(N) + assert len(v) == N + for i in range(N): + assert v[i] == i diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -133,10 +133,7 @@ return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' - def buffer_w(self, w_obj, flags): - return FakeBuffer(w_obj) - - def readbuf_w(self, w_obj): + def getarg_w(self, code, w_obj): # for retrieving buffers return FakeBuffer(w_obj) def exception_match(self, typ, sub): From noreply at buildbot.pypy.org Sat Apr 26 04:36:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 26 Apr 2014 04:36:41 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup test Message-ID: <20140426023641.53BE01C11B1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70992:298068f3d4ea Date: 2014-04-25 16:54 -0400 http://bitbucket.org/pypy/pypy/changeset/298068f3d4ea/ Log: cleanup test diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -3,16 +3,18 @@ class TestBuffers(BaseTestPyPyC): def test_re_match(self): - def main(): + def main(n): import re import array p = re.compile('.+') a = array.array('c', 'test' * 1000) i = 0 - while i < 5000: + while i < n: i += 1 p.match(a) # ID: match - log = self.run(main, []) + return i + log = self.run(main, [1000]) + assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('match', """ guard_not_invalidated(descr=...) @@ -21,8 +23,5 @@ guard_false(i67, descr=...) i69 = int_gt(., i65) guard_true(i69, descr=...) - guard_not_invalidated(descr=...) - i74 = getfield_raw(., descr=...) - i75 = int_lt(i74, 0) - guard_false(i75, descr=...) 
+ --TICK-- """) From noreply at buildbot.pypy.org Sat Apr 26 04:36:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 26 Apr 2014 04:36:42 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix tcsetattr validation of attributes Message-ID: <20140426023642.8A87E1C11B1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70993:e87d8dddb7f3 Date: 2014-04-25 19:38 -0400 http://bitbucket.org/pypy/pypy/changeset/e87d8dddb7f3/ Log: test/fix tcsetattr validation of attributes diff --git a/pypy/module/termios/interp_termios.py b/pypy/module/termios/interp_termios.py --- a/pypy/module/termios/interp_termios.py +++ b/pypy/module/termios/interp_termios.py @@ -4,7 +4,7 @@ """ from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import wrap_oserror +from pypy.interpreter.error import wrap_oserror, OperationError from rpython.rlib import rtermios import termios @@ -19,6 +19,10 @@ @unwrap_spec(when=int) def tcsetattr(space, w_fd, when, w_attributes): fd = space.c_filedescriptor_w(w_fd) + if not space.isinstance_w(w_attributes, space.w_list) or \ + space.len_w(w_attributes) != 7: + raise OperationError(space.w_TypeError, space.wrap( + "tcsetattr, arg 3: must be 7 element list")) w_iflag, w_oflag, w_cflag, w_lflag, w_ispeed, w_ospeed, w_cc = \ space.unpackiterable(w_attributes, expected_length=7) w_builtin = space.getbuiltinmodule('__builtin__') diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -149,4 +149,7 @@ def test_error_tcsetattr(self): import termios - raises(ValueError, termios.tcsetattr, 0, 1, (1, 2)) + exc = raises(TypeError, termios.tcsetattr, 0, 1, (1, 2)) + assert str(exc.value) == "tcsetattr, arg 3: must be 7 element list" + exc = raises(TypeError, termios.tcsetattr, 0, 1, (1, 2, 3, 4, 5, 6, 7)) + assert str(exc.value) == "tcsetattr, arg 3: must be 7 element list" 
From noreply at buildbot.pypy.org Sat Apr 26 04:36:43 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 26 Apr 2014 04:36:43 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140426023643.BA39A1C11B1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70994:844b089193cd Date: 2014-04-25 22:32 -0400 http://bitbucket.org/pypy/pypy/changeset/844b089193cd/ Log: merge heads diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -25,6 +25,7 @@ '_init_pythonify' : 'pythonify._init_pythonify', 'load_reflection_info' : 'pythonify.load_reflection_info', 'add_pythonization' : 'pythonify.add_pythonization', + 'Template' : 'pythonify.CppyyTemplateType', } def __init__(self, space, *args): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.readbuf_w(w_obj) + buf = space.getarg_w('s*', w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.readbuf_w(w_value) + buf = space.getarg_w('s*', w_value) # TODO: report if too many items given? 
for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.readbuf_w(w_value) + buf = space.getarg_w('s*', w_value) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -450,8 +450,8 @@ class CPPConstructor(CPPMethod): """Method dispatcher that constructs new objects. This method can not have - a fast path, a the allocation of the object is currently left to the - reflection layer only, b/c the C++ class may have an overloaded operator + a fast path, as the allocation of the object is currently left to the + reflection layer only, since the C++ class may have an overloaded operator new, disallowing malloc here.""" _immutable_ = True @@ -460,8 +460,18 @@ # TODO: these casts are very, very un-pretty; need to find a way of # re-using CPPMethod's features w/o these roundabouts vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - w_result = CPPMethod.call(self, vscope, args_w) + cppinstance = None + try: + cppinstance = self.space.interp_w(W_CPPInstance, args_w[0], can_be_None=False) + use_args_w = args_w[1:] + except (OperationError, TypeError), e: + use_args_w = args_w + w_result = CPPMethod.call(self, vscope, use_args_w) newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + if cppinstance: + cppinstance._rawobject = newthis + memory_regulator.register(cppinstance) + return args_w[0] return wrap_cppobject(self.space, newthis, self.scope, do_cast=False, python_owns=True, fresh=True) @@ -1141,10 +1151,14 @@ self.objects = rweakref.RWeakValueDictionary(int, W_CPPInstance) def register(self, obj): + if not obj._rawobject: + return int_address = int(rffi.cast(rffi.LONG, 
obj._rawobject)) self.objects.set(int_address, obj) def unregister(self, obj): + if not obj._rawobject: + return int_address = int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, None) @@ -1194,7 +1208,7 @@ w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) # try to recycle existing object if this one is not newly created - if not fresh: + if not fresh and rawobject: obj = memory_regulator.retrieve(rawobject) if obj is not None and obj.cppclass is cppclass: return obj diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -25,9 +25,12 @@ # class CppyyClass defined in _init_pythonify() class CppyyTemplateType(object): - def __init__(self, scope, name): - self._scope = scope + def __init__(self, name, scope=None): self._name = name + if scope is None: + self._scope = gbl + else: + self._scope = scope def _arg_to_str(self, arg): if arg == str: @@ -143,7 +146,12 @@ raise TypeError(msg) else: def __new__(cls, *args): - return constructor_overload.call(None, *args) + # create a place-holder only as there may be a derived class defined + import cppyy + instance = cppyy.bind_object(0, class_name, True) + if not instance.__class__ is cls: + instance.__class__ = cls # happens for derived class + return instance return __new__ def make_pycppclass(scope, class_name, final_class_name, cppclass): @@ -206,7 +214,7 @@ return pycppclass def make_cpptemplatetype(scope, template_name): - return CppyyTemplateType(scope, template_name) + return CppyyTemplateType(template_name, scope) def get_pycppitem(scope, name): @@ -424,7 +432,9 @@ __metaclass__ = CppyyClassMeta def __init__(self, *args, **kwds): - pass # ignored, for the C++ backend, ctor == __new__ + __init__ + # self is only a placeholder; now create the actual C++ object + args = (self,) + args + self._cpp_proxy.get_overload(self._cpp_proxy.type_name).call(None, *args) # class generator callback 
cppyy._set_class_generator(clgen_callback) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -3,12 +3,23 @@ #include #include +#include #include #include #include #include +// add example01.cxx code +int globalAddOneToInt(int a); + +namespace dummy { +#include "example01.cxx" +} + +int globalAddOneToInt(int a) { + return dummy::globalAddOneToInt(a); +} /* pseudo-reflection data ------------------------------------------------- */ namespace { @@ -38,28 +49,16 @@ typedef std::map Scopes_t; static Scopes_t s_scopes; -class PseudoExample01 { -public: - PseudoExample01() : m_somedata(-99) {} - PseudoExample01(int a) : m_somedata(a) {} - PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} - PseudoExample01& operator=(const PseudoExample01& e) { - if (this != &e) m_somedata = e.m_somedata; - return *this; - } - virtual ~PseudoExample01() {} - -public: - int m_somedata; -}; - static int example01_last_static_method = 0; static int example01_last_constructor = 0; +static int payload_methods_offset = 0; struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- static long s_scope_id = 0; + + { // class example01 -- s_handles["example01"] = (cppyy_scope_t)++s_scope_id; std::vector methods; @@ -115,14 +114,81 @@ // cut-off is used in cppyy_is_constructor example01_last_constructor = methods.size(); - // (12) double addDataToDouble(double a) + // (12) int addDataToInt(int a) + argtypes.clear(); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToInt", argtypes, "int")); + + // (13) int addDataToIntConstRef(const int& a) + argtypes.clear(); + argtypes.push_back("const int&"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToIntConstRef", argtypes, "int")); + + // (14) int overloadedAddDataToInt(int a, int b) + argtypes.clear(); + 
argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + // (15) int overloadedAddDataToInt(int a) + // (16) int overloadedAddDataToInt(int a, int b, int c) + argtypes.clear(); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + // (17) double addDataToDouble(double a) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + // (18) int addDataToAtoi(const char* str) + // (19) char* addToStringValue(const char* str) + argtypes.clear(); + argtypes.push_back("const char*"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToAtoi", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("addToStringValue", argtypes, "char*")); + + // (20) void setPayload(payload* p) + // (21) payload* cyclePayload(payload* p) + // (22) payload copyCyclePayload(payload* p) + argtypes.clear(); + argtypes.push_back("payload*"); + methods.push_back(Cppyy_PseudoMethodInfo("setPayload", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("cyclePayload", argtypes, "payload*")); + methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", argtypes, "payload")); + + payload_methods_offset = methods.size(); + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; - // -- class example01 + } // -- class example01 + + { // class payload -- + s_handles["payload"] = (cppyy_scope_t)++s_scope_id; + + std::vector methods; + + // (23) payload(double d = 0.) 
+ std::vector argtypes; + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor")); + + // (24) double getData() + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("getData", argtypes, "double")); + + // (25) void setData(double d) + argtypes.clear(); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); + + Cppyy_PseudoClassInfo info(methods); + s_scopes[(cppyy_scope_t)s_scope_id] = info; + } // -- class payload } } _init; @@ -150,36 +216,69 @@ return s_handles[scope_name]; // lookup failure will return 0 (== error) } +cppyy_type_t cppyy_actual_class(cppyy_type_t klass, cppyy_object_t /* obj */) { + return klass; +} + /* memory management ------------------------------------------------------ */ void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { if (handle == s_handles["example01"]) - delete (PseudoExample01*)self; + delete (dummy::example01*)self; } /* method/function dispatching -------------------------------------------- */ +void cppyy_call_v(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + switch ((long)method) { + case 5: // static void example01:;staticSetPayload(payload* p, double d) + assert(!self && nargs == 2); + dummy::example01::staticSetPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), + ((CPPYY_G__value*)args)[1].obj.d); + break; + case 9: // static void example01::setCount(int) + assert(!self && nargs == 1); + dummy::example01::setCount(((CPPYY_G__value*)args)[0].obj.in); + break; + case 20: // void example01::setPayload(payload* p); + assert(self && nargs == 1); + ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_v"); + break; + } +} + int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { int result = 0; switch ((long)method) { - case 1: // static 
int staticAddOneToInt(int) + case 1: // static int example01::staticAddOneToInt(int) assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.in + 1; + result = dummy::example01::staticAddOneToInt(((CPPYY_G__value*)args)[0].obj.in); break; - case 2: // static int staticAddOneToInt(int, int) + case 2: // static int example01::staticAddOneToInt(int, int) assert(!self && nargs == 2); - result = ((CPPYY_G__value*)args)[0].obj.in + ((CPPYY_G__value*)args)[1].obj.in + 1; + result = dummy::example01::staticAddOneToInt( + ((CPPYY_G__value*)args)[0].obj.in, ((CPPYY_G__value*)args)[1].obj.in); break; - case 3: // static int staticAtoi(const char* str) + case 3: // static int example01::staticAtoi(const char* str) assert(!self && nargs == 1); - result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + result = dummy::example01::staticAtoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); break; - case 8: // static int getCount() - assert(!self && nargs == 0); - // can't actually call this method (would need to resolve example01::count), but - // other than the memory tests, most tests just check for 0 at the end - result = 0; - break; + case 8: // static int example01::getCount() + assert(!self && nargs == 0); + result = dummy::example01::getCount(); + break; + case 12: // int example01::addDataToInt(int a) + assert(self && nargs == 1); + result = ((dummy::example01*)self)->addDataToInt(((CPPYY_G__value*)args)[0].obj.in); + break; + case 18: // int example01::addDataToAtoi(const char* str) + assert(self && nargs == 1); + result = ((dummy::example01*)self)->addDataToAtoi( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; default: assert(!"method unknown in cppyy_call_i"); break; @@ -188,26 +287,50 @@ } long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - if ((long)method == 4) { // static char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); 
- char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return (long)strout; + long result = 0; + switch ((long)method) { + case 4: // static char* example01::staticStrcpy(const char* strin) + assert(!self && nargs == 1); + result = (long)dummy::example01::staticStrcpy( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + case 6: // static payload* example01::staticCyclePayload(payload* p, double d) + assert(!self && nargs == 2); + result = (long)dummy::example01::staticCyclePayload( + (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), + ((CPPYY_G__value*)args)[1].obj.d); + break; + case 19: // char* example01::addToStringValue(const char* str) + assert(self && nargs == 1); + result = (long)((dummy::example01*)self)->addToStringValue( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + case 21: // payload* example01::cyclePayload(payload* p) + assert(self && nargs == 1); + result = (long)((dummy::example01*)self)->cyclePayload( + (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_l"); + break; } - assert(!"method unknown in cppyy_call_l"); - return 0; + return result; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { double result = 0.; switch ((long)method) { - case 0: // static double staticAddToDouble(double) + case 0: // static double example01::staticAddToDouble(double) assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + result = dummy::example01::staticAddToDouble(((CPPYY_G__value*)args)[0].obj.d); break; - case 12: // double addDataToDouble(double a) + case 17: // double example01::addDataToDouble(double a) assert(self && nargs == 1); - result = ((PseudoExample01*)self)->m_somedata + ((CPPYY_G__value*)args)[0].obj.d; + result = ((dummy::example01*)self)->addDataToDouble(((CPPYY_G__value*)args)[0].obj.d); + break; + case 24: // double payload::getData() + assert(self 
&& nargs == 0); + result = ((dummy::payload*)self)->getData(); break; default: assert(!"method unknown in cppyy_call_d"); @@ -217,11 +340,17 @@ } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - // char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); - char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return strout; + char* result = 0; + switch ((long)method) { + case 4: // static char* example01::staticStrcpy(const char* strin) + assert(!self && nargs == 1); + result = dummy::example01::staticStrcpy((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_s"); + break; + } + return result; } cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { @@ -230,17 +359,27 @@ switch ((long)method) { case 10: assert(nargs == 0); - result = new PseudoExample01; + result = new dummy::example01; break; case 11: assert(nargs == 1); - result = new PseudoExample01(((CPPYY_G__value*)args)[0].obj.in); + result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); break; default: - assert(!"method unknown in cppyy_constructor"); + assert(!"method of example01 unknown in cppyy_constructor"); break; } - } + } else if (handle == s_handles["payload"]) { + switch ((long)method) { + case 23: + if (nargs == 0) result = new dummy::payload; + else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); + break; + default: + assert(!"method payload unknown in cppyy_constructor"); + break; + } + } return (cppyy_object_t)result; } @@ -346,8 +485,13 @@ return 0; } -cppyy_method_t cppyy_get_method(cppyy_scope_t /* handle */, cppyy_index_t method_index) { - return (cppyy_method_t)method_index; +cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return 
(cppyy_method_t)method_index; + else if (handle == s_handles["payload"]) + return (cppyy_method_t)((long)method_index + payload_methods_offset); + assert(!"unknown class in cppyy_get_method"); + return (cppyy_method_t)0; } @@ -356,12 +500,16 @@ if (handle == s_handles["example01"]) return example01_last_static_method <= method_index && method_index < example01_last_constructor; + else if (handle == s_handles["payload"]) + return (long)method_index == 0; return 0; } int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { if (handle == s_handles["example01"]) return method_index < example01_last_static_method ? 1 : 0; + if (handle == s_handles["payload"]) + return 0; return 1; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -8,11 +8,12 @@ # run only tests that are covered by the dummy backend and tests # that do not rely on reflex if not ('test_helper.py' in item.location[0] or \ - 'test_cppyy.py' in item.location[0]): + 'test_cppyy.py' in item.location[0] or \ + 'test_pythonify.py' in item.location[0]): py.test.skip("genreflex is not installed") import re - if 'test_cppyy.py' in item.location[0] and \ - not re.search("test0[1-36]", item.location[2]): + if 'test_pythonify.py' in item.location[0] and \ + not re.search("AppTestPYTHONIFY.test0[1-6]", item.location[2]): py.test.skip("genreflex is not installed") def pytest_ignore_collect(path, config): @@ -39,10 +40,11 @@ pkgpath = py.path.local(__file__).dirpath().join(os.pardir) srcpath = pkgpath.join('src') incpath = pkgpath.join('include') + tstpath = pkgpath.join('test') eci = ExternalCompilationInfo( separate_module_files=[srcpath.join('dummy_backend.cxx')], - include_dirs=[incpath], + include_dirs=[incpath, tstpath], use_cpp_linker=True, ) diff --git a/pypy/module/cppyy/test/example01.cxx b/pypy/module/cppyy/test/example01.cxx --- a/pypy/module/cppyy/test/example01.cxx 
+++ b/pypy/module/cppyy/test/example01.cxx @@ -1,4 +1,3 @@ -#include #include #include #include diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -321,7 +321,7 @@ e = cppyy.gbl.example01(2) assert 5 == meth(e, 3) - def test01_installable_function(self): + def test15_installable_function(self): """Test installing and calling global C++ function as python method""" import cppyy @@ -332,6 +332,33 @@ assert 2 == e.fresh(1) assert 3 == e.fresh(2) + def test16_subclassing(self): + """A sub-class on the python side should have that class as type""" + + import cppyy + example01 = cppyy.gbl.example01 + + o = example01() + assert type(o) == example01 + + class MyClass1(example01): + def myfunc(self): + return 1 + + o = MyClass1() + assert type(o) == MyClass1 + assert isinstance(o, example01) + assert o.myfunc() == 1 + + class MyClass2(example01): + def __init__(self, what): + example01.__init__(self) + self.what = what + + o = MyClass2('hi') + assert type(o) == MyClass2 + assert o.what == 'hi' + class AppTestPYTHONIFY_UI: spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -477,3 +477,29 @@ assert b1 == e2 assert b1 != b2 assert b1 == e2 + + +class AppTestTEMPLATE_UI: + spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + + def setup_class(cls): + cls.w_test_dct = cls.space.wrap(test_dct) + cls.w_stlstring = cls.space.appexec([], """(): + import cppyy, sys + return cppyy.load_reflection_info(%r)""" % (test_dct, )) + + def test01_explicit_templates(self): + """Explicit use of Template class""" + + import cppyy + + vector = cppyy.Template('vector', cppyy.gbl.std) + assert vector[int] == vector(int) + + v = vector[int]() + 
+ N = 10 + v += range(N) + assert len(v) == N + for i in range(N): + assert v[i] == i diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -133,10 +133,7 @@ return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' - def buffer_w(self, w_obj, flags): - return FakeBuffer(w_obj) - - def readbuf_w(self, w_obj): + def getarg_w(self, code, w_obj): # for retrieving buffers return FakeBuffer(w_obj) def exception_match(self, typ, sub): From noreply at buildbot.pypy.org Sat Apr 26 04:36:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 26 Apr 2014 04:36:44 +0200 (CEST) Subject: [pypy-commit] pypy default: unused Message-ID: <20140426023644.D9A3B1C11B1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70995:ab56be458bc6 Date: 2014-04-25 22:35 -0400 http://bitbucket.org/pypy/pypy/changeset/ab56be458bc6/ Log: unused diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -104,7 +104,6 @@ class dummy: pass self.config = dummy() self.config.translating = False - self.BUF_SIMPLE = 1 def issequence_w(self, w_obj): return True From noreply at buildbot.pypy.org Sat Apr 26 05:35:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 26 Apr 2014 05:35:53 +0200 (CEST) Subject: [pypy-commit] pypy default: unused imports Message-ID: <20140426033553.7EC251C023E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70996:19437b58b160 Date: 2014-04-25 23:34 -0400 http://bitbucket.org/pypy/pypy/changeset/19437b58b160/ Log: unused imports diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -3,13 +3,12 @@ """ import operator -from rpython.rlib.buffer import Buffer, StringBuffer, 
SubBuffer +from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from rpython.rlib.objectmodel import compute_hash -from rpython.rlib.rstring import StringBuilder class W_Buffer(W_Root): diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -6,7 +6,7 @@ from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty From noreply at buildbot.pypy.org Sat Apr 26 06:12:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 26 Apr 2014 06:12:56 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_ioctl_termios Message-ID: <20140426041256.8CFF41C023E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70997:3a67ef86d30d Date: 2014-04-26 00:08 -0400 http://bitbucket.org/pypy/pypy/changeset/3a67ef86d30d/ Log: fix test_ioctl_termios diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -86,7 +86,7 @@ child.expect('ok!') def test_ioctl_termios(self): - source = py.code.Source(""" + source = py.code.Source(r""" import termios import fcntl lgt = len(fcntl.ioctl(2, termios.TIOCGWINSZ, '\000'*8)) From noreply at buildbot.pypy.org Sat Apr 26 08:02:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 26 Apr 2014 08:02:30 +0200 (CEST) Subject: [pypy-commit] pypy default: pep8 Message-ID: 
<20140426060230.E4ED61C1008@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70998:f57d8ad3b852 Date: 2014-04-26 01:24 -0400 http://bitbucket.org/pypy/pypy/changeset/f57d8ad3b852/ Log: pep8 diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -15,10 +15,11 @@ space.setitem(w_modules, w_main, mainmodule) return mainmodule + def compilecode(space, source, filename, cmd='exec'): w = space.wrap - w_code = space.builtin.call('compile', - w(source), w(filename), w(cmd), w(0), w(0)) + w_code = space.builtin.call( + 'compile', w(source), w(filename), w(cmd), w(0), w(0)) pycode = space.interp_w(eval.Code, w_code) return pycode @@ -28,7 +29,7 @@ cmd = 'eval' else: cmd = 'exec' - + try: if space is None: from pypy.objspace.std import StdObjSpace @@ -55,18 +56,22 @@ operationerr.record_interpreter_traceback() raise + def run_string(source, filename=None, space=None): _run_eval_string(source, filename, space, False) + def eval_string(source, filename=None, space=None): return _run_eval_string(source, filename, space, True) + def run_file(filename, space=None): - if __name__=='__main__': + if __name__ == '__main__': print "Running %r with %r" % (filename, space) istring = open(filename).read() run_string(istring, filename, space) + def run_module(module_name, args, space=None): """Implements PEP 338 'Executing modules as scripts', overwriting sys.argv[1:] using `args` and executing the module `module_name`. @@ -89,7 +94,6 @@ return space.call_function(w_run_module, w(module_name), space.w_None, w('__main__'), space.w_True) -# ____________________________________________________________ def run_toplevel(space, f, verbose=False): """Calls f() and handle all OperationErrors. 
From noreply at buildbot.pypy.org Sat Apr 26 08:18:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 26 Apr 2014 08:18:09 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix xrange rejecting floats Message-ID: <20140426061809.E0BBA1C023E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70999:171d134c9340 Date: 2014-04-26 02:14 -0400 http://bitbucket.org/pypy/pypy/changeset/171d134c9340/ Log: test/fix xrange rejecting floats diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -351,17 +351,17 @@ self.promote_step = promote_step def descr_new(space, w_subtype, w_start, w_stop=None, w_step=None): - start = _toint(space, w_start) + start = space.int_w(w_start) if space.is_none(w_step): # no step argument provided step = 1 promote_step = True else: - step = _toint(space, w_step) + step = space.int_w(w_step) promote_step = False if space.is_none(w_stop): # only 1 argument provided start, stop = 0, start else: - stop = _toint(space, w_stop) + stop = space.int_w(w_stop) howmany = get_len_of_range(space, start, stop, step) obj = space.allocate_instance(W_XRange, w_subtype) W_XRange.__init__(obj, space, start, howmany, step, promote_step) @@ -425,11 +425,6 @@ minint = -sys.maxint - 1 return minint if last < minint - step else last + step -def _toint(space, w_obj): - # this also supports float arguments. CPython still does, too. - # needs a bit more thinking in general... 
- return space.int_w(space.int(w_obj)) - W_XRange.typedef = TypeDef("xrange", __new__ = interp2app(W_XRange.descr_new.im_func), __repr__ = interp2app(W_XRange.descr_repr), diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -311,14 +311,14 @@ def test_xrange_len(self): x = xrange(33) assert len(x) == 33 - x = xrange(33.2) - assert len(x) == 33 + exc = raises(TypeError, xrange, 33.2) + assert "integer" in str(exc.value) x = xrange(33,0,-1) assert len(x) == 33 x = xrange(33,0) assert len(x) == 0 - x = xrange(33,0.2) - assert len(x) == 0 + exc = raises(TypeError, xrange, 33, 0.2) + assert "integer" in str(exc.value) x = xrange(0,33) assert len(x) == 33 x = xrange(0,33,-1) diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -1,5 +1,4 @@ class AppTestMap: - def test_trivial_map_one_seq(self): assert map(lambda x: x+2, [1, 2, 3, 4]) == [3, 4, 5, 6] @@ -77,6 +76,7 @@ assert result == [(2, 7), (1, 6), (None, 5), (None, 4), (None, 3), (None, 2), (None, 1)] + class AppTestZip: def test_one_list(self): assert zip([1,2,3]) == [(1,), (2,), (3,)] @@ -93,6 +93,7 @@ yield None assert zip(Foo()) == [] + class AppTestReduce: def test_None(self): raises(TypeError, reduce, lambda x, y: x+y, [1,2,3], None) @@ -105,6 +106,7 @@ assert reduce(lambda x, y: x-y, [10, 2, 8]) == 0 assert reduce(lambda x, y: x-y, [2, 8], 10) == 0 + class AppTestFilter: def test_None(self): assert filter(None, ['a', 'b', 1, 0, None]) == ['a', 'b', 1] @@ -125,6 +127,7 @@ return i * 10 assert filter(lambda x: x != 20, T("abcd")) == (0, 10, 30) + class AppTestXRange: def test_xrange(self): x = xrange(2, 9, 3) @@ -155,7 +158,8 @@ assert list(xrange(0, 10, A())) == [0, 5] def 
test_xrange_float(self): - assert list(xrange(0.1, 2.0, 1.1)) == [0, 1] + exc = raises(TypeError, xrange, 0.1, 2.0, 1.1) + assert "integer" in str(exc.value) def test_xrange_long(self): import sys @@ -218,6 +222,7 @@ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] raises(TypeError, reversed, reversed("hello")) + class AppTestApply: def test_apply(self): def f(*args, **kw): @@ -228,6 +233,7 @@ assert apply(f, args) == (args, {}) assert apply(f, args, kw) == (args, kw) + class AppTestAllAny: """ These are copied directly and replicated from the Python 2.5 source code. @@ -277,6 +283,7 @@ S = [10, 20, 30] assert any([x > 42 for x in S]) == False + class AppTestMinMax: def test_min(self): assert min(1, 2) == 1 From noreply at buildbot.pypy.org Sat Apr 26 08:50:52 2014 From: noreply at buildbot.pypy.org (dalcinl) Date: Sat, 26 Apr 2014 08:50:52 +0200 (CEST) Subject: [pypy-commit] cffi default: CPython: Better C -> Python conversion for integer constants Message-ID: <20140426065052.899A31C1008@cobra.cs.uni-duesseldorf.de> Author: Lisandro Dalcin Branch: Changeset: r1506:5e1105060c45 Date: 2014-04-25 12:23 +0300 http://bitbucket.org/cffi/cffi/changeset/5e1105060c45/ Log: CPython: Better C -> Python conversion for integer constants - Silent GCC -Wsign-compare diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -600,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -815,6 +809,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float 
PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ From noreply at buildbot.pypy.org Sat Apr 26 10:08:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Apr 2014 10:08:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Be resistent against a random "pygame" module without "pygame.error" Message-ID: <20140426080809.17B631D2490@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71000:106784259a88 Date: 2014-04-26 10:07 +0200 http://bitbucket.org/pypy/pypy/changeset/106784259a88/ Log: Be resistent against a random "pygame" module without "pygame.error" diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py --- a/dotviewer/graphserver.py +++ b/dotviewer/graphserver.py @@ -160,15 +160,14 @@ " | instructions in dotviewer/sshgraphserver.py\n") try: import pygame - except ImportError: + if isinstance(e, pygame.error): + print >> f, help + except Exception, e: f.seek(0) f.truncate() - print >> f, "ImportError" + print >> f, "%s: %s" % (e.__class__.__name__, e) print >> f, " | Pygame is not installed; either install it, or" print >> f, help - else: - if isinstance(e, pygame.error): - print >> f, help io.sendmsg(msgstruct.MSG_ERROR, f.getvalue()) else: listen_server(sys.argv[1]) From noreply at buildbot.pypy.org Sat Apr 26 10:34:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Apr 2014 10:34:16 +0200 (CEST) Subject: [pypy-commit] cffi default: issue #154: Mingw32 fixes Message-ID: <20140426083416.1DF3B1C1008@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1507:494ce4e8d9f2 Date: 
2014-04-26 10:34 +0200 http://bitbucket.org/cffi/cffi/changeset/494ce4e8d9f2/ Log: issue #154: Mingw32 fixes diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -770,7 +770,7 @@ #include #include -#ifdef MS_WIN32 +#if defined(MS_WIN32) && !defined(_STDINT_H) #include /* for alloca() */ typedef __int8 int8_t; typedef __int16 int16_t; From noreply at buildbot.pypy.org Sat Apr 26 12:28:48 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 26 Apr 2014 12:28:48 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: hg merge py3k Message-ID: <20140426102848.EF70C1C1008@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r71001:16e15360b77d Date: 2014-04-26 12:16 +0200 http://bitbucket.org/pypy/pypy/changeset/16e15360b77d/ Log: hg merge py3k diff too long, truncating to 2000 out of 81582 lines diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,11 +48,10 @@ def tearDown(self): os.chdir(self.old_dir) - gc.collect() for root, dirs, files in 
os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) - shutil.rmtree(self.temp_dir, True) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -162,6 +162,7 @@ # Remark: Do not perform more than one test per open file, # since that does NOT catch the readline error on Windows. data = 'xxx' + self.f.close() for mode in ['w', 'wb', 'a', 'ab']: for attr in ['read', 'readline', 'readlines']: self.f = open(TESTFN, mode) diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,7 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) - f.close() + f.close() def test_head(self): response = self.request( diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ 
@test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -71,7 +71,6 @@ 'ArgumentDefaultsHelpFormatter', 'RawDescriptionHelpFormatter', 'RawTextHelpFormatter', - 'MetavarTypeHelpFormatter', 'Namespace', 'Action', 'ONE_OR_MORE', @@ -165,8 +164,6 @@ self._prog = prog self._indent_increment = indent_increment self._max_help_position = max_help_position - self._max_help_position = min(max_help_position, - max(width - 20, indent_increment * 2)) self._width = width self._current_indent = 0 @@ -338,7 +335,7 @@ else: line_len = len(indent) - 1 for part in parts: - if line_len + 1 + len(part) > text_width and line: + if line_len + 1 + len(part) > text_width: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 @@ -422,8 +419,7 @@ # produce all arg strings elif not action.option_strings: - default = self._get_default_metavar_for_positional(action) - part = self._format_args(action, default) + part = self._format_args(action, action.dest) # if it's in a group, strip the outer [] if action in group_actions: @@ -445,7 +441,7 @@ # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: - default = self._get_default_metavar_for_optional(action) + default = action.dest.upper() args_string = self._format_args(action, default) part = '%s %s' % (option_string, args_string) @@ -478,7 +474,7 @@ def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) - text_width = max(self._width - self._current_indent, 11) + text_width = self._width - self._current_indent indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' @@ -486,7 
+482,7 @@ # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) - help_width = max(self._width - help_position, 11) + help_width = self._width - help_position action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) @@ -531,8 +527,7 @@ def _format_action_invocation(self, action): if not action.option_strings: - default = self._get_default_metavar_for_positional(action) - metavar, = self._metavar_formatter(action, default)(1) + metavar, = self._metavar_formatter(action, action.dest)(1) return metavar else: @@ -546,7 +541,7 @@ # if the Optional takes a value, format is: # -s ARGS, --long ARGS else: - default = self._get_default_metavar_for_optional(action) + default = action.dest.upper() args_string = self._format_args(action, default) for option_string in action.option_strings: parts.append('%s %s' % (option_string, args_string)) @@ -624,12 +619,6 @@ def _get_help_string(self, action): return action.help - def _get_default_metavar_for_optional(self, action): - return action.dest.upper() - - def _get_default_metavar_for_positional(self, action): - return action.dest - class RawDescriptionHelpFormatter(HelpFormatter): """Help message formatter which retains any formatting in descriptions. @@ -639,7 +628,7 @@ """ def _fill_text(self, text, width, indent): - return ''.join(indent + line for line in text.splitlines(keepends=True)) + return ''.join([indent + line for line in text.splitlines(True)]) class RawTextHelpFormatter(RawDescriptionHelpFormatter): @@ -670,22 +659,6 @@ return help -class MetavarTypeHelpFormatter(HelpFormatter): - """Help message formatter which uses the argument 'type' as the default - metavar value (instead of the argument 'dest') - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. 
- """ - - def _get_default_metavar_for_optional(self, action): - return action.type.__name__ - - def _get_default_metavar_for_positional(self, action): - return action.type.__name__ - - - # ===================== # Options and Arguments # ===================== @@ -1581,6 +1554,7 @@ usage=None, description=None, epilog=None, + version=None, parents=[], formatter_class=HelpFormatter, prefix_chars='-', @@ -1589,6 +1563,14 @@ conflict_handler='error', add_help=True): + if version is not None: + import warnings + warnings.warn( + """The "version" argument to ArgumentParser is deprecated. """ + """Please use """ + """"add_argument(..., action='version', version="N", ...)" """ + """instead""", DeprecationWarning) + superinit = super(ArgumentParser, self).__init__ superinit(description=description, prefix_chars=prefix_chars, @@ -1602,6 +1584,7 @@ self.prog = prog self.usage = usage self.epilog = epilog + self.version = version self.formatter_class = formatter_class self.fromfile_prefix_chars = fromfile_prefix_chars self.add_help = add_help @@ -1616,7 +1599,7 @@ return string self.register('type', None, identity) - # add help argument if necessary + # add help and version arguments if necessary # (using explicit default to override global argument_default) default_prefix = '-' if '-' in prefix_chars else prefix_chars[0] if self.add_help: @@ -1624,6 +1607,12 @@ default_prefix+'h', default_prefix*2+'help', action='help', default=SUPPRESS, help=_('show this help message and exit')) + if self.version: + self.add_argument( + default_prefix+'v', default_prefix*2+'version', + action='version', default=SUPPRESS, + version=self.version, + help=_("show program's version number and exit")) # add parent arguments and defaults for parent in parents: @@ -1643,6 +1632,7 @@ 'prog', 'usage', 'description', + 'version', 'formatter_class', 'conflict_handler', 'add_help', @@ -1962,29 +1952,29 @@ # if we didn't consume all the argument strings, there were extras 
extras.extend(arg_strings[stop_index:]) - # make sure all required actions were present and also convert - # action defaults which were not given as arguments - required_actions = [] + # if we didn't use all the Positional objects, there were too few + # arg strings supplied. + if positionals: + self.error(_('too few arguments')) + + # make sure all required actions were present, and convert defaults. for action in self._actions: if action not in seen_actions: if action.required: - required_actions.append(_get_action_name(action)) + name = _get_action_name(action) + self.error(_('argument %s is required') % name) else: # Convert action default now instead of doing it before # parsing arguments to avoid calling convert functions # twice (which may fail) if the argument was given, but # only if it was defined already in the namespace if (action.default is not None and - isinstance(action.default, str) and - hasattr(namespace, action.dest) and - action.default is getattr(namespace, action.dest)): + isinstance(action.default, str) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): setattr(namespace, action.dest, self._get_value(action, action.default)) - if required_actions: - self.error(_('the following arguments are required: %s') % - ', '.join(required_actions)) - # make sure all required groups had one option present for group in self._mutually_exclusive_groups: if group.required: @@ -2336,6 +2326,16 @@ # determine help from format above return formatter.format_help() + def format_version(self): + import warnings + warnings.warn( + 'The format_version method is deprecated -- the "version" ' + 'argument to ArgumentParser is no longer supported.', + DeprecationWarning) + formatter = self._get_formatter() + formatter.add_text(self.version) + return formatter.format_help() + def _get_formatter(self): return self.formatter_class(prog=self.prog) @@ -2352,6 +2352,14 @@ file = _sys.stdout self._print_message(self.format_help(), 
file) + def print_version(self, file=None): + import warnings + warnings.warn( + 'The print_version method is deprecated -- the "version" ' + 'argument to ArgumentParser is no longer supported.', + DeprecationWarning) + self._print_message(self.format_version(), file) + def _print_message(self, message, file=None): if message: if file is None: diff --git a/lib-python/3/configparser.py b/lib-python/3/configparser.py --- a/lib-python/3/configparser.py +++ b/lib-python/3/configparser.py @@ -118,8 +118,7 @@ between keys and values are surrounded by spaces. """ -from collections.abc import MutableMapping -from collections import OrderedDict as _default_dict, ChainMap as _ChainMap +from collections import MutableMapping, OrderedDict as _default_dict, _ChainMap import functools import io import itertools @@ -144,6 +143,23 @@ class Error(Exception): """Base class for ConfigParser exceptions.""" + def _get_message(self): + """Getter for 'message'; needed only to override deprecation in + BaseException. + """ + return self.__message + + def _set_message(self, value): + """Setter for 'message'; needed only to override deprecation in + BaseException. + """ + self.__message = value + + # BaseException.message has been deprecated since Python 2.6. To prevent + # DeprecationWarning from popping up over this pre-existing attribute, use + # a new property that takes lookup precedence. 
+ message = property(_get_message, _set_message) + def __init__(self, msg=''): self.message = msg Exception.__init__(self, msg) @@ -174,7 +190,7 @@ def __init__(self, section, source=None, lineno=None): msg = [repr(section), " already exists"] if source is not None: - message = ["While reading from ", repr(source)] + message = ["While reading from ", source] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": section ") @@ -200,7 +216,7 @@ msg = [repr(option), " in section ", repr(section), " already exists"] if source is not None: - message = ["While reading from ", repr(source)] + message = ["While reading from ", source] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": option ") @@ -286,7 +302,7 @@ raise ValueError("Required argument `source' not given.") elif filename: source = filename - Error.__init__(self, 'Source contains parsing errors: %r' % source) + Error.__init__(self, 'Source contains parsing errors: %s' % source) self.source = source self.errors = [] self.args = (source, ) @@ -322,7 +338,7 @@ def __init__(self, filename, lineno, line): Error.__init__( self, - 'File contains no section headers.\nfile: %r, line: %d\n%r' % + 'File contains no section headers.\nfile: %s, line: %d\n%r' % (filename, lineno, line)) self.source = filename self.lineno = lineno @@ -439,7 +455,7 @@ tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax if '$' in tmp_value: raise ValueError("invalid interpolation syntax in %r at " - "position %d" % (value, tmp_value.find('$'))) + "position %d" % (value, tmp_value.find('%'))) return value def _interpolate_some(self, parser, option, accum, rest, section, map, @@ -943,9 +959,7 @@ # XXX this is not atomic if read_dict fails at any point. Then again, # no update method in configparser is atomic in this implementation. 
- if key == self.default_section: - self._defaults.clear() - elif key in self._sections: + if key in self._sections: self._sections[key].clear() self.read_dict({key: value}) @@ -991,26 +1005,18 @@ indent_level = 0 e = None # None, or an exception for lineno, line in enumerate(fp, start=1): - comment_start = sys.maxsize + comment_start = None # strip inline comments - inline_prefixes = {p: -1 for p in self._inline_comment_prefixes} - while comment_start == sys.maxsize and inline_prefixes: - next_prefixes = {} - for prefix, index in inline_prefixes.items(): - index = line.find(prefix, index+1) - if index == -1: - continue - next_prefixes[prefix] = index - if index == 0 or (index > 0 and line[index-1].isspace()): - comment_start = min(comment_start, index) - inline_prefixes = next_prefixes + for prefix in self._inline_comment_prefixes: + index = line.find(prefix) + if index == 0 or (index > 0 and line[index-1].isspace()): + comment_start = index + break # strip full line comments for prefix in self._comment_prefixes: if line.strip().startswith(prefix): comment_start = 0 break - if comment_start == sys.maxsize: - comment_start = None value = line[:comment_start].strip() if not value: if self._empty_lines_in_values: diff --git a/lib-python/3/distutils/__init__.py b/lib-python/3/distutils/__init__.py --- a/lib-python/3/distutils/__init__.py +++ b/lib-python/3/distutils/__init__.py @@ -13,5 +13,5 @@ # Updated automatically by the Python release process. 
# #--start constants-- -__version__ = "3.3.5" +__version__ = "3.2.5" #--end constants-- diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -4,11 +4,10 @@ modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" -import sys, os, re +import sys, os, re, imp from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version -from distutils.sysconfig import get_config_h_filename from distutils.dep_util import newer_group from distutils.extension import Extension from distutils.util import get_platform @@ -36,6 +35,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext(Command): @@ -160,11 +164,6 @@ if isinstance(self.include_dirs, str): self.include_dirs = self.include_dirs.split(os.pathsep) - # If in a virtualenv, add its include directory - # Issue 16116 - if sys.exec_prefix != sys.base_exec_prefix: - self.include_dirs.append(os.path.join(sys.exec_prefix, 'include')) - # Put the Python "system" include dir at the end, so that # any local include dirs take precedence. self.include_dirs.append(py_include) @@ -194,9 +193,7 @@ # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. 
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) - if sys.base_exec_prefix != sys.prefix: # Issue 16116 - self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs')) + self.library_dirs.append(os.path.join(sys.exec_prefix, 'include')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: @@ -204,11 +201,13 @@ # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree - self.include_dirs.append(os.path.dirname(get_config_h_filename())) - _sys_home = getattr(sys, '_home', None) - if _sys_home: - self.library_dirs.append(_sys_home) - if MSVC_VERSION >= 9: + if 0: + # pypy has no PC directory + self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) + if 1: + # pypy has no PCBuild directory + pass + elif MSVC_VERSION == 9: # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = '' @@ -247,10 +246,12 @@ # building python standard extensions self.library_dirs.append('.') - # For building extensions with a shared Python library, + # for extensions under Linux or Solaris with a shared Python library, # Python's library directory must be appended to library_dirs - # See Issues: #1600860, #4366 - if (sysconfig.get_config_var('Py_ENABLE_SHARED')): + sysconfig.get_config_var('Py_ENABLE_SHARED') + if ((sys.platform.startswith('linux') or sys.platform.startswith('gnu') + or sys.platform.startswith('sunos')) + and sysconfig.get_config_var('Py_ENABLE_SHARED')): if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): # building third party extensions self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) @@ -675,10 +676,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. 
If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + ext_suffix = _get_c_extension_suffix() + if ext_suffix is None: + ext_suffix = get_config_var('EXT_SUFFIX') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - ext_suffix = get_config_var('EXT_SUFFIX') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + ext_suffix + ext_suffix = '_d.pyd' return os.path.join(*ext_path) + ext_suffix def get_export_symbols(self, ext): @@ -697,24 +706,17 @@ shared extension. On most platforms, this is just 'ext.libraries'; on Windows and OS/2, we add the Python library (eg. python20.dll). """ - # The python library is always needed on Windows. For MSVC, this - # is redundant, since the library is mentioned in a pragma in - # pyconfig.h that MSVC groks. The other Windows compilers all seem - # to need it mentioned explicitly, though, so that's what we do. - # Append '_d' to the python import library on debug builds. + # For PyPy, we must not add any such Python library, on any platform + if "__pypy__" in sys.builtin_module_names: + return ext.libraries + # The python library is always needed on Windows. 
if sys.platform == "win32": - from distutils.msvccompiler import MSVCCompiler - if not isinstance(self.compiler, MSVCCompiler): - template = "python%d%d" - if self.debug: - template = template + '_d' - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) - # don't extend ext.libraries, it may be shared with other - # extensions, it is a reference to the original list - return ext.libraries + [pythonlib] - else: - return ext.libraries + template = "python%d%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + # don't extend ext.libraries, it may be shared with other + # extensions, it is a reference to the original list + return ext.libraries + [pythonlib] elif sys.platform == "os2emx": # EMX/GCC requires the python library explicitly, and I # believe VACPP does as well (though not confirmed) - AIM Apr01 diff --git a/lib-python/3/distutils/sysconfig.py b/lib-python/3/distutils/sysconfig.py --- a/lib-python/3/distutils/sysconfig.py +++ b/lib-python/3/distutils/sysconfig.py @@ -9,589 +9,17 @@ Email: """ -import os -import re import sys -from .errors import DistutilsPlatformError -# These are needed in a couple of spots, so just compute them once. -PREFIX = os.path.normpath(sys.prefix) -EXEC_PREFIX = os.path.normpath(sys.exec_prefix) -BASE_PREFIX = os.path.normpath(sys.base_prefix) -BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) +# The content of this file is redirected from +# sysconfig_cpython or sysconfig_pypy. -# Path to the base directory of the project. On Windows the binary may -# live in project/PCBuild9. If we're dealing with an x64 Windows build, -# it'll live in project/PCbuild/amd64. 
-# set for cross builds -if "_PYTHON_PROJECT_BASE" in os.environ: - project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"]) +if '__pypy__' in sys.builtin_module_names: + from distutils.sysconfig_pypy import * + from distutils.sysconfig_pypy import _config_vars # needed by setuptools + from distutils.sysconfig_pypy import _variable_rx # read_setup_file() else: - project_base = os.path.dirname(os.path.abspath(sys.executable)) -if os.name == "nt" and "pcbuild" in project_base[-8:].lower(): - project_base = os.path.abspath(os.path.join(project_base, os.path.pardir)) -# PC/VS7.1 -if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower(): - project_base = os.path.abspath(os.path.join(project_base, os.path.pardir, - os.path.pardir)) -# PC/AMD64 -if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower(): - project_base = os.path.abspath(os.path.join(project_base, os.path.pardir, - os.path.pardir)) - -# python_build: (Boolean) if true, we're either building Python or -# building an extension with an un-installed Python, so we use -# different (hard-wired) directories. -# Setup.local is available for Makefile builds including VPATH builds, -# Setup.dist is available on Windows -def _is_python_source_dir(d): - for fn in ("Setup.dist", "Setup.local"): - if os.path.isfile(os.path.join(d, "Modules", fn)): - return True - return False -_sys_home = getattr(sys, '_home', None) -if _sys_home and os.name == 'nt' and \ - _sys_home.lower().endswith(('pcbuild', 'pcbuild\\amd64')): - _sys_home = os.path.dirname(_sys_home) - if _sys_home.endswith('pcbuild'): # must be amd64 - _sys_home = os.path.dirname(_sys_home) -def _python_build(): - if _sys_home: - return _is_python_source_dir(_sys_home) - return _is_python_source_dir(project_base) -python_build = _python_build() - -# Calculate the build qualifier flags if they are defined. Adding the flags -# to the include and lib directories only makes sense for an installation, not -# an in-source build. 
-build_flags = '' -try: - if not python_build: - build_flags = sys.abiflags -except AttributeError: - # It's not a configure-based build, so the sys module doesn't have - # this attribute, which is fine. - pass - -def get_python_version(): - """Return a string containing the major and minor Python version, - leaving off the patchlevel. Sample return values could be '1.5' - or '2.2'. - """ - return sys.version[:3] - - -def get_python_inc(plat_specific=0, prefix=None): - """Return the directory containing installed Python header files. - - If 'plat_specific' is false (the default), this is the path to the - non-platform-specific header files, i.e. Python.h and so on; - otherwise, this is the path to platform-specific header files - (namely pyconfig.h). - - If 'prefix' is supplied, use it instead of sys.base_prefix or - sys.base_exec_prefix -- i.e., ignore 'plat_specific'. - """ - if prefix is None: - prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX - if os.name == "posix": - if python_build: - # Assume the executable is in the build directory. The - # pyconfig.h file should be in the same directory. Since - # the build directory may not be the source directory, we - # must use "srcdir" from the makefile to find the "Include" - # directory. 
- base = _sys_home or project_base - if plat_specific: - return base - if _sys_home: - incdir = os.path.join(_sys_home, get_config_var('AST_H_DIR')) - else: - incdir = os.path.join(get_config_var('srcdir'), 'Include') - return os.path.normpath(incdir) - python_dir = 'python' + get_python_version() + build_flags - return os.path.join(prefix, "include", python_dir) - elif os.name == "nt": - return os.path.join(prefix, "include") - elif os.name == "os2": - return os.path.join(prefix, "Include") - else: - raise DistutilsPlatformError( - "I don't know where Python installs its C header files " - "on platform '%s'" % os.name) - - -def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): - """Return the directory containing the Python library (standard or - site additions). - - If 'plat_specific' is true, return the directory containing - platform-specific modules, i.e. any module from a non-pure-Python - module distribution; otherwise, return the platform-shared library - directory. If 'standard_lib' is true, return the directory - containing standard Python library modules; otherwise, return the - directory for site-specific modules. - - If 'prefix' is supplied, use it instead of sys.base_prefix or - sys.base_exec_prefix -- i.e., ignore 'plat_specific'. 
- """ - if prefix is None: - if standard_lib: - prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX - else: - prefix = plat_specific and EXEC_PREFIX or PREFIX - - if os.name == "posix": - libpython = os.path.join(prefix, - "lib", "python" + get_python_version()) - if standard_lib: - return libpython - else: - return os.path.join(libpython, "site-packages") - elif os.name == "nt": - if standard_lib: - return os.path.join(prefix, "Lib") - else: - if get_python_version() < "2.2": - return prefix - else: - return os.path.join(prefix, "Lib", "site-packages") - elif os.name == "os2": - if standard_lib: - return os.path.join(prefix, "Lib") - else: - return os.path.join(prefix, "Lib", "site-packages") - else: - raise DistutilsPlatformError( - "I don't know where Python installs its library " - "on platform '%s'" % os.name) - - - -def customize_compiler(compiler): - """Do any platform-specific customization of a CCompiler instance. - - Mainly needed on Unix, so we can plug in the information that - varies across Unices and is stored in Python's Makefile. - """ - if compiler.compiler_type == "unix": - if sys.platform == "darwin": - # Perform first-time customization of compiler-related - # config vars on OS X now that we know we need a compiler. - # This is primarily to support Pythons from binary - # installers. The kind and paths to build tools on - # the user system may vary significantly from the system - # that Python itself was built on. Also the user OS - # version and build tools may not support the same set - # of CPU architectures for universal builds. 
- global _config_vars - if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): - import _osx_support - _osx_support.customize_compiler(_config_vars) - _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' - - (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ - get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') - - if 'CC' in os.environ: - newcc = os.environ['CC'] - if (sys.platform == 'darwin' - and 'LDSHARED' not in os.environ - and ldshared.startswith(cc)): - # On OS X, if CC is overridden, use that as the default - # command for LDSHARED as well - ldshared = newcc + ldshared[len(cc):] - cc = newcc - if 'CXX' in os.environ: - cxx = os.environ['CXX'] - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - if 'AR' in os.environ: - ar = os.environ['AR'] - if 'ARFLAGS' in os.environ: - archiver = ar + ' ' + os.environ['ARFLAGS'] - else: - archiver = ar + ' ' + ar_flags - - cc_cmd = cc + ' ' + cflags - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - compiler_cxx=cxx, - linker_so=ldshared, - linker_exe=cc, - archiver=archiver) - - compiler.shared_lib_extension = shlib_suffix - - -def get_config_h_filename(): - """Return full pathname of installed pyconfig.h file.""" - if python_build: - if os.name == "nt": - inc_dir = os.path.join(_sys_home or project_base, "PC") - else: - inc_dir = _sys_home or project_base - else: - inc_dir = get_python_inc(plat_specific=1) - 
if get_python_version() < '2.2': - config_h = 'config.h' - else: - # The name of the config.h file changed in 2.2 - config_h = 'pyconfig.h' - return os.path.join(inc_dir, config_h) - - -def get_makefile_filename(): - """Return full pathname of installed Makefile from the Python build.""" - if python_build: - return os.path.join(_sys_home or project_base, "Makefile") - lib_dir = get_python_lib(plat_specific=0, standard_lib=1) - config_file = 'config-{}{}'.format(get_python_version(), build_flags) - return os.path.join(lib_dir, config_file, 'Makefile') - - -def parse_config_h(fp, g=None): - """Parse a config.h-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - if g is None: - g = {} - define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") - undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") - # - while True: - line = fp.readline() - if not line: - break - m = define_rx.match(line) - if m: - n, v = m.group(1, 2) - try: v = int(v) - except ValueError: pass - g[n] = v - else: - m = undef_rx.match(line) - if m: - g[m.group(1)] = 0 - return g - - -# Regexes needed for parsing Makefile (and similar syntaxes, -# like old-style Setup files). -_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") -_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") -_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") - -def parse_makefile(fn, g=None): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. 
- """ - from distutils.text_file import TextFile - fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape") - - if g is None: - g = {} - done = {} - notdone = {} - - while True: - line = fp.readline() - if line is None: # eof - break - m = _variable_rx.match(line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. - renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - # do variable interpolation here - while notdone: - for name in list(notdone): - value = notdone[name] - m = _findvar1_rx.search(value) or _findvar2_rx.search(value) - if m: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if name.startswith('PY_') and name[3:] in renamed_variables: - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - else: - done[n] = item = "" - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - del notdone[name] - - if name.startswith('PY_') \ - and name[3:] in renamed_variables: - - name = name[3:] - if name not in done: - done[name] = value - else: - # bogus variable reference; just drop it since we can't deal - del notdone[name] 
- - fp.close() - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - g.update(done) - return g - - -def expand_makefile_vars(s, vars): - """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in - 'string' according to 'vars' (a dictionary mapping variable names to - values). Variables not present in 'vars' are silently expanded to the - empty string. The variable values in 'vars' should not contain further - variable expansions; if 'vars' is the output of 'parse_makefile()', - you're fine. Returns a variable-expanded version of 's'. - """ - - # This algorithm does multiple expansion, so if vars['foo'] contains - # "${bar}", it will expand ${foo} to ${bar}, and then expand - # ${bar}... and so forth. This is fine as long as 'vars' comes from - # 'parse_makefile()', which takes care of such expansions eagerly, - # according to make's variable expansion semantics. - - while True: - m = _findvar1_rx.search(s) or _findvar2_rx.search(s) - if m: - (beg, end) = m.span() - s = s[0:beg] + vars.get(m.group(1)) + s[end:] - else: - break - return s - - -_config_vars = None - -def _init_posix(): - """Initialize the module as appropriate for POSIX systems.""" - g = {} - # load the installed Makefile: - try: - filename = get_makefile_filename() - parse_makefile(filename, g) - except IOError as msg: - my_msg = "invalid Python installation: unable to open %s" % filename - if hasattr(msg, "strerror"): - my_msg = my_msg + " (%s)" % msg.strerror - - raise DistutilsPlatformError(my_msg) - - # load the installed pyconfig.h: - try: - filename = get_config_h_filename() - with open(filename) as file: - parse_config_h(file, g) - except IOError as msg: - my_msg = "invalid Python installation: unable to open %s" % filename - if hasattr(msg, "strerror"): - my_msg = my_msg + " (%s)" % msg.strerror - - raise DistutilsPlatformError(my_msg) - - # On AIX, there are wrong paths to the linker 
scripts in the Makefile - # -- these paths are relative to the Python source, but when installed - # the scripts are in another directory. - if python_build: - g['LDSHARED'] = g['BLDSHARED'] - - elif get_python_version() < '2.1': - # The following two branches are for 1.5.2 compatibility. - if sys.platform == 'aix4': # what about AIX 3.x ? - # Linker script is in the config directory, not in Modules as the - # Makefile says. - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - - g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp) - - global _config_vars - _config_vars = g - - -def _init_nt(): - """Initialize the module as appropriate for NT""" - g = {} - # set basic install directories - g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) - g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) - - # XXX hmmm.. a normal install puts include files here - g['INCLUDEPY'] = get_python_inc(plat_specific=0) - - g['SO'] = '.pyd' - g['EXT_SUFFIX'] = '.pyd' - g['EXE'] = ".exe" - g['VERSION'] = get_python_version().replace(".", "") - g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) - - global _config_vars - _config_vars = g - - -def _init_os2(): - """Initialize the module as appropriate for OS/2""" - g = {} - # set basic install directories - g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) - g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) - - # XXX hmmm.. a normal install puts include files here - g['INCLUDEPY'] = get_python_inc(plat_specific=0) - - g['SO'] = '.pyd' - g['EXT_SUFFIX'] = '.pyd' - g['EXE'] = ".exe" - - global _config_vars - _config_vars = g - - -def get_config_vars(*args): - """With no arguments, return a dictionary of all configuration - variables relevant for the current platform. 
Generally this includes - everything needed to build extensions and install both pure modules and - extensions. On Unix, this means every variable defined in Python's - installed Makefile; on Windows it's a much smaller set. - - With arguments, return a list of values that result from looking up - each argument in the configuration variable dictionary. - """ - global _config_vars - if _config_vars is None: - func = globals().get("_init_" + os.name) - if func: - func() - else: - _config_vars = {} - - # Normalized versions of prefix and exec_prefix are handy to have; - # in fact, these are the standard versions used most places in the - # Distutils. - _config_vars['prefix'] = PREFIX - _config_vars['exec_prefix'] = EXEC_PREFIX - - # Always convert srcdir to an absolute path - srcdir = _config_vars.get('srcdir', project_base) - if os.name == 'posix': - if python_build: - # If srcdir is a relative path (typically '.' or '..') - # then it should be interpreted relative to the directory - # containing Makefile. - base = os.path.dirname(get_makefile_filename()) - srcdir = os.path.join(base, srcdir) - else: - # srcdir is not meaningful since the installation is - # spread about the filesystem. We choose the - # directory containing the Makefile since we know it - # exists. - srcdir = os.path.dirname(get_makefile_filename()) - _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir)) - - # Convert srcdir into an absolute path if it appears necessary. - # Normally it is relative to the build directory. However, during - # testing, for example, we might be running a non-installed python - # from a different directory. - if python_build and os.name == "posix": - base = project_base - if (not os.path.isabs(_config_vars['srcdir']) and - base != os.getcwd()): - # srcdir is relative and we are not in the same directory - # as the executable. Assume executable is in the build - # directory and make srcdir absolute. 
- srcdir = os.path.join(base, _config_vars['srcdir']) - _config_vars['srcdir'] = os.path.normpath(srcdir) - - # OS X platforms require special customization to handle - # multi-architecture, multi-os-version installers - if sys.platform == 'darwin': - import _osx_support - _osx_support.customize_config_vars(_config_vars) - - if args: - vals = [] - for name in args: - vals.append(_config_vars.get(name)) - return vals - else: - return _config_vars - -def get_config_var(name): - """Return the value of a single variable using the dictionary - returned by 'get_config_vars()'. Equivalent to - get_config_vars().get(name) - """ - return get_config_vars().get(name) + from distutils.sysconfig_cpython import * + from distutils.sysconfig_cpython import _config_vars # needed by setuptools + from distutils.sysconfig_cpython import _variable_rx # read_setup_file() diff --git a/lib-python/3/distutils/sysconfig_cpython.py b/lib-python/3/distutils/sysconfig_cpython.py --- a/lib-python/3/distutils/sysconfig_cpython.py +++ b/lib-python/3/distutils/sysconfig_cpython.py @@ -146,7 +146,7 @@ "I don't know where Python installs its library " "on platform '%s'" % os.name) -_USE_CLANG = None + def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. @@ -155,42 +155,28 @@ varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": - (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ + if sys.platform == "darwin": + # Perform first-time customization of compiler-related + # config vars on OS X now that we know we need a compiler. + # This is primarily to support Pythons from binary + # installers. The kind and paths to build tools on + # the user system may vary significantly from the system + # that Python itself was built on. Also the user OS + # version and build tools may not support the same set + # of CPU architectures for universal builds. 
+ global _config_vars + if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): + import _osx_support + _osx_support.customize_compiler(_config_vars) + _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' + + (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') + 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') newcc = None if 'CC' in os.environ: - newcc = os.environ['CC'] - elif sys.platform == 'darwin' and cc == 'gcc-4.2': - # Issue #13590: - # Since Apple removed gcc-4.2 in Xcode 4.2, we can no - # longer assume it is available for extension module builds. - # If Python was built with gcc-4.2, check first to see if - # it is available on this system; if not, try to use clang - # instead unless the caller explicitly set CC. - global _USE_CLANG - if _USE_CLANG is None: - from distutils import log - from subprocess import Popen, PIPE - p = Popen("! type gcc-4.2 && type clang && exit 2", - shell=True, stdout=PIPE, stderr=PIPE) - p.wait() - if p.returncode == 2: - _USE_CLANG = True - log.warn("gcc-4.2 not found, using clang instead") - else: - _USE_CLANG = False - if _USE_CLANG: - newcc = 'clang' - if newcc: - # On OS X, if CC is overridden, use that as the default - # command for LDSHARED as well - if (sys.platform == 'darwin' - and 'LDSHARED' not in os.environ - and ldshared.startswith(cc)): - ldshared = newcc + ldshared[len(cc):] - cc = newcc + cc = os.environ['CC'] if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: @@ -225,7 +211,7 @@ linker_exe=cc, archiver=archiver) - compiler.shared_lib_extension = so_ext + compiler.shared_lib_extension = shlib_suffix def get_config_h_filename(): @@ -480,6 +466,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = 
os.path.dirname(os.path.abspath(sys.executable)) @@ -499,6 +486,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" global _config_vars @@ -543,43 +531,11 @@ srcdir = os.path.join(base, _config_vars['srcdir']) _config_vars['srcdir'] = os.path.normpath(srcdir) + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. - # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _config_vars[key] = flags - - else: - - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. 
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _config_vars[key] = flags + import _osx_support + _osx_support.customize_config_vars(_config_vars) if args: vals = [] diff --git a/lib-python/3/distutils/tests/test_build_ext.py b/lib-python/3/distutils/tests/test_build_ext.py --- a/lib-python/3/distutils/tests/test_build_ext.py +++ b/lib-python/3/distutils/tests/test_build_ext.py @@ -61,9 +61,9 @@ sys.stdout = old_stdout if ALREADY_TESTED: - self.skipTest('Already tested in %s' % ALREADY_TESTED) + return else: - ALREADY_TESTED = type(self).__name__ + ALREADY_TESTED = True import xx @@ -76,8 +76,8 @@ if support.HAVE_DOCSTRINGS: doc = 'This is a template module just for instruction.' self.assertEqual(xx.__doc__, doc) - self.assertIsInstance(xx.Null(), xx.Null) - self.assertIsInstance(xx.Str(), xx.Str) + self.assertTrue(isinstance(xx.Null(), xx.Null)) + self.assertTrue(isinstance(xx.Str(), xx.Str)) def tearDown(self): # Get everything back to normal @@ -110,9 +110,13 @@ _config_vars['Py_ENABLE_SHARED'] = old_var # make sure we get some library dirs under solaris - self.assertGreater(len(cmd.library_dirs), 0) + self.assertTrue(len(cmd.library_dirs) > 0) def test_user_site(self): + # site.USER_SITE was introduced in 2.6 + if sys.version < '2.6': + return + import site dist = Distribution({'name': 'xx'}) cmd = build_ext(dist) @@ -120,7 +124,7 @@ # making sure the user option is there options = [name for name, short, lable in cmd.user_options] - self.assertIn('user', options) + self.assertTrue('user' in options) # setting a value cmd.user = 1 @@ -167,10 +171,10 @@ from distutils import sysconfig py_include = sysconfig.get_python_inc() - self.assertIn(py_include, cmd.include_dirs) + self.assertTrue(py_include in cmd.include_dirs) plat_py_include = sysconfig.get_python_inc(plat_specific=1) - self.assertIn(plat_py_include, cmd.include_dirs) + self.assertTrue(plat_py_include in 
cmd.include_dirs) # make sure cmd.libraries is turned into a list # if it's a string @@ -251,13 +255,13 @@ 'some': 'bar'})] cmd.check_extensions_list(exts) ext = exts[0] - self.assertIsInstance(ext, Extension) + self.assertTrue(isinstance(ext, Extension)) # check_extensions_list adds in ext the values passed # when they are in ('include_dirs', 'library_dirs', 'libraries' # 'extra_objects', 'extra_compile_args', 'extra_link_args') self.assertEqual(ext.libraries, 'foo') - self.assertFalse(hasattr(ext, 'some')) + self.assertTrue(not hasattr(ext, 'some')) # 'macros' element of build info dict must be 1- or 2-tuple exts = [('foo.bar', {'sources': [''], 'libraries': 'foo', diff --git a/lib-python/3/distutils/tests/test_sysconfig.py b/lib-python/3/distutils/tests/test_sysconfig.py --- a/lib-python/3/distutils/tests/test_sysconfig.py +++ b/lib-python/3/distutils/tests/test_sysconfig.py @@ -50,41 +50,15 @@ def test_get_config_vars(self): cvars = sysconfig.get_config_vars() - self.assertIsInstance(cvars, dict) + self.assertTrue(isinstance(cvars, dict)) self.assertTrue(cvars) - def test_srcdir(self): - # See Issues #15322, #15364. - srcdir = sysconfig.get_config_var('srcdir') + def test_customize_compiler(self): - self.assertTrue(os.path.isabs(srcdir), srcdir) - self.assertTrue(os.path.isdir(srcdir), srcdir) + # not testing if default compiler is not unix + if get_default_compiler() != 'unix': + return - if sysconfig.python_build: - # The python executable has not been installed so srcdir - # should be a full source checkout. - Python_h = os.path.join(srcdir, 'Include', 'Python.h') - self.assertTrue(os.path.exists(Python_h), Python_h) - self.assertTrue(sysconfig._is_python_source_dir(srcdir)) - elif os.name == 'posix': - self.assertEqual(os.path.dirname(sysconfig.get_makefile_filename()), - srcdir) - - def test_srcdir_independent_of_cwd(self): - # srcdir should be independent of the current working directory - # See Issues #15322, #15364. 
- srcdir = sysconfig.get_config_var('srcdir') - cwd = os.getcwd() - try: - os.chdir('..') - srcdir2 = sysconfig.get_config_var('srcdir') - finally: - os.chdir(cwd) - self.assertEqual(srcdir, srcdir2) - - @unittest.skipUnless(get_default_compiler() == 'unix', - 'not testing if default compiler is not unix') - def test_customize_compiler(self): os.environ['AR'] = 'my_ar' os.environ['ARFLAGS'] = '-arflags' @@ -147,7 +121,7 @@ import sysconfig as global_sysconfig if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'): - self.skipTest('compiler flags customized') + return self.assertEqual(global_sysconfig.get_config_var('LDSHARED'), sysconfig.get_config_var('LDSHARED')) self.assertEqual(global_sysconfig.get_config_var('CC'), sysconfig.get_config_var('CC')) diff --git a/lib-python/3/email/charset.py b/lib-python/3/email/charset.py --- a/lib-python/3/email/charset.py +++ b/lib-python/3/email/charset.py @@ -194,7 +194,7 @@ header encoding. Charset.SHORTEST is not allowed for body_encoding. - output_charset: Some character sets must be converted before they can be + output_charset: Some character sets must be converted before the can be used in email headers or bodies. If the input_charset is one of them, this attribute will contain the name of the charset output will be converted to. Otherwise, it will @@ -386,8 +386,7 @@ string using the ascii codec produces the correct string version of the content. """ - if not string: - return string + # 7bit/8bit encodings return the string unchanged (module conversions) if self.body_encoding is BASE64: if isinstance(string, str): string = string.encode(self.output_charset) @@ -399,9 +398,13 @@ # character set, then, we must turn it into pseudo bytes via the # latin1 charset, which will encode any byte as a single code point # between 0 and 255, which is what body_encode is expecting. + # + # Note that this clause doesn't handle the case of a _payload that + # is already bytes. 
It never did, and the semantics of _payload + # being bytes has never been nailed down, so fixing that is a + # longer term TODO. if isinstance(string, str): - string = string.encode(self.output_charset) - string = string.decode('latin1') + string = string.encode(self.output_charset).decode('latin1') return email.quoprimime.body_encode(string) else: if isinstance(string, str): diff --git a/lib-python/3/email/encoders.py b/lib-python/3/email/encoders.py --- a/lib-python/3/email/encoders.py +++ b/lib-python/3/email/encoders.py @@ -71,8 +71,16 @@ msg['Content-Transfer-Encoding'] = '8bit' else: msg['Content-Transfer-Encoding'] = '7bit' + if not isinstance(orig, str): + msg.set_payload(orig.decode('ascii', 'surrogateescape')) def encode_noop(msg): """Do nothing.""" + # Well, not quite *nothing*: in Python3 we have to turn bytes into a string + # in our internal surrogateescaped form in order to keep the model + # consistent. + orig = msg.get_payload() + if not isinstance(orig, str): + msg.set_payload(orig.decode('ascii', 'surrogateescape')) diff --git a/lib-python/3/email/generator.py b/lib-python/3/email/generator.py --- a/lib-python/3/email/generator.py +++ b/lib-python/3/email/generator.py @@ -12,12 +12,9 @@ import random import warnings -from copy import deepcopy from io import StringIO, BytesIO -from email._policybase import compat32 from email.header import Header -from email.utils import _has_surrogates -import email.charset as _charset +from email.message import _has_surrogates UNDERSCORE = '_' NL = '\n' # XXX: no longer used by the code below. @@ -36,8 +33,7 @@ # Public interface # - def __init__(self, outfp, mangle_from_=True, maxheaderlen=None, *, - policy=None): + def __init__(self, outfp, mangle_from_=True, maxheaderlen=78): """Create the generator for message flattening. outfp is the output file-like object for writing the message to. It @@ -53,22 +49,16 @@ defined in the Header class. Set maxheaderlen to zero to disable header wrapping. 
The default is 78, as recommended (but not required) by RFC 2822. - - The policy keyword specifies a policy object that controls a number of - aspects of the generator's operation. The default policy maintains - backward compatibility. - """ self._fp = outfp self._mangle_from_ = mangle_from_ - self.maxheaderlen = maxheaderlen - self.policy = policy + self._maxheaderlen = maxheaderlen def write(self, s): # Just delegate to the file object self._fp.write(s) - def flatten(self, msg, unixfrom=False, linesep=None): + def flatten(self, msg, unixfrom=False, linesep='\n'): r"""Print the message object tree rooted at msg to the output file specified when the Generator instance was created. @@ -80,47 +70,29 @@ Note that for subobjects, no From_ line is printed. linesep specifies the characters used to indicate a new line in - the output. The default value is determined by the policy. + the output. The default value is the most useful for typical + Python applications, but it can be set to \r\n to produce RFC-compliant + line separators when needed. """ # We use the _XXX constants for operating on data that comes directly # from the msg, and _encoded_XXX constants for operating on data that # has already been converted (to bytes in the BytesGenerator) and # inserted into a temporary buffer. - policy = msg.policy if self.policy is None else self.policy - if linesep is not None: - policy = policy.clone(linesep=linesep) - if self.maxheaderlen is not None: - policy = policy.clone(max_line_length=self.maxheaderlen) - self._NL = policy.linesep - self._encoded_NL = self._encode(self._NL) + self._NL = linesep + self._encoded_NL = self._encode(linesep) self._EMPTY = '' self._encoded_EMTPY = self._encode('') - # Because we use clone (below) when we recursively process message - # subparts, and because clone uses the computed policy (not None), - # submessages will automatically get set to the computed policy when - # they are processed by this code. 
- old_gen_policy = self.policy - old_msg_policy = msg.policy - try: - self.policy = policy - msg.policy = policy - if unixfrom: - ufrom = msg.get_unixfrom() - if not ufrom: - ufrom = 'From nobody ' + time.ctime(time.time()) - self.write(ufrom + self._NL) - self._write(msg) - finally: - self.policy = old_gen_policy - msg.policy = old_msg_policy + if unixfrom: + ufrom = msg.get_unixfrom() + if not ufrom: + ufrom = 'From nobody ' + time.ctime(time.time()) + self.write(ufrom + self._NL) + self._write(msg) def clone(self, fp): """Clone this generator with the exact same options.""" - return self.__class__(fp, - self._mangle_from_, - None, # Use policy setting, which we've adjusted - policy=self.policy) + return self.__class__(fp, self._mangle_from_, self._maxheaderlen) # # Protected interface - undocumented ;/ @@ -174,18 +146,10 @@ # necessary. oldfp = self._fp try: - self._munge_cte = None self._fp = sfp = self._new_buffer() self._dispatch(msg) finally: self._fp = oldfp - munge_cte = self._munge_cte - del self._munge_cte - # If we munged the cte, copy the message again and re-fix the CTE. - if munge_cte: - msg = deepcopy(msg) - msg.replace_header('content-transfer-encoding', munge_cte[0]) - msg.replace_header('content-type', munge_cte[1]) # Write the headers. First we see if the message object wants to # handle that itself. If not, we'll do it generically. meth = getattr(msg, '_write_headers', None) @@ -216,8 +180,16 @@ # def _write_headers(self, msg): - for h, v in msg.raw_items(): - self.write(self.policy.fold(h, v)) + for h, v in msg.items(): + self.write('%s: ' % h) + if isinstance(v, Header): + self.write(v.encode( + maxlinelen=self._maxheaderlen, linesep=self._NL)+self._NL) + else: + # Header's got lots of smarts, so use it. 
+ header = Header(v, maxlinelen=self._maxheaderlen, + header_name=h) + self.write(header.encode(linesep=self._NL)+self._NL) # A blank line always separates headers from body self.write(self._NL) @@ -234,14 +206,9 @@ if _has_surrogates(msg._payload): charset = msg.get_param('charset') if charset is not None: - # XXX: This copy stuff is an ugly hack to avoid modifying the - # existing message. - msg = deepcopy(msg) del msg['content-transfer-encoding'] msg.set_payload(payload, charset) payload = msg.get_payload() - self._munge_cte = (msg['content-transfer-encoding'], - msg['content-type']) if self._mangle_from_: payload = fcre.sub('>From ', payload) self._write_lines(payload) @@ -299,8 +266,9 @@ # body-part self._fp.write(body_part) # close-delimiter transport-padding - self.write(self._NL + '--' + boundary + '--' + self._NL) + self.write(self._NL + '--' + boundary + '--') if msg.epilogue is not None: + self.write(self._NL) if self._mangle_from_: epilogue = fcre.sub('>From ', msg.epilogue) else: @@ -311,12 +279,12 @@ # The contents of signed parts has to stay unmodified in order to keep # the signature intact per RFC1847 2.1, so we disable header wrapping. # RDM: This isn't enough to completely preserve the part, but it helps. - p = self.policy - self.policy = p.clone(max_line_length=0) + old_maxheaderlen = self._maxheaderlen try: + self._maxheaderlen = 0 self._handle_multipart(msg) finally: - self.policy = p + self._maxheaderlen = old_maxheaderlen def _handle_message_delivery_status(self, msg): # We can't just write the headers directly to self's file object @@ -351,18 +319,16 @@ # message/rfc822. Such messages are generated by, for example, # Groupwise when forwarding unadorned messages. (Issue 7970.) So # in that case we just emit the string body. 
- payload = msg._payload + payload = msg.get_payload() if isinstance(payload, list): g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL) payload = s.getvalue() - else: - payload = self._encode(payload) self._fp.write(payload) # This used to be a module level function; we use a classmethod for this # and _compile_re so we can continue to provide the module level function # for backward compatibility by doing - # _make_boundary = Generator._make_boundary + # _make_boudary = Generator._make_boundary # at the end of the module. It *is* internal, so we could drop that... @classmethod def _make_boundary(cls, text=None): @@ -392,10 +358,7 @@ Functionally identical to the base Generator except that the output is bytes and not string. When surrogates were used in the input to encode - bytes, these are decoded back to bytes for output. If the policy has - cte_type set to 7bit, then the message is transformed such that the - non-ASCII bytes are properly content transfer encoded, using the charset - unknown-8bit. + bytes, these are decoded back to bytes for output. The outfp object must accept bytes in its write method. """ @@ -416,8 +379,23 @@ def _write_headers(self, msg): # This is almost the same as the string version, except for handling # strings with 8bit bytes. - for h, v in msg.raw_items(): - self._fp.write(self.policy.fold_binary(h, v)) + for h, v in msg._headers: + self.write('%s: ' % h) + if isinstance(v, Header): + self.write(v.encode(maxlinelen=self._maxheaderlen)+self._NL) + elif _has_surrogates(v): + # If we have raw 8bit data in a byte string, we have no idea + # what the encoding is. There is no safe way to split this + # string. If it's ascii-subset, then we could do a normal + # ascii split, but if it's multibyte then we could break the + # string. There's no way to know so the least harm seems to + # be to not split the string and risk it being too long. + self.write(v+NL) + else: + # Header's got lots of smarts and this string is safe... 
+ header = Header(v, maxlinelen=self._maxheaderlen, + header_name=h) + self.write(header.encode(linesep=self._NL)+self._NL) # A blank line always separates headers from body self.write(self._NL) @@ -426,7 +404,7 @@ # just write it back out. if msg._payload is None: return - if _has_surrogates(msg._payload) and not self.policy.cte_type=='7bit': + if _has_surrogates(msg._payload): if self._mangle_from_: msg._payload = fcre.sub(">From ", msg._payload) self._write_lines(msg._payload) diff --git a/lib-python/3/email/message.py b/lib-python/3/email/message.py --- a/lib-python/3/email/message.py +++ b/lib-python/3/email/message.py @@ -10,14 +10,14 @@ import uu import base64 import binascii +import warnings from io import BytesIO, StringIO # Intrapackage imports from email import utils from email import errors -from email._policybase import compat32 +from email import header from email import charset as _charset -from email._encoded_words import decode_b Charset = _charset.Charset SEMISPACE = '; ' @@ -26,6 +26,24 @@ # existence of which force quoting of the parameter value. tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]') +# How to figure out if we are processing strings that come from a byte +# source with undecodable characters. +_has_surrogates = re.compile( + '([^\ud800-\udbff]|\A)[\udc00-\udfff]([^\udc00-\udfff]|\Z)').search + + +# Helper functions +def _sanitize_header(name, value): + # If the header value contains surrogates, return a Header using + # the unknown-8bit charset to encode the bytes as encoded words. + if not isinstance(value, str): + # Assume it is already a header object + return value + if _has_surrogates(value): + return header.Header(value, charset=_charset.UNKNOWN8BIT, + header_name=name) + else: + return value def _splitparam(param): # Split header parameters. BAW: this may be too simple. It isn't @@ -118,8 +136,7 @@ you must use the explicit API to set or get all the headers. Not all of the mapping methods are implemented. 
""" - def __init__(self, policy=compat32): - self.policy = policy + def __init__(self): self._headers = [] self._unixfrom = None self._payload = None @@ -229,7 +246,7 @@ cte = str(self.get('content-transfer-encoding', '')).lower() # payload may be bytes here. if isinstance(payload, str): - if utils._has_surrogates(payload): + if _has_surrogates(payload): bpayload = payload.encode('ascii', 'surrogateescape') if not decode: try: @@ -250,12 +267,11 @@ if cte == 'quoted-printable': return utils._qdecode(bpayload) elif cte == 'base64': - # XXX: this is a bit of a hack; decode_b should probably be factored - # out somewhere, but I haven't figured out where yet. - value, defects = decode_b(b''.join(bpayload.splitlines())) - for defect in defects: - self.policy.handle_defect(self, defect) - return value + try: + return base64.b64decode(bpayload) + except binascii.Error: + # Incorrect padding + return bpayload elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'): in_file = BytesIO(bpayload) out_file = BytesIO() @@ -275,17 +291,7 @@ Optional charset sets the message's default character set. See set_charset() for details. """ - if hasattr(payload, 'encode'): - if charset is None: - self._payload = payload - return - if not isinstance(charset, Charset): - charset = Charset(charset) - payload = payload.encode(charset.output_charset) - if hasattr(payload, 'decode'): - self._payload = payload.decode('ascii', 'surrogateescape') - else: - self._payload = payload + self._payload = payload if charset is not None: self.set_charset(charset) @@ -324,16 +330,7 @@ try: cte(self) except TypeError: - # This 'if' is for backward compatibility, it allows unicode - # through even though that won't work correctly if the - # message is serialized. 
- payload = self._payload - if payload: - try: - payload = payload.encode('ascii', 'surrogateescape') - except UnicodeError: - payload = payload.encode(charset.output_charset) - self._payload = charset.body_encode(payload) + self._payload = charset.body_encode(self._payload) self.add_header('Content-Transfer-Encoding', cte) def get_charset(self): @@ -365,17 +362,7 @@ Note: this does not overwrite an existing header with the same field name. Use __delitem__() first to delete any existing headers. """ - max_count = self.policy.header_max_count(name) - if max_count: - lname = name.lower() - found = 0 - for k, v in self._headers: - if k.lower() == lname: - found += 1 - if found >= max_count: - raise ValueError("There may be at most {} {} headers " - "in a message".format(max_count, name)) - self._headers.append(self.policy.header_store_parse(name, val)) + self._headers.append((name, val)) def __delitem__(self, name): """Delete all occurrences of a header, if present. @@ -414,8 +401,7 @@ Any fields deleted and re-inserted are always appended to the header list. """ - return [self.policy.header_fetch_parse(k, v) - for k, v in self._headers] + return [_sanitize_header(k, v) for k, v in self._headers] def items(self): """Get all the message's header fields and values. @@ -425,8 +411,7 @@ Any fields deleted and re-inserted are always appended to the header list. """ - return [(k, self.policy.header_fetch_parse(k, v)) - for k, v in self._headers] + return [(k, _sanitize_header(k, v)) for k, v in self._headers] def get(self, name, failobj=None): """Get a header value. @@ -437,29 +422,10 @@ name = name.lower() for k, v in self._headers: if k.lower() == name: - return self.policy.header_fetch_parse(k, v) + return _sanitize_header(k, v) return failobj # - # "Internal" methods (public API, but only intended for use by a parser - # or generator, not normal application code. 
- # - - def set_raw(self, name, value): From noreply at buildbot.pypy.org Sat Apr 26 12:28:51 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 26 Apr 2014 12:28:51 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Undo all changes in lib-python/3 Message-ID: <20140426102851.A7C8D1C1008@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r71002:8cfe11f35dae Date: 2014-04-26 12:19 +0200 http://bitbucket.org/pypy/pypy/changeset/8cfe11f35dae/ Log: Undo all changes in lib-python/3 diff too long, truncating to 2000 out of 61393 lines diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -71,6 +71,7 @@ 'ArgumentDefaultsHelpFormatter', 'RawDescriptionHelpFormatter', 'RawTextHelpFormatter', + 'MetavarTypeHelpFormatter', 'Namespace', 'Action', 'ONE_OR_MORE', @@ -164,6 +165,8 @@ self._prog = prog self._indent_increment = indent_increment self._max_help_position = max_help_position + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) self._width = width self._current_indent = 0 @@ -335,7 +338,7 @@ else: line_len = len(indent) - 1 for part in parts: - if line_len + 1 + len(part) > text_width: + if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 @@ -419,7 +422,8 @@ # produce all arg strings elif not action.option_strings: - part = self._format_args(action, action.dest) + default = self._get_default_metavar_for_positional(action) + part = self._format_args(action, default) # if it's in a group, strip the outer [] if action in group_actions: @@ -441,7 +445,7 @@ # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: - default = action.dest.upper() + default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) part = '%s %s' % (option_string, args_string) @@ -474,7 +478,7 @@ def 
_format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent + text_width = max(self._width - self._current_indent, 11) indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' @@ -482,7 +486,7 @@ # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) - help_width = self._width - help_position + help_width = max(self._width - help_position, 11) action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) @@ -527,7 +531,8 @@ def _format_action_invocation(self, action): if not action.option_strings: - metavar, = self._metavar_formatter(action, action.dest)(1) + default = self._get_default_metavar_for_positional(action) + metavar, = self._metavar_formatter(action, default)(1) return metavar else: @@ -541,7 +546,7 @@ # if the Optional takes a value, format is: # -s ARGS, --long ARGS else: - default = action.dest.upper() + default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) for option_string in action.option_strings: parts.append('%s %s' % (option_string, args_string)) @@ -619,6 +624,12 @@ def _get_help_string(self, action): return action.help + def _get_default_metavar_for_optional(self, action): + return action.dest.upper() + + def _get_default_metavar_for_positional(self, action): + return action.dest + class RawDescriptionHelpFormatter(HelpFormatter): """Help message formatter which retains any formatting in descriptions. 
@@ -628,7 +639,7 @@ """ def _fill_text(self, text, width, indent): - return ''.join([indent + line for line in text.splitlines(True)]) + return ''.join(indent + line for line in text.splitlines(keepends=True)) class RawTextHelpFormatter(RawDescriptionHelpFormatter): @@ -659,6 +670,22 @@ return help +class MetavarTypeHelpFormatter(HelpFormatter): + """Help message formatter which uses the argument 'type' as the default + metavar value (instead of the argument 'dest') + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _get_default_metavar_for_optional(self, action): + return action.type.__name__ + + def _get_default_metavar_for_positional(self, action): + return action.type.__name__ + + + # ===================== # Options and Arguments # ===================== @@ -1554,7 +1581,6 @@ usage=None, description=None, epilog=None, - version=None, parents=[], formatter_class=HelpFormatter, prefix_chars='-', @@ -1563,14 +1589,6 @@ conflict_handler='error', add_help=True): - if version is not None: - import warnings - warnings.warn( - """The "version" argument to ArgumentParser is deprecated. 
""" - """Please use """ - """"add_argument(..., action='version', version="N", ...)" """ - """instead""", DeprecationWarning) - superinit = super(ArgumentParser, self).__init__ superinit(description=description, prefix_chars=prefix_chars, @@ -1584,7 +1602,6 @@ self.prog = prog self.usage = usage self.epilog = epilog - self.version = version self.formatter_class = formatter_class self.fromfile_prefix_chars = fromfile_prefix_chars self.add_help = add_help @@ -1599,7 +1616,7 @@ return string self.register('type', None, identity) - # add help and version arguments if necessary + # add help argument if necessary # (using explicit default to override global argument_default) default_prefix = '-' if '-' in prefix_chars else prefix_chars[0] if self.add_help: @@ -1607,12 +1624,6 @@ default_prefix+'h', default_prefix*2+'help', action='help', default=SUPPRESS, help=_('show this help message and exit')) - if self.version: - self.add_argument( - default_prefix+'v', default_prefix*2+'version', - action='version', default=SUPPRESS, - version=self.version, - help=_("show program's version number and exit")) # add parent arguments and defaults for parent in parents: @@ -1632,7 +1643,6 @@ 'prog', 'usage', 'description', - 'version', 'formatter_class', 'conflict_handler', 'add_help', @@ -1952,29 +1962,29 @@ # if we didn't consume all the argument strings, there were extras extras.extend(arg_strings[stop_index:]) - # if we didn't use all the Positional objects, there were too few - # arg strings supplied. - if positionals: - self.error(_('too few arguments')) - - # make sure all required actions were present, and convert defaults. 
+ # make sure all required actions were present and also convert + # action defaults which were not given as arguments + required_actions = [] for action in self._actions: if action not in seen_actions: if action.required: - name = _get_action_name(action) - self.error(_('argument %s is required') % name) + required_actions.append(_get_action_name(action)) else: # Convert action default now instead of doing it before # parsing arguments to avoid calling convert functions # twice (which may fail) if the argument was given, but # only if it was defined already in the namespace if (action.default is not None and - isinstance(action.default, str) and - hasattr(namespace, action.dest) and - action.default is getattr(namespace, action.dest)): + isinstance(action.default, str) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): setattr(namespace, action.dest, self._get_value(action, action.default)) + if required_actions: + self.error(_('the following arguments are required: %s') % + ', '.join(required_actions)) + # make sure all required groups had one option present for group in self._mutually_exclusive_groups: if group.required: @@ -2326,16 +2336,6 @@ # determine help from format above return formatter.format_help() - def format_version(self): - import warnings - warnings.warn( - 'The format_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - formatter = self._get_formatter() - formatter.add_text(self.version) - return formatter.format_help() - def _get_formatter(self): return self.formatter_class(prog=self.prog) @@ -2352,14 +2352,6 @@ file = _sys.stdout self._print_message(self.format_help(), file) - def print_version(self, file=None): - import warnings - warnings.warn( - 'The print_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - self._print_message(self.format_version(), 
file) - def _print_message(self, message, file=None): if message: if file is None: diff --git a/lib-python/3/configparser.py b/lib-python/3/configparser.py --- a/lib-python/3/configparser.py +++ b/lib-python/3/configparser.py @@ -118,7 +118,8 @@ between keys and values are surrounded by spaces. """ -from collections import MutableMapping, OrderedDict as _default_dict, _ChainMap +from collections.abc import MutableMapping +from collections import OrderedDict as _default_dict, ChainMap as _ChainMap import functools import io import itertools @@ -143,23 +144,6 @@ class Error(Exception): """Base class for ConfigParser exceptions.""" - def _get_message(self): - """Getter for 'message'; needed only to override deprecation in - BaseException. - """ - return self.__message - - def _set_message(self, value): - """Setter for 'message'; needed only to override deprecation in - BaseException. - """ - self.__message = value - - # BaseException.message has been deprecated since Python 2.6. To prevent - # DeprecationWarning from popping up over this pre-existing attribute, use - # a new property that takes lookup precedence. 
- message = property(_get_message, _set_message) - def __init__(self, msg=''): self.message = msg Exception.__init__(self, msg) @@ -190,7 +174,7 @@ def __init__(self, section, source=None, lineno=None): msg = [repr(section), " already exists"] if source is not None: - message = ["While reading from ", source] + message = ["While reading from ", repr(source)] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": section ") @@ -216,7 +200,7 @@ msg = [repr(option), " in section ", repr(section), " already exists"] if source is not None: - message = ["While reading from ", source] + message = ["While reading from ", repr(source)] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": option ") @@ -302,7 +286,7 @@ raise ValueError("Required argument `source' not given.") elif filename: source = filename - Error.__init__(self, 'Source contains parsing errors: %s' % source) + Error.__init__(self, 'Source contains parsing errors: %r' % source) self.source = source self.errors = [] self.args = (source, ) @@ -338,7 +322,7 @@ def __init__(self, filename, lineno, line): Error.__init__( self, - 'File contains no section headers.\nfile: %s, line: %d\n%r' % + 'File contains no section headers.\nfile: %r, line: %d\n%r' % (filename, lineno, line)) self.source = filename self.lineno = lineno @@ -455,7 +439,7 @@ tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax if '$' in tmp_value: raise ValueError("invalid interpolation syntax in %r at " - "position %d" % (value, tmp_value.find('%'))) + "position %d" % (value, tmp_value.find('$'))) return value def _interpolate_some(self, parser, option, accum, rest, section, map, @@ -959,7 +943,9 @@ # XXX this is not atomic if read_dict fails at any point. Then again, # no update method in configparser is atomic in this implementation. 
- if key in self._sections: + if key == self.default_section: + self._defaults.clear() + elif key in self._sections: self._sections[key].clear() self.read_dict({key: value}) @@ -1005,18 +991,26 @@ indent_level = 0 e = None # None, or an exception for lineno, line in enumerate(fp, start=1): - comment_start = None + comment_start = sys.maxsize # strip inline comments - for prefix in self._inline_comment_prefixes: - index = line.find(prefix) - if index == 0 or (index > 0 and line[index-1].isspace()): - comment_start = index - break + inline_prefixes = {p: -1 for p in self._inline_comment_prefixes} + while comment_start == sys.maxsize and inline_prefixes: + next_prefixes = {} + for prefix, index in inline_prefixes.items(): + index = line.find(prefix, index+1) + if index == -1: + continue + next_prefixes[prefix] = index + if index == 0 or (index > 0 and line[index-1].isspace()): + comment_start = min(comment_start, index) + inline_prefixes = next_prefixes # strip full line comments for prefix in self._comment_prefixes: if line.strip().startswith(prefix): comment_start = 0 break + if comment_start == sys.maxsize: + comment_start = None value = line[:comment_start].strip() if not value: if self._empty_lines_in_values: diff --git a/lib-python/3/distutils/__init__.py b/lib-python/3/distutils/__init__.py --- a/lib-python/3/distutils/__init__.py +++ b/lib-python/3/distutils/__init__.py @@ -13,5 +13,5 @@ # Updated automatically by the Python release process. 
# #--start constants-- -__version__ = "3.2.5" +__version__ = "3.3.5" #--end constants-- diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -4,10 +4,11 @@ modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" -import sys, os, re, imp +import sys, os, re from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version +from distutils.sysconfig import get_config_h_filename from distutils.dep_util import newer_group from distutils.extension import Extension from distutils.util import get_platform @@ -35,11 +36,6 @@ from distutils.ccompiler import show_compilers show_compilers() -def _get_c_extension_suffix(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - class build_ext(Command): @@ -164,6 +160,11 @@ if isinstance(self.include_dirs, str): self.include_dirs = self.include_dirs.split(os.pathsep) + # If in a virtualenv, add its include directory + # Issue 16116 + if sys.exec_prefix != sys.base_exec_prefix: + self.include_dirs.append(os.path.join(sys.exec_prefix, 'include')) + # Put the Python "system" include dir at the end, so that # any local include dirs take precedence. self.include_dirs.append(py_include) @@ -193,7 +194,9 @@ # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. 
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'include')) + self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) + if sys.base_exec_prefix != sys.prefix: # Issue 16116 + self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: @@ -201,13 +204,11 @@ # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree - if 0: - # pypy has no PC directory - self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) - if 1: - # pypy has no PCBuild directory - pass - elif MSVC_VERSION == 9: + self.include_dirs.append(os.path.dirname(get_config_h_filename())) + _sys_home = getattr(sys, '_home', None) + if _sys_home: + self.library_dirs.append(_sys_home) + if MSVC_VERSION >= 9: # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = '' @@ -246,12 +247,10 @@ # building python standard extensions self.library_dirs.append('.') - # for extensions under Linux or Solaris with a shared Python library, + # For building extensions with a shared Python library, # Python's library directory must be appended to library_dirs - sysconfig.get_config_var('Py_ENABLE_SHARED') - if ((sys.platform.startswith('linux') or sys.platform.startswith('gnu') - or sys.platform.startswith('sunos')) - and sysconfig.get_config_var('Py_ENABLE_SHARED')): + # See Issues: #1600860, #4366 + if (sysconfig.get_config_var('Py_ENABLE_SHARED')): if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): # building third party extensions self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) @@ -676,18 +675,10 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] - # PyPy tweak: first try to get the C extension suffix from - # 'imp'. 
If it fails we fall back to the 'SO' config var, like - # the previous version of this code did. This should work for - # CPython too. The point is that on PyPy with cpyext, the - # config var 'SO' is just ".so" but we want to return - # ".pypy-VERSION.so" instead. - ext_suffix = _get_c_extension_suffix() - if ext_suffix is None: - ext_suffix = get_config_var('EXT_SUFFIX') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows + ext_suffix = get_config_var('EXT_SUFFIX') if os.name == 'nt' and self.debug: - ext_suffix = '_d.pyd' + return os.path.join(*ext_path) + '_d' + ext_suffix return os.path.join(*ext_path) + ext_suffix def get_export_symbols(self, ext): @@ -706,17 +697,24 @@ shared extension. On most platforms, this is just 'ext.libraries'; on Windows and OS/2, we add the Python library (eg. python20.dll). """ - # For PyPy, we must not add any such Python library, on any platform - if "__pypy__" in sys.builtin_module_names: - return ext.libraries - # The python library is always needed on Windows. + # The python library is always needed on Windows. For MSVC, this + # is redundant, since the library is mentioned in a pragma in + # pyconfig.h that MSVC groks. The other Windows compilers all seem + # to need it mentioned explicitly, though, so that's what we do. + # Append '_d' to the python import library on debug builds. 
if sys.platform == "win32": - template = "python%d%d" - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) - # don't extend ext.libraries, it may be shared with other - # extensions, it is a reference to the original list - return ext.libraries + [pythonlib] + from distutils.msvccompiler import MSVCCompiler + if not isinstance(self.compiler, MSVCCompiler): + template = "python%d%d" + if self.debug: + template = template + '_d' + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + # don't extend ext.libraries, it may be shared with other + # extensions, it is a reference to the original list + return ext.libraries + [pythonlib] + else: + return ext.libraries elif sys.platform == "os2emx": # EMX/GCC requires the python library explicitly, and I # believe VACPP does as well (though not confirmed) - AIM Apr01 diff --git a/lib-python/3/distutils/sysconfig.py b/lib-python/3/distutils/sysconfig.py --- a/lib-python/3/distutils/sysconfig.py +++ b/lib-python/3/distutils/sysconfig.py @@ -9,17 +9,589 @@ Email: """ +import os +import re import sys +from .errors import DistutilsPlatformError -# The content of this file is redirected from -# sysconfig_cpython or sysconfig_pypy. +# These are needed in a couple of spots, so just compute them once. +PREFIX = os.path.normpath(sys.prefix) +EXEC_PREFIX = os.path.normpath(sys.exec_prefix) +BASE_PREFIX = os.path.normpath(sys.base_prefix) +BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) -if '__pypy__' in sys.builtin_module_names: - from distutils.sysconfig_pypy import * - from distutils.sysconfig_pypy import _config_vars # needed by setuptools - from distutils.sysconfig_pypy import _variable_rx # read_setup_file() +# Path to the base directory of the project. On Windows the binary may +# live in project/PCBuild9. If we're dealing with an x64 Windows build, +# it'll live in project/PCbuild/amd64. 
+# set for cross builds +if "_PYTHON_PROJECT_BASE" in os.environ: + project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"]) else: - from distutils.sysconfig_cpython import * - from distutils.sysconfig_cpython import _config_vars # needed by setuptools - from distutils.sysconfig_cpython import _variable_rx # read_setup_file() + project_base = os.path.dirname(os.path.abspath(sys.executable)) +if os.name == "nt" and "pcbuild" in project_base[-8:].lower(): + project_base = os.path.abspath(os.path.join(project_base, os.path.pardir)) +# PC/VS7.1 +if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower(): + project_base = os.path.abspath(os.path.join(project_base, os.path.pardir, + os.path.pardir)) +# PC/AMD64 +if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower(): + project_base = os.path.abspath(os.path.join(project_base, os.path.pardir, + os.path.pardir)) + +# python_build: (Boolean) if true, we're either building Python or +# building an extension with an un-installed Python, so we use +# different (hard-wired) directories. +# Setup.local is available for Makefile builds including VPATH builds, +# Setup.dist is available on Windows +def _is_python_source_dir(d): + for fn in ("Setup.dist", "Setup.local"): + if os.path.isfile(os.path.join(d, "Modules", fn)): + return True + return False +_sys_home = getattr(sys, '_home', None) +if _sys_home and os.name == 'nt' and \ + _sys_home.lower().endswith(('pcbuild', 'pcbuild\\amd64')): + _sys_home = os.path.dirname(_sys_home) + if _sys_home.endswith('pcbuild'): # must be amd64 + _sys_home = os.path.dirname(_sys_home) +def _python_build(): + if _sys_home: + return _is_python_source_dir(_sys_home) + return _is_python_source_dir(project_base) +python_build = _python_build() + +# Calculate the build qualifier flags if they are defined. Adding the flags +# to the include and lib directories only makes sense for an installation, not +# an in-source build. 
+build_flags = '' +try: + if not python_build: + build_flags = sys.abiflags +except AttributeError: + # It's not a configure-based build, so the sys module doesn't have + # this attribute, which is fine. + pass + +def get_python_version(): + """Return a string containing the major and minor Python version, + leaving off the patchlevel. Sample return values could be '1.5' + or '2.2'. + """ + return sys.version[:3] + + +def get_python_inc(plat_specific=0, prefix=None): + """Return the directory containing installed Python header files. + + If 'plat_specific' is false (the default), this is the path to the + non-platform-specific header files, i.e. Python.h and so on; + otherwise, this is the path to platform-specific header files + (namely pyconfig.h). + + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. + """ + if prefix is None: + prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX + if os.name == "posix": + if python_build: + # Assume the executable is in the build directory. The + # pyconfig.h file should be in the same directory. Since + # the build directory may not be the source directory, we + # must use "srcdir" from the makefile to find the "Include" + # directory. 
+ base = _sys_home or project_base + if plat_specific: + return base + if _sys_home: + incdir = os.path.join(_sys_home, get_config_var('AST_H_DIR')) + else: + incdir = os.path.join(get_config_var('srcdir'), 'Include') + return os.path.normpath(incdir) + python_dir = 'python' + get_python_version() + build_flags + return os.path.join(prefix, "include", python_dir) + elif os.name == "nt": + return os.path.join(prefix, "include") + elif os.name == "os2": + return os.path.join(prefix, "Include") + else: + raise DistutilsPlatformError( + "I don't know where Python installs its C header files " + "on platform '%s'" % os.name) + + +def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): + """Return the directory containing the Python library (standard or + site additions). + + If 'plat_specific' is true, return the directory containing + platform-specific modules, i.e. any module from a non-pure-Python + module distribution; otherwise, return the platform-shared library + directory. If 'standard_lib' is true, return the directory + containing standard Python library modules; otherwise, return the + directory for site-specific modules. + + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. 
+ """ + if prefix is None: + if standard_lib: + prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX + else: + prefix = plat_specific and EXEC_PREFIX or PREFIX + + if os.name == "posix": + libpython = os.path.join(prefix, + "lib", "python" + get_python_version()) + if standard_lib: + return libpython + else: + return os.path.join(libpython, "site-packages") + elif os.name == "nt": + if standard_lib: + return os.path.join(prefix, "Lib") + else: + if get_python_version() < "2.2": + return prefix + else: + return os.path.join(prefix, "Lib", "site-packages") + elif os.name == "os2": + if standard_lib: + return os.path.join(prefix, "Lib") + else: + return os.path.join(prefix, "Lib", "site-packages") + else: + raise DistutilsPlatformError( + "I don't know where Python installs its library " + "on platform '%s'" % os.name) + + + +def customize_compiler(compiler): + """Do any platform-specific customization of a CCompiler instance. + + Mainly needed on Unix, so we can plug in the information that + varies across Unices and is stored in Python's Makefile. + """ + if compiler.compiler_type == "unix": + if sys.platform == "darwin": + # Perform first-time customization of compiler-related + # config vars on OS X now that we know we need a compiler. + # This is primarily to support Pythons from binary + # installers. The kind and paths to build tools on + # the user system may vary significantly from the system + # that Python itself was built on. Also the user OS + # version and build tools may not support the same set + # of CPU architectures for universal builds. 
+ global _config_vars + if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): + import _osx_support + _osx_support.customize_compiler(_config_vars) + _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' + + (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ + get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', + 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') + + if 'CC' in os.environ: + newcc = os.environ['CC'] + if (sys.platform == 'darwin' + and 'LDSHARED' not in os.environ + and ldshared.startswith(cc)): + # On OS X, if CC is overridden, use that as the default + # command for LDSHARED as well + ldshared = newcc + ldshared[len(cc):] + cc = newcc + if 'CXX' in os.environ: + cxx = os.environ['CXX'] + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + if 'AR' in os.environ: + ar = os.environ['AR'] + if 'ARFLAGS' in os.environ: + archiver = ar + ' ' + os.environ['ARFLAGS'] + else: + archiver = ar + ' ' + ar_flags + + cc_cmd = cc + ' ' + cflags + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + compiler_cxx=cxx, + linker_so=ldshared, + linker_exe=cc, + archiver=archiver) + + compiler.shared_lib_extension = shlib_suffix + + +def get_config_h_filename(): + """Return full pathname of installed pyconfig.h file.""" + if python_build: + if os.name == "nt": + inc_dir = os.path.join(_sys_home or project_base, "PC") + else: + inc_dir = _sys_home or project_base + else: + inc_dir = get_python_inc(plat_specific=1) + 
if get_python_version() < '2.2': + config_h = 'config.h' + else: + # The name of the config.h file changed in 2.2 + config_h = 'pyconfig.h' + return os.path.join(inc_dir, config_h) + + +def get_makefile_filename(): + """Return full pathname of installed Makefile from the Python build.""" + if python_build: + return os.path.join(_sys_home or project_base, "Makefile") + lib_dir = get_python_lib(plat_specific=0, standard_lib=1) + config_file = 'config-{}{}'.format(get_python_version(), build_flags) + return os.path.join(lib_dir, config_file, 'Makefile') + + +def parse_config_h(fp, g=None): + """Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + """ + if g is None: + g = {} + define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") + undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") + # + while True: + line = fp.readline() + if not line: + break + m = define_rx.match(line) + if m: + n, v = m.group(1, 2) + try: v = int(v) + except ValueError: pass + g[n] = v + else: + m = undef_rx.match(line) + if m: + g[m.group(1)] = 0 + return g + + +# Regexes needed for parsing Makefile (and similar syntaxes, +# like old-style Setup files). +_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") +_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") +_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") + +def parse_makefile(fn, g=None): + """Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. 
+ """ + from distutils.text_file import TextFile + fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape") + + if g is None: + g = {} + done = {} + notdone = {} + + while True: + line = fp.readline() + if line is None: # eof + break + m = _variable_rx.match(line) + if m: + n, v = m.group(1, 2) + v = v.strip() + # `$$' is a literal `$' in make + tmpv = v.replace('$$', '') + + if "$" in tmpv: + notdone[n] = v + else: + try: + v = int(v) + except ValueError: + # insert literal `$' + done[n] = v.replace('$$', '$') + else: + done[n] = v + + # Variables with a 'PY_' prefix in the makefile. These need to + # be made available without that prefix through sysconfig. + # Special care is needed to ensure that variable expansion works, even + # if the expansion uses the name without a prefix. + renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') + + # do variable interpolation here + while notdone: + for name in list(notdone): + value = notdone[name] + m = _findvar1_rx.search(value) or _findvar2_rx.search(value) + if m: + n = m.group(1) + found = True + if n in done: + item = str(done[n]) + elif n in notdone: + # get it on a subsequent round + found = False + elif n in os.environ: + # do it like make: fall back to environment + item = os.environ[n] + + elif n in renamed_variables: + if name.startswith('PY_') and name[3:] in renamed_variables: + item = "" + + elif 'PY_' + n in notdone: + found = False + + else: + item = str(done['PY_' + n]) + else: + done[n] = item = "" + if found: + after = value[m.end():] + value = value[:m.start()] + item + after + if "$" in after: + notdone[name] = value + else: + try: value = int(value) + except ValueError: + done[name] = value.strip() + else: + done[name] = value + del notdone[name] + + if name.startswith('PY_') \ + and name[3:] in renamed_variables: + + name = name[3:] + if name not in done: + done[name] = value + else: + # bogus variable reference; just drop it since we can't deal + del notdone[name] 
+ + fp.close() + + # strip spurious spaces + for k, v in done.items(): + if isinstance(v, str): + done[k] = v.strip() + + # save the results in the global dictionary + g.update(done) + return g + + +def expand_makefile_vars(s, vars): + """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in + 'string' according to 'vars' (a dictionary mapping variable names to + values). Variables not present in 'vars' are silently expanded to the + empty string. The variable values in 'vars' should not contain further + variable expansions; if 'vars' is the output of 'parse_makefile()', + you're fine. Returns a variable-expanded version of 's'. + """ + + # This algorithm does multiple expansion, so if vars['foo'] contains + # "${bar}", it will expand ${foo} to ${bar}, and then expand + # ${bar}... and so forth. This is fine as long as 'vars' comes from + # 'parse_makefile()', which takes care of such expansions eagerly, + # according to make's variable expansion semantics. + + while True: + m = _findvar1_rx.search(s) or _findvar2_rx.search(s) + if m: + (beg, end) = m.span() + s = s[0:beg] + vars.get(m.group(1)) + s[end:] + else: + break + return s + + +_config_vars = None + +def _init_posix(): + """Initialize the module as appropriate for POSIX systems.""" + g = {} + # load the installed Makefile: + try: + filename = get_makefile_filename() + parse_makefile(filename, g) + except IOError as msg: + my_msg = "invalid Python installation: unable to open %s" % filename + if hasattr(msg, "strerror"): + my_msg = my_msg + " (%s)" % msg.strerror + + raise DistutilsPlatformError(my_msg) + + # load the installed pyconfig.h: + try: + filename = get_config_h_filename() + with open(filename) as file: + parse_config_h(file, g) + except IOError as msg: + my_msg = "invalid Python installation: unable to open %s" % filename + if hasattr(msg, "strerror"): + my_msg = my_msg + " (%s)" % msg.strerror + + raise DistutilsPlatformError(my_msg) + + # On AIX, there are wrong paths to the linker 
scripts in the Makefile + # -- these paths are relative to the Python source, but when installed + # the scripts are in another directory. + if python_build: + g['LDSHARED'] = g['BLDSHARED'] + + elif get_python_version() < '2.1': + # The following two branches are for 1.5.2 compatibility. + if sys.platform == 'aix4': # what about AIX 3.x ? + # Linker script is in the config directory, not in Modules as the + # Makefile says. + python_lib = get_python_lib(standard_lib=1) + ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') + python_exp = os.path.join(python_lib, 'config', 'python.exp') + + g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp) + + global _config_vars + _config_vars = g + + +def _init_nt(): + """Initialize the module as appropriate for NT""" + g = {} + # set basic install directories + g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) + g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) + + # XXX hmmm.. a normal install puts include files here + g['INCLUDEPY'] = get_python_inc(plat_specific=0) + + g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' + g['EXE'] = ".exe" + g['VERSION'] = get_python_version().replace(".", "") + g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) + + global _config_vars + _config_vars = g + + +def _init_os2(): + """Initialize the module as appropriate for OS/2""" + g = {} + # set basic install directories + g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) + g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) + + # XXX hmmm.. a normal install puts include files here + g['INCLUDEPY'] = get_python_inc(plat_specific=0) + + g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' + g['EXE'] = ".exe" + + global _config_vars + _config_vars = g + + +def get_config_vars(*args): + """With no arguments, return a dictionary of all configuration + variables relevant for the current platform. 
Generally this includes + everything needed to build extensions and install both pure modules and + extensions. On Unix, this means every variable defined in Python's + installed Makefile; on Windows it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + """ + global _config_vars + if _config_vars is None: + func = globals().get("_init_" + os.name) + if func: + func() + else: + _config_vars = {} + + # Normalized versions of prefix and exec_prefix are handy to have; + # in fact, these are the standard versions used most places in the + # Distutils. + _config_vars['prefix'] = PREFIX + _config_vars['exec_prefix'] = EXEC_PREFIX + + # Always convert srcdir to an absolute path + srcdir = _config_vars.get('srcdir', project_base) + if os.name == 'posix': + if python_build: + # If srcdir is a relative path (typically '.' or '..') + # then it should be interpreted relative to the directory + # containing Makefile. + base = os.path.dirname(get_makefile_filename()) + srcdir = os.path.join(base, srcdir) + else: + # srcdir is not meaningful since the installation is + # spread about the filesystem. We choose the + # directory containing the Makefile since we know it + # exists. + srcdir = os.path.dirname(get_makefile_filename()) + _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir)) + + # Convert srcdir into an absolute path if it appears necessary. + # Normally it is relative to the build directory. However, during + # testing, for example, we might be running a non-installed python + # from a different directory. + if python_build and os.name == "posix": + base = project_base + if (not os.path.isabs(_config_vars['srcdir']) and + base != os.getcwd()): + # srcdir is relative and we are not in the same directory + # as the executable. Assume executable is in the build + # directory and make srcdir absolute. 
+ srcdir = os.path.join(base, _config_vars['srcdir']) + _config_vars['srcdir'] = os.path.normpath(srcdir) + + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers + if sys.platform == 'darwin': + import _osx_support + _osx_support.customize_config_vars(_config_vars) + + if args: + vals = [] + for name in args: + vals.append(_config_vars.get(name)) + return vals + else: + return _config_vars + +def get_config_var(name): + """Return the value of a single variable using the dictionary + returned by 'get_config_vars()'. Equivalent to + get_config_vars().get(name) + """ + return get_config_vars().get(name) diff --git a/lib-python/3/distutils/sysconfig_cpython.py b/lib-python/3/distutils/sysconfig_cpython.py --- a/lib-python/3/distutils/sysconfig_cpython.py +++ b/lib-python/3/distutils/sysconfig_cpython.py @@ -146,7 +146,7 @@ "I don't know where Python installs its library " "on platform '%s'" % os.name) - +_USE_CLANG = None def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. @@ -155,28 +155,42 @@ varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": - if sys.platform == "darwin": - # Perform first-time customization of compiler-related - # config vars on OS X now that we know we need a compiler. - # This is primarily to support Pythons from binary - # installers. The kind and paths to build tools on - # the user system may vary significantly from the system - # that Python itself was built on. Also the user OS - # version and build tools may not support the same set - # of CPU architectures for universal builds. 
- global _config_vars - if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): - import _osx_support - _osx_support.customize_compiler(_config_vars) - _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' - - (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ + (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') + 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') newcc = None if 'CC' in os.environ: - cc = os.environ['CC'] + newcc = os.environ['CC'] + elif sys.platform == 'darwin' and cc == 'gcc-4.2': + # Issue #13590: + # Since Apple removed gcc-4.2 in Xcode 4.2, we can no + # longer assume it is available for extension module builds. + # If Python was built with gcc-4.2, check first to see if + # it is available on this system; if not, try to use clang + # instead unless the caller explicitly set CC. + global _USE_CLANG + if _USE_CLANG is None: + from distutils import log + from subprocess import Popen, PIPE + p = Popen("! 
type gcc-4.2 && type clang && exit 2", + shell=True, stdout=PIPE, stderr=PIPE) + p.wait() + if p.returncode == 2: + _USE_CLANG = True + log.warn("gcc-4.2 not found, using clang instead") + else: + _USE_CLANG = False + if _USE_CLANG: + newcc = 'clang' + if newcc: + # On OS X, if CC is overridden, use that as the default + # command for LDSHARED as well + if (sys.platform == 'darwin' + and 'LDSHARED' not in os.environ + and ldshared.startswith(cc)): + ldshared = newcc + ldshared[len(cc):] + cc = newcc if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: @@ -211,7 +225,7 @@ linker_exe=cc, archiver=archiver) - compiler.shared_lib_extension = shlib_suffix + compiler.shared_lib_extension = so_ext def get_config_h_filename(): @@ -466,7 +480,6 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' - g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) @@ -486,7 +499,6 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' - g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" global _config_vars @@ -531,11 +543,43 @@ srcdir = os.path.join(base, _config_vars['srcdir']) _config_vars['srcdir'] = os.path.normpath(srcdir) - # OS X platforms require special customization to handle - # multi-architecture, multi-os-version installers if sys.platform == 'darwin': - import _osx_support - _osx_support.customize_config_vars(_config_vars) + kernel_version = os.uname()[2] # Kernel version (8.4.3) + major_version = int(kernel_version.split('.')[0]) + + if major_version < 8: + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. 
+ 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + flags = _config_vars[key] + flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _config_vars[key] = flags + + else: + + # Allow the user to override the architecture flags using + # an environment variable. + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _config_vars[key] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _config_vars[key] = flags if args: vals = [] diff --git a/lib-python/3/distutils/tests/test_build_ext.py b/lib-python/3/distutils/tests/test_build_ext.py --- a/lib-python/3/distutils/tests/test_build_ext.py +++ b/lib-python/3/distutils/tests/test_build_ext.py @@ -61,9 +61,9 @@ sys.stdout = old_stdout if ALREADY_TESTED: - return + self.skipTest('Already tested in %s' % ALREADY_TESTED) else: - ALREADY_TESTED = True + ALREADY_TESTED = type(self).__name__ import xx @@ -76,8 +76,8 @@ if support.HAVE_DOCSTRINGS: doc = 'This is a template module just for instruction.' 
self.assertEqual(xx.__doc__, doc) - self.assertTrue(isinstance(xx.Null(), xx.Null)) - self.assertTrue(isinstance(xx.Str(), xx.Str)) + self.assertIsInstance(xx.Null(), xx.Null) + self.assertIsInstance(xx.Str(), xx.Str) def tearDown(self): # Get everything back to normal @@ -110,13 +110,9 @@ _config_vars['Py_ENABLE_SHARED'] = old_var # make sure we get some library dirs under solaris - self.assertTrue(len(cmd.library_dirs) > 0) + self.assertGreater(len(cmd.library_dirs), 0) def test_user_site(self): - # site.USER_SITE was introduced in 2.6 - if sys.version < '2.6': - return - import site dist = Distribution({'name': 'xx'}) cmd = build_ext(dist) @@ -124,7 +120,7 @@ # making sure the user option is there options = [name for name, short, lable in cmd.user_options] - self.assertTrue('user' in options) + self.assertIn('user', options) # setting a value cmd.user = 1 @@ -171,10 +167,10 @@ from distutils import sysconfig py_include = sysconfig.get_python_inc() - self.assertTrue(py_include in cmd.include_dirs) + self.assertIn(py_include, cmd.include_dirs) plat_py_include = sysconfig.get_python_inc(plat_specific=1) - self.assertTrue(plat_py_include in cmd.include_dirs) + self.assertIn(plat_py_include, cmd.include_dirs) # make sure cmd.libraries is turned into a list # if it's a string @@ -255,13 +251,13 @@ 'some': 'bar'})] cmd.check_extensions_list(exts) ext = exts[0] - self.assertTrue(isinstance(ext, Extension)) + self.assertIsInstance(ext, Extension) # check_extensions_list adds in ext the values passed # when they are in ('include_dirs', 'library_dirs', 'libraries' # 'extra_objects', 'extra_compile_args', 'extra_link_args') self.assertEqual(ext.libraries, 'foo') - self.assertTrue(not hasattr(ext, 'some')) + self.assertFalse(hasattr(ext, 'some')) # 'macros' element of build info dict must be 1- or 2-tuple exts = [('foo.bar', {'sources': [''], 'libraries': 'foo', diff --git a/lib-python/3/distutils/tests/test_sysconfig.py b/lib-python/3/distutils/tests/test_sysconfig.py --- 
a/lib-python/3/distutils/tests/test_sysconfig.py +++ b/lib-python/3/distutils/tests/test_sysconfig.py @@ -50,15 +50,41 @@ def test_get_config_vars(self): cvars = sysconfig.get_config_vars() - self.assertTrue(isinstance(cvars, dict)) + self.assertIsInstance(cvars, dict) self.assertTrue(cvars) + def test_srcdir(self): + # See Issues #15322, #15364. + srcdir = sysconfig.get_config_var('srcdir') + + self.assertTrue(os.path.isabs(srcdir), srcdir) + self.assertTrue(os.path.isdir(srcdir), srcdir) + + if sysconfig.python_build: + # The python executable has not been installed so srcdir + # should be a full source checkout. + Python_h = os.path.join(srcdir, 'Include', 'Python.h') + self.assertTrue(os.path.exists(Python_h), Python_h) + self.assertTrue(sysconfig._is_python_source_dir(srcdir)) + elif os.name == 'posix': + self.assertEqual(os.path.dirname(sysconfig.get_makefile_filename()), + srcdir) + + def test_srcdir_independent_of_cwd(self): + # srcdir should be independent of the current working directory + # See Issues #15322, #15364. 
+ srcdir = sysconfig.get_config_var('srcdir') + cwd = os.getcwd() + try: + os.chdir('..') + srcdir2 = sysconfig.get_config_var('srcdir') + finally: + os.chdir(cwd) + self.assertEqual(srcdir, srcdir2) + + @unittest.skipUnless(get_default_compiler() == 'unix', + 'not testing if default compiler is not unix') def test_customize_compiler(self): - - # not testing if default compiler is not unix - if get_default_compiler() != 'unix': - return - os.environ['AR'] = 'my_ar' os.environ['ARFLAGS'] = '-arflags' @@ -121,7 +147,7 @@ import sysconfig as global_sysconfig if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'): - return + self.skipTest('compiler flags customized') self.assertEqual(global_sysconfig.get_config_var('LDSHARED'), sysconfig.get_config_var('LDSHARED')) self.assertEqual(global_sysconfig.get_config_var('CC'), sysconfig.get_config_var('CC')) diff --git a/lib-python/3/email/charset.py b/lib-python/3/email/charset.py --- a/lib-python/3/email/charset.py +++ b/lib-python/3/email/charset.py @@ -194,7 +194,7 @@ header encoding. Charset.SHORTEST is not allowed for body_encoding. - output_charset: Some character sets must be converted before the can be + output_charset: Some character sets must be converted before they can be used in email headers or bodies. If the input_charset is one of them, this attribute will contain the name of the charset output will be converted to. Otherwise, it will @@ -386,7 +386,8 @@ string using the ascii codec produces the correct string version of the content. """ - # 7bit/8bit encodings return the string unchanged (module conversions) + if not string: + return string if self.body_encoding is BASE64: if isinstance(string, str): string = string.encode(self.output_charset) @@ -398,13 +399,9 @@ # character set, then, we must turn it into pseudo bytes via the # latin1 charset, which will encode any byte as a single code point # between 0 and 255, which is what body_encode is expecting. 
- # - # Note that this clause doesn't handle the case of a _payload that - # is already bytes. It never did, and the semantics of _payload - # being bytes has never been nailed down, so fixing that is a - # longer term TODO. if isinstance(string, str): - string = string.encode(self.output_charset).decode('latin1') + string = string.encode(self.output_charset) + string = string.decode('latin1') return email.quoprimime.body_encode(string) else: if isinstance(string, str): diff --git a/lib-python/3/email/encoders.py b/lib-python/3/email/encoders.py --- a/lib-python/3/email/encoders.py +++ b/lib-python/3/email/encoders.py @@ -71,16 +71,8 @@ msg['Content-Transfer-Encoding'] = '8bit' else: msg['Content-Transfer-Encoding'] = '7bit' - if not isinstance(orig, str): - msg.set_payload(orig.decode('ascii', 'surrogateescape')) def encode_noop(msg): """Do nothing.""" - # Well, not quite *nothing*: in Python3 we have to turn bytes into a string - # in our internal surrogateescaped form in order to keep the model - # consistent. - orig = msg.get_payload() - if not isinstance(orig, str): - msg.set_payload(orig.decode('ascii', 'surrogateescape')) diff --git a/lib-python/3/email/generator.py b/lib-python/3/email/generator.py --- a/lib-python/3/email/generator.py +++ b/lib-python/3/email/generator.py @@ -12,9 +12,12 @@ import random import warnings +from copy import deepcopy from io import StringIO, BytesIO +from email._policybase import compat32 from email.header import Header -from email.message import _has_surrogates +from email.utils import _has_surrogates +import email.charset as _charset UNDERSCORE = '_' NL = '\n' # XXX: no longer used by the code below. @@ -33,7 +36,8 @@ # Public interface # - def __init__(self, outfp, mangle_from_=True, maxheaderlen=78): + def __init__(self, outfp, mangle_from_=True, maxheaderlen=None, *, + policy=None): """Create the generator for message flattening. outfp is the output file-like object for writing the message to. 
It @@ -49,16 +53,22 @@ defined in the Header class. Set maxheaderlen to zero to disable header wrapping. The default is 78, as recommended (but not required) by RFC 2822. + + The policy keyword specifies a policy object that controls a number of + aspects of the generator's operation. The default policy maintains + backward compatibility. + """ self._fp = outfp self._mangle_from_ = mangle_from_ - self._maxheaderlen = maxheaderlen + self.maxheaderlen = maxheaderlen + self.policy = policy def write(self, s): # Just delegate to the file object self._fp.write(s) - def flatten(self, msg, unixfrom=False, linesep='\n'): + def flatten(self, msg, unixfrom=False, linesep=None): r"""Print the message object tree rooted at msg to the output file specified when the Generator instance was created. @@ -70,29 +80,47 @@ Note that for subobjects, no From_ line is printed. linesep specifies the characters used to indicate a new line in - the output. The default value is the most useful for typical - Python applications, but it can be set to \r\n to produce RFC-compliant - line separators when needed. + the output. The default value is determined by the policy. """ # We use the _XXX constants for operating on data that comes directly # from the msg, and _encoded_XXX constants for operating on data that # has already been converted (to bytes in the BytesGenerator) and # inserted into a temporary buffer. 
- self._NL = linesep - self._encoded_NL = self._encode(linesep) + policy = msg.policy if self.policy is None else self.policy + if linesep is not None: + policy = policy.clone(linesep=linesep) + if self.maxheaderlen is not None: + policy = policy.clone(max_line_length=self.maxheaderlen) + self._NL = policy.linesep + self._encoded_NL = self._encode(self._NL) self._EMPTY = '' self._encoded_EMTPY = self._encode('') - if unixfrom: - ufrom = msg.get_unixfrom() - if not ufrom: - ufrom = 'From nobody ' + time.ctime(time.time()) - self.write(ufrom + self._NL) - self._write(msg) + # Because we use clone (below) when we recursively process message + # subparts, and because clone uses the computed policy (not None), + # submessages will automatically get set to the computed policy when + # they are processed by this code. + old_gen_policy = self.policy + old_msg_policy = msg.policy + try: + self.policy = policy + msg.policy = policy + if unixfrom: + ufrom = msg.get_unixfrom() + if not ufrom: + ufrom = 'From nobody ' + time.ctime(time.time()) + self.write(ufrom + self._NL) + self._write(msg) + finally: + self.policy = old_gen_policy + msg.policy = old_msg_policy def clone(self, fp): """Clone this generator with the exact same options.""" - return self.__class__(fp, self._mangle_from_, self._maxheaderlen) + return self.__class__(fp, + self._mangle_from_, + None, # Use policy setting, which we've adjusted + policy=self.policy) # # Protected interface - undocumented ;/ @@ -146,10 +174,18 @@ # necessary. oldfp = self._fp try: + self._munge_cte = None self._fp = sfp = self._new_buffer() self._dispatch(msg) finally: self._fp = oldfp + munge_cte = self._munge_cte + del self._munge_cte + # If we munged the cte, copy the message again and re-fix the CTE. + if munge_cte: + msg = deepcopy(msg) + msg.replace_header('content-transfer-encoding', munge_cte[0]) + msg.replace_header('content-type', munge_cte[1]) # Write the headers. 
First we see if the message object wants to # handle that itself. If not, we'll do it generically. meth = getattr(msg, '_write_headers', None) @@ -180,16 +216,8 @@ # def _write_headers(self, msg): - for h, v in msg.items(): - self.write('%s: ' % h) - if isinstance(v, Header): - self.write(v.encode( - maxlinelen=self._maxheaderlen, linesep=self._NL)+self._NL) - else: - # Header's got lots of smarts, so use it. - header = Header(v, maxlinelen=self._maxheaderlen, - header_name=h) - self.write(header.encode(linesep=self._NL)+self._NL) + for h, v in msg.raw_items(): + self.write(self.policy.fold(h, v)) # A blank line always separates headers from body self.write(self._NL) @@ -206,9 +234,14 @@ if _has_surrogates(msg._payload): charset = msg.get_param('charset') if charset is not None: + # XXX: This copy stuff is an ugly hack to avoid modifying the + # existing message. + msg = deepcopy(msg) del msg['content-transfer-encoding'] msg.set_payload(payload, charset) payload = msg.get_payload() + self._munge_cte = (msg['content-transfer-encoding'], + msg['content-type']) if self._mangle_from_: payload = fcre.sub('>From ', payload) self._write_lines(payload) @@ -266,9 +299,8 @@ # body-part self._fp.write(body_part) # close-delimiter transport-padding - self.write(self._NL + '--' + boundary + '--') + self.write(self._NL + '--' + boundary + '--' + self._NL) if msg.epilogue is not None: - self.write(self._NL) if self._mangle_from_: epilogue = fcre.sub('>From ', msg.epilogue) else: @@ -279,12 +311,12 @@ # The contents of signed parts has to stay unmodified in order to keep # the signature intact per RFC1847 2.1, so we disable header wrapping. # RDM: This isn't enough to completely preserve the part, but it helps. 
- old_maxheaderlen = self._maxheaderlen + p = self.policy + self.policy = p.clone(max_line_length=0) try: - self._maxheaderlen = 0 self._handle_multipart(msg) finally: - self._maxheaderlen = old_maxheaderlen + self.policy = p def _handle_message_delivery_status(self, msg): # We can't just write the headers directly to self's file object @@ -319,16 +351,18 @@ # message/rfc822. Such messages are generated by, for example, # Groupwise when forwarding unadorned messages. (Issue 7970.) So # in that case we just emit the string body. - payload = msg.get_payload() + payload = msg._payload if isinstance(payload, list): g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL) payload = s.getvalue() + else: + payload = self._encode(payload) self._fp.write(payload) # This used to be a module level function; we use a classmethod for this # and _compile_re so we can continue to provide the module level function # for backward compatibility by doing - # _make_boudary = Generator._make_boundary + # _make_boundary = Generator._make_boundary # at the end of the module. It *is* internal, so we could drop that... @classmethod def _make_boundary(cls, text=None): @@ -358,7 +392,10 @@ Functionally identical to the base Generator except that the output is bytes and not string. When surrogates were used in the input to encode - bytes, these are decoded back to bytes for output. + bytes, these are decoded back to bytes for output. If the policy has + cte_type set to 7bit, then the message is transformed such that the + non-ASCII bytes are properly content transfer encoded, using the charset + unknown-8bit. The outfp object must accept bytes in its write method. """ @@ -379,23 +416,8 @@ def _write_headers(self, msg): # This is almost the same as the string version, except for handling # strings with 8bit bytes. 
- for h, v in msg._headers: - self.write('%s: ' % h) - if isinstance(v, Header): - self.write(v.encode(maxlinelen=self._maxheaderlen)+self._NL) - elif _has_surrogates(v): - # If we have raw 8bit data in a byte string, we have no idea - # what the encoding is. There is no safe way to split this - # string. If it's ascii-subset, then we could do a normal - # ascii split, but if it's multibyte then we could break the - # string. There's no way to know so the least harm seems to - # be to not split the string and risk it being too long. - self.write(v+NL) - else: - # Header's got lots of smarts and this string is safe... - header = Header(v, maxlinelen=self._maxheaderlen, - header_name=h) - self.write(header.encode(linesep=self._NL)+self._NL) + for h, v in msg.raw_items(): + self._fp.write(self.policy.fold_binary(h, v)) # A blank line always separates headers from body self.write(self._NL) @@ -404,7 +426,7 @@ # just write it back out. if msg._payload is None: return - if _has_surrogates(msg._payload): + if _has_surrogates(msg._payload) and not self.policy.cte_type=='7bit': if self._mangle_from_: msg._payload = fcre.sub(">From ", msg._payload) self._write_lines(msg._payload) diff --git a/lib-python/3/email/message.py b/lib-python/3/email/message.py --- a/lib-python/3/email/message.py +++ b/lib-python/3/email/message.py @@ -10,14 +10,14 @@ import uu import base64 import binascii -import warnings from io import BytesIO, StringIO # Intrapackage imports from email import utils from email import errors -from email import header +from email._policybase import compat32 from email import charset as _charset +from email._encoded_words import decode_b Charset = _charset.Charset SEMISPACE = '; ' @@ -26,24 +26,6 @@ # existence of which force quoting of the parameter value. tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]') -# How to figure out if we are processing strings that come from a byte -# source with undecodable characters. 
-_has_surrogates = re.compile( - '([^\ud800-\udbff]|\A)[\udc00-\udfff]([^\udc00-\udfff]|\Z)').search - - -# Helper functions -def _sanitize_header(name, value): - # If the header value contains surrogates, return a Header using - # the unknown-8bit charset to encode the bytes as encoded words. - if not isinstance(value, str): - # Assume it is already a header object - return value - if _has_surrogates(value): - return header.Header(value, charset=_charset.UNKNOWN8BIT, - header_name=name) - else: - return value def _splitparam(param): # Split header parameters. BAW: this may be too simple. It isn't @@ -136,7 +118,8 @@ you must use the explicit API to set or get all the headers. Not all of the mapping methods are implemented. """ - def __init__(self): + def __init__(self, policy=compat32): + self.policy = policy self._headers = [] self._unixfrom = None self._payload = None @@ -246,7 +229,7 @@ cte = str(self.get('content-transfer-encoding', '')).lower() # payload may be bytes here. if isinstance(payload, str): - if _has_surrogates(payload): + if utils._has_surrogates(payload): bpayload = payload.encode('ascii', 'surrogateescape') if not decode: try: @@ -267,11 +250,12 @@ if cte == 'quoted-printable': return utils._qdecode(bpayload) elif cte == 'base64': - try: - return base64.b64decode(bpayload) - except binascii.Error: - # Incorrect padding - return bpayload + # XXX: this is a bit of a hack; decode_b should probably be factored + # out somewhere, but I haven't figured out where yet. + value, defects = decode_b(b''.join(bpayload.splitlines())) + for defect in defects: + self.policy.handle_defect(self, defect) + return value elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'): in_file = BytesIO(bpayload) out_file = BytesIO() @@ -291,7 +275,17 @@ Optional charset sets the message's default character set. See set_charset() for details. 
""" - self._payload = payload + if hasattr(payload, 'encode'): + if charset is None: + self._payload = payload + return + if not isinstance(charset, Charset): + charset = Charset(charset) + payload = payload.encode(charset.output_charset) + if hasattr(payload, 'decode'): + self._payload = payload.decode('ascii', 'surrogateescape') + else: + self._payload = payload if charset is not None: self.set_charset(charset) @@ -330,7 +324,16 @@ try: cte(self) except TypeError: - self._payload = charset.body_encode(self._payload) + # This 'if' is for backward compatibility, it allows unicode + # through even though that won't work correctly if the + # message is serialized. + payload = self._payload + if payload: + try: + payload = payload.encode('ascii', 'surrogateescape') + except UnicodeError: + payload = payload.encode(charset.output_charset) + self._payload = charset.body_encode(payload) self.add_header('Content-Transfer-Encoding', cte) def get_charset(self): @@ -362,7 +365,17 @@ Note: this does not overwrite an existing header with the same field name. Use __delitem__() first to delete any existing headers. """ - self._headers.append((name, val)) + max_count = self.policy.header_max_count(name) + if max_count: + lname = name.lower() + found = 0 + for k, v in self._headers: + if k.lower() == lname: + found += 1 + if found >= max_count: + raise ValueError("There may be at most {} {} headers " + "in a message".format(max_count, name)) + self._headers.append(self.policy.header_store_parse(name, val)) def __delitem__(self, name): """Delete all occurrences of a header, if present. @@ -401,7 +414,8 @@ Any fields deleted and re-inserted are always appended to the header list. """ - return [_sanitize_header(k, v) for k, v in self._headers] + return [self.policy.header_fetch_parse(k, v) + for k, v in self._headers] def items(self): """Get all the message's header fields and values. @@ -411,7 +425,8 @@ Any fields deleted and re-inserted are always appended to the header list. 
""" - return [(k, _sanitize_header(k, v)) for k, v in self._headers] + return [(k, self.policy.header_fetch_parse(k, v)) + for k, v in self._headers] def get(self, name, failobj=None): """Get a header value. @@ -422,10 +437,29 @@ name = name.lower() for k, v in self._headers: if k.lower() == name: - return _sanitize_header(k, v) + return self.policy.header_fetch_parse(k, v) return failobj # + # "Internal" methods (public API, but only intended for use by a parser + # or generator, not normal application code. + # + + def set_raw(self, name, value): + """Store name and value in the model without modification. + + This is an "internal" API, intended only for use by a parser. + """ + self._headers.append((name, value)) + + def raw_items(self): + """Return the (name, value) header pairs without modification. + + This is an "internal" API, intended only for use by a generator. + """ + return iter(self._headers.copy()) + + # # Additional useful stuff # @@ -442,7 +476,7 @@ name = name.lower() for k, v in self._headers: if k.lower() == name: - values.append(_sanitize_header(k, v)) + values.append(self.policy.header_fetch_parse(k, v)) if not values: return failobj return values @@ -475,7 +509,7 @@ parts.append(_formatparam(k.replace('_', '-'), v)) if _value is not None: parts.insert(0, _value) - self._headers.append((_name, SEMISPACE.join(parts))) + self[_name] = SEMISPACE.join(parts) def replace_header(self, _name, _value): """Replace a header. 
@@ -487,7 +521,7 @@ _name = _name.lower() for i, (k, v) in zip(range(len(self._headers)), self._headers): if k.lower() == _name: - self._headers[i] = (k, _value) + self._headers[i] = self.policy.header_store_parse(k, _value) break else: raise KeyError(_name) @@ -619,7 +653,7 @@ If your application doesn't care whether the parameter was RFC 2231 encoded, it can turn the return value into a string as follows: - param = msg.get_param('foo') + rawparam = msg.get_param('foo') param = email.utils.collapse_rfc2231_value(rawparam) """ @@ -803,7 +837,8 @@ parts.append(k) else: parts.append('%s=%s' % (k, v)) - newheaders.append((h, SEMISPACE.join(parts))) + val = SEMISPACE.join(parts) + newheaders.append(self.policy.header_store_parse(h, val)) else: newheaders.append((h, v)) diff --git a/lib-python/3/email/parser.py b/lib-python/3/email/parser.py --- a/lib-python/3/email/parser.py +++ b/lib-python/3/email/parser.py @@ -4,18 +4,19 @@ """A parser of RFC 2822 and MIME email messages.""" -__all__ = ['Parser', 'HeaderParser', 'BytesParser'] +__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser'] import warnings from io import StringIO, TextIOWrapper from email.feedparser import FeedParser, BytesFeedParser from email.message import Message +from email._policybase import compat32 From noreply at buildbot.pypy.org Sat Apr 26 12:28:52 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 26 Apr 2014 12:28:52 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Same trick as ad57911bdbc4, allows to import ctypes.util. Message-ID: <20140426102852.CC4781C1008@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r71003:70fa7b2907b1 Date: 2014-04-26 12:26 +0200 http://bitbucket.org/pypy/pypy/changeset/70fa7b2907b1/ Log: Same trick as ad57911bdbc4, allows to import ctypes.util. 
diff --git a/lib-python/3/ctypes/util.py b/lib-python/3/ctypes/util.py --- a/lib-python/3/ctypes/util.py +++ b/lib-python/3/ctypes/util.py @@ -85,9 +85,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) From noreply at buildbot.pypy.org Sat Apr 26 14:55:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Apr 2014 14:55:46 +0200 (CEST) Subject: [pypy-commit] pypy default: (cfbolz, arigo) Message-ID: <20140426125546.5B8B71D2948@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71004:5d641e103b00 Date: 2014-04-26 14:54 +0200 http://bitbucket.org/pypy/pypy/changeset/5d641e103b00/ Log: (cfbolz, arigo) Pfff. Found and fixed the issue with unrolling logic. diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -5884,6 +5884,25 @@ """ self.optimize_loop(ops, expected) + def test_bug_unroll(self): + ops = """ + [p0] + i2 = getfield_gc_pure(p0, descr=immut_intval) + p1 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p1, 1242, descr=immut_intval) + jump(p1) + """ + preamble = """ + [p0] + i2 = getfield_gc_pure(p0, descr=immut_intval) + jump() + """ + expected = """ + [] + jump() + """ + self.optimize_loop(ops, expected, preamble) + def test_immutable_constantfold_recursive(self): ops = """ [] diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -180,10 +180,11 @@ self.optimizer.clear_newoperations() for i in 
range(len(original_jump_args)): + srcbox = jump_args[i] if values[i].is_virtual(): - values[i].force_box(self.optimizer) - if original_jump_args[i] is not jump_args[i]: - op = ResOperation(rop.SAME_AS, [jump_args[i]], original_jump_args[i]) + srcbox = values[i].force_box(self.optimizer) + if original_jump_args[i] is not srcbox: + op = ResOperation(rop.SAME_AS, [srcbox], original_jump_args[i]) self.optimizer.emit_operation(op) inputarg_setup_ops = self.optimizer.get_newoperations() From noreply at buildbot.pypy.org Sat Apr 26 14:58:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Apr 2014 14:58:54 +0200 (CEST) Subject: [pypy-commit] pypy default: Rename the test to something less generic Message-ID: <20140426125854.A1A941D2949@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71005:924305e0dcf6 Date: 2014-04-26 14:58 +0200 http://bitbucket.org/pypy/pypy/changeset/924305e0dcf6/ Log: Rename the test to something less generic diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -5884,7 +5884,7 @@ """ self.optimize_loop(ops, expected) - def test_bug_unroll(self): + def test_bug_unroll_with_immutables(self): ops = """ [p0] i2 = getfield_gc_pure(p0, descr=immut_intval) From noreply at buildbot.pypy.org Sat Apr 26 17:06:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 26 Apr 2014 17:06:04 +0200 (CEST) Subject: [pypy-commit] pypy default: mark XRangeStepOneIterator.stop as an immutable field Message-ID: <20140426150604.948CF1C1149@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71006:f32e46e00e4f Date: 2014-04-26 11:04 -0400 http://bitbucket.org/pypy/pypy/changeset/f32e46e00e4f/ Log: mark XRangeStepOneIterator.stop as an immutable field diff --git a/pypy/module/__builtin__/functional.py 
b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -436,6 +436,7 @@ ) W_XRange.typedef.acceptable_as_base_class = False + class W_XRangeIterator(W_Root): def __init__(self, space, current, remaining, step): self.space = space @@ -483,7 +484,10 @@ ) W_XRangeIterator.typedef.acceptable_as_base_class = False + class W_XRangeStepOneIterator(W_XRangeIterator): + _immutable_fields_ = ['stop'] + def __init__(self, space, start, stop): self.space = space self.current = start diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -20,8 +20,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i58 = getfield_gc(p18, descr=) - i59 = getfield_gc(p18, descr=) - i60 = int_lt(i58, i59) + i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) p62 = getfield_gc(ConstPtr(ptr37), descr=) From noreply at buildbot.pypy.org Sat Apr 26 18:38:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Apr 2014 18:38:20 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Minimum work to pass test_double_abort_markers_cb_write_write Message-ID: <20140426163820.C194D1C0E7C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1182:64af26e96549 Date: 2014-04-26 18:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/64af26e96549/ Log: Minimum work to pass test_double_abort_markers_cb_write_write diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -99,7 +99,8 @@ static void contention_management(uint8_t other_segment_num, - enum contention_kind_e kind) + enum contention_kind_e kind, + object_t *obj) { assert(_has_mutex()); assert(other_segment_num != STM_SEGMENT->segment_num); @@ -182,6 +183,8 @@ else if (!contmgr.abort_other) { 
dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; + if (kind == WRITE_WRITE_CONTENTION) + lookup_other_thread_recorded_marker(other_segment_num, obj); abort_with_mutex(); } @@ -259,7 +262,8 @@ } } -static void write_write_contention_management(uintptr_t lock_idx) +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj) { s_mutex_lock(); @@ -270,7 +274,7 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); - contention_management(other_segment_num, WRITE_WRITE_CONTENTION); + contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. */ @@ -281,10 +285,10 @@ static void write_read_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, WRITE_READ_CONTENTION); + contention_management(other_segment_num, WRITE_READ_CONTENTION, NULL); } static void inevitable_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, INEVITABLE_CONTENTION); + contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL); } diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -1,5 +1,6 @@ -static void write_write_contention_management(uintptr_t lock_idx); +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj); static void write_read_contention_management(uint8_t other_segment_num); static void inevitable_contention_management(uint8_t other_segment_num); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -76,9 +76,15 @@ assert(lock_idx < sizeof(write_locks)); retry: if (write_locks[lock_idx] == 0) { + /* A lock to prevent reading garbage from + lookup_other_thread_recorded_marker() */ + acquire_segment_lock(STM_SEGMENT->segment_base); + if 
(UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], - 0, lock_num))) + 0, lock_num))) { + release_segment_lock(STM_SEGMENT->segment_base); goto retry; + } dprintf_test(("write_slowpath %p -> mod_old\n", obj)); @@ -93,6 +99,8 @@ list_append2(STM_PSEGMENT->modified_old_objects_markers, marker[0], marker[1]); + release_segment_lock(STM_SEGMENT->segment_base); + /* We need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only one page in total. */ @@ -134,7 +142,7 @@ else { /* call the contention manager, and then retry (unless we were aborted). */ - write_write_contention_management(lock_idx); + write_write_contention_management(lock_idx, obj); goto retry; } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -156,6 +156,10 @@ /* For sleeping contention management */ bool signal_when_done; + /* When we mutate 'modified_old_objects' but we don't have the + global mutex, we must acquire this lock. */ + uint8_t segment_lock; + /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. 
*/ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; @@ -169,6 +173,7 @@ /* Temporarily stores the marker information */ char marker_self[_STM_MARKER_LEN]; + char marker_other[_STM_MARKER_LEN]; }; enum /* safe_point */ { @@ -238,3 +243,17 @@ static void copy_object_to_shared(object_t *obj, int source_segment_num); static void synchronize_object_now(object_t *obj); + +static inline void acquire_segment_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->segment_lock); + spinlock_acquire(*lock); +} + +static inline void release_segment_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->segment_lock); + spinlock_release(*lock); +} diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -79,6 +79,46 @@ tl->longest_marker_state = attribute_to; tl->longest_marker_time = time; memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); + memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); } pseg->marker_self[0] = 0; } + +static void lookup_other_thread_recorded_marker(uint8_t other_segment_num, + object_t *obj) +{ + struct stm_priv_segment_info_s *my_pseg, *other_pseg; + char *other_segment_base = get_segment_base(other_segment_num); + acquire_segment_lock(other_segment_base); + assert(_has_mutex()); + STM_PSEGMENT->marker_other[0] = 0; + + /* here, we acquired the other thread's segment_lock, which means that: + + (1) it has finished filling 'modified_old_objects' after it sets + up the write_locks[] value that we're conflicting with + + (2) it is not mutating 'modified_old_objects' right now (we have + the global mutex_lock at this point too). 
+ */ + + other_pseg = get_priv_segment(other_segment_num); + long i; + struct list_s *mlst = other_pseg->modified_old_objects; + struct list_s *mlstm = other_pseg->modified_old_objects_markers; + for (i = list_count(mlst); --i >= 0; ) { + if (list_item(mlst, i) == (uintptr_t)obj) { + uintptr_t marker[2]; + assert(list_count(mlstm) == 2 * list_count(mlst)); + marker[0] = list_item(mlstm, i * 2 + 0); + marker[1] = list_item(mlstm, i * 2 + 1); + + my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + marker_expand(marker, other_pseg->pub.segment_base, + my_pseg->marker_other); + break; + } + } + + release_segment_lock(other_segment_base); +} diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -6,3 +6,5 @@ static void marker_copy(stm_thread_local_t *tl, struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time); +static void lookup_other_thread_recorded_marker(uint8_t other_segment_num, + object_t *obj); diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -175,7 +175,7 @@ assert ffi.string(raw).startswith('29 ') assert seen == [29] - def test_double_abort_markers_cb(self): + def test_double_abort_markers_cb_write_write(self): @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") def expand_marker(base, number, ptr, outbuf, outbufsize): s = '%d\x00' % (number,) From noreply at buildbot.pypy.org Sat Apr 26 18:58:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Apr 2014 18:58:26 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Self/other marker in case of inevitable/inevitable conflict Message-ID: <20140426165826.13D911C3396@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1183:2a4a393ac14c Date: 2014-04-26 18:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/2a4a393ac14c/ Log: Self/other marker in case of inevitable/inevitable conflict diff --git a/c7/stm/contention.c 
b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -184,7 +184,9 @@ dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; if (kind == WRITE_WRITE_CONTENTION) - lookup_other_thread_recorded_marker(other_segment_num, obj); + marker_lookup_other_thread_write_write(other_segment_num, obj); + else if (kind == INEVITABLE_CONTENTION) + marker_lookup_other_thread_inev(other_segment_num); abort_with_mutex(); } diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -211,6 +211,11 @@ change_timing_state(STM_TIME_RUN_CURRENT); STM_PSEGMENT->start_time = tl->_timing_cur_start; STM_PSEGMENT->safe_point = SP_RUNNING; +#ifndef NDEBUG + STM_PSEGMENT->marker_inev[1] = 99999999999999999L; +#endif + if (jmpbuf == NULL) + marker_fetch_inev(); STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; @@ -727,6 +732,7 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); + marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -174,6 +174,7 @@ /* Temporarily stores the marker information */ char marker_self[_STM_MARKER_LEN]; char marker_other[_STM_MARKER_LEN]; + uintptr_t marker_inev[2]; }; enum /* safe_point */ { diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -428,6 +428,10 @@ for (i = list_count(lst); i > 0; i -= 2) { mark_visit_object((object_t *)list_item(lst, i - 1), base); } + if (get_priv_segment(j)->transaction_state == TS_INEVITABLE) { + uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; + mark_visit_object((object_t *)marker_inev_obj, base); + } } } diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -84,8 +84,8 @@ 
pseg->marker_self[0] = 0; } -static void lookup_other_thread_recorded_marker(uint8_t other_segment_num, - object_t *obj) +static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, + object_t *obj) { struct stm_priv_segment_info_s *my_pseg, *other_pseg; char *other_segment_base = get_segment_base(other_segment_num); @@ -122,3 +122,23 @@ release_segment_lock(other_segment_base); } + +static void marker_lookup_other_thread_inev(uint8_t other_segment_num) +{ + /* same as marker_lookup_other_thread_write_write(), but for + an inevitable contention instead of a write-write contention */ + struct stm_priv_segment_info_s *my_pseg, *other_pseg; + assert(_has_mutex()); + other_pseg = get_priv_segment(other_segment_num); + my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + marker_expand(other_pseg->marker_inev, other_pseg->pub.segment_base, + my_pseg->marker_other); +} + +static void marker_fetch_inev(void) +{ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->marker_inev[0] = marker[0]; + STM_PSEGMENT->marker_inev[1] = marker[1]; +} diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -6,5 +6,7 @@ static void marker_copy(stm_thread_local_t *tl, struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time); -static void lookup_other_thread_recorded_marker(uint8_t other_segment_num, - object_t *obj); +static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, + object_t *obj); +static void marker_lookup_other_thread_inev(uint8_t other_segment_num); +static void marker_fetch_inev(void); diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -203,3 +203,31 @@ assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE assert ffi.string(tl.longest_marker_self) == '21' assert ffi.string(tl.longest_marker_other) == '19' + + def 
test_double_abort_markers_cb_inevitable(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + # + self.start_transaction() + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + self.become_inevitable() + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + # + self.switch(1) + self.start_transaction() + self.push_root(ffi.cast("object_t *", 21)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + py.test.raises(Conflict, self.become_inevitable) + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_INEVITABLE + assert ffi.string(tl.longest_marker_self) == '21' + assert ffi.string(tl.longest_marker_other) == '19' From noreply at buildbot.pypy.org Sat Apr 26 19:18:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Apr 2014 19:18:13 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Report correctly write-read contention. Message-ID: <20140426171813.5A4971C023E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1184:5cc5bccf74cd Date: 2014-04-26 19:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/5cc5bccf74cd/ Log: Report correctly write-read contention. 
diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -187,6 +187,8 @@ marker_lookup_other_thread_write_write(other_segment_num, obj); else if (kind == INEVITABLE_CONTENTION) marker_lookup_other_thread_inev(other_segment_num); + else if (kind == WRITE_READ_CONTENTION) + marker_lookup_same_thread_write_read(obj); abort_with_mutex(); } @@ -285,9 +287,10 @@ s_mutex_unlock(); } -static void write_read_contention_management(uint8_t other_segment_num) +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj) { - contention_management(other_segment_num, WRITE_READ_CONTENTION, NULL); + contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); } static void inevitable_contention_management(uint8_t other_segment_num) diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -1,7 +1,8 @@ static void write_write_contention_management(uintptr_t lock_idx, object_t *obj); -static void write_read_contention_management(uint8_t other_segment_num); +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj); static void inevitable_contention_management(uint8_t other_segment_num); static inline bool is_abort(uintptr_t nursery_end) { diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -288,7 +288,7 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ - write_read_contention_management(i); + write_read_contention_management(i, item); /* If we reach this point, we didn't abort, but maybe we had to wait for the other thread to commit. 
If we diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -82,6 +82,28 @@ memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); } pseg->marker_self[0] = 0; + pseg->marker_other[0] = 0; +} + +static void marker_lookup_from_thread(struct stm_priv_segment_info_s *pseg, + object_t *obj, char *outmarker) +{ + outmarker[0] = 0; + + long i; + struct list_s *mlst = pseg->modified_old_objects; + struct list_s *mlstm = pseg->modified_old_objects_markers; + for (i = list_count(mlst); --i >= 0; ) { + if (list_item(mlst, i) == (uintptr_t)obj) { + uintptr_t marker[2]; + assert(list_count(mlstm) == 2 * list_count(mlst)); + marker[0] = list_item(mlstm, i * 2 + 0); + marker[1] = list_item(mlstm, i * 2 + 1); + + marker_expand(marker, pseg->pub.segment_base, outmarker); + break; + } + } } static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, @@ -91,7 +113,6 @@ char *other_segment_base = get_segment_base(other_segment_num); acquire_segment_lock(other_segment_base); assert(_has_mutex()); - STM_PSEGMENT->marker_other[0] = 0; /* here, we acquired the other thread's segment_lock, which means that: @@ -101,24 +122,10 @@ (2) it is not mutating 'modified_old_objects' right now (we have the global mutex_lock at this point too). 
*/ + my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + other_pseg = get_priv_segment(other_segment_num); - other_pseg = get_priv_segment(other_segment_num); - long i; - struct list_s *mlst = other_pseg->modified_old_objects; - struct list_s *mlstm = other_pseg->modified_old_objects_markers; - for (i = list_count(mlst); --i >= 0; ) { - if (list_item(mlst, i) == (uintptr_t)obj) { - uintptr_t marker[2]; - assert(list_count(mlstm) == 2 * list_count(mlst)); - marker[0] = list_item(mlstm, i * 2 + 0); - marker[1] = list_item(mlstm, i * 2 + 1); - - my_pseg = get_priv_segment(STM_SEGMENT->segment_num); - marker_expand(marker, other_pseg->pub.segment_base, - my_pseg->marker_other); - break; - } - } + marker_lookup_from_thread(other_pseg, obj, my_pseg->marker_other); release_segment_lock(other_segment_base); } @@ -135,6 +142,14 @@ my_pseg->marker_other); } +static void marker_lookup_same_thread_write_read(object_t *obj) +{ + struct stm_priv_segment_info_s *my_pseg; + + my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + marker_lookup_from_thread(my_pseg, obj, my_pseg->marker_self); +} + static void marker_fetch_inev(void) { uintptr_t marker[2]; diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -9,4 +9,5 @@ static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, object_t *obj); static void marker_lookup_other_thread_inev(uint8_t other_segment_num); +static void marker_lookup_same_thread_write_read(object_t *obj); static void marker_fetch_inev(void); diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -80,7 +80,7 @@ assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER assert 0.099 <= tl.longest_marker_time <= 0.9 assert ffi.string(tl.longest_marker_self) == '29 %r' % (p,) - assert tl.longest_marker_other[0] == '\x00' + assert ffi.string(tl.longest_marker_other) == '' def test_macros(self): self.start_transaction() @@ 
-231,3 +231,31 @@ assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_INEVITABLE assert ffi.string(tl.longest_marker_self) == '21' assert ffi.string(tl.longest_marker_other) == '19' + + def test_read_write_contention(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + p = stm_allocate_old(16) + # + self.start_transaction() + assert stm_get_char(p) == '\x00' + # + self.switch(1) + self.start_transaction() + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'A') + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + py.test.raises(Conflict, self.commit_transaction) + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ + assert ffi.string(tl.longest_marker_self) == '19' + assert ffi.string(tl.longest_marker_other) == '' From noreply at buildbot.pypy.org Sat Apr 26 19:20:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Apr 2014 19:20:12 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Add the stripped down version of timelog.txt from the abandoned timelog Message-ID: <20140426172012.8488B1C023E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1185:e256c296de87 Date: 2014-04-26 19:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/e256c296de87/ Log: Add the stripped down version of timelog.txt from the abandoned timelog branch. 
diff --git a/c7/doc/marker.txt b/c7/doc/marker.txt new file mode 100644 --- /dev/null +++ b/c7/doc/marker.txt @@ -0,0 +1,42 @@ + +Reports +======= + +- self-abort: + WRITE_WRITE_CONTENTION, INEVITABLE_CONTENTION: + marker in both threads, time lost by this thread + WRITE_READ_CONTENTION: + marker pointing back to the write, time lost by this thread + +- aborted by a different thread: + WRITE_WRITE_CONTENTION: + marker in both threads, time lost by this thread + WRITE_READ_CONTENTION: + remote marker pointing back to the write, time lost by this thread + (no local marker available to know where we've read the object from) + INEVITABLE_CONTENTION: + n/a + +- self-pausing: + same as self-abort, but reporting the time lost by pausing + +- waiting for a free segment: + - if we're waiting because of inevitability, report with a + marker and the time lost + - if we're just waiting because of no free segment, don't report it, + or maybe with only the total time lost and no marker + +- more internal reasons for cond_wait(), like synchronizing the threads, + should all be resolved quickly and are unlikely worth a report + + +Internal Measurements +===================== + +- use clock_gettime(CLOCK_MONOTONIC), it seems to be the fastest way + (less than 5 times slower than a RDTSC instruction, which is itself + not safe in the presence of threads migrating among CPUs) + +- record only the highest-time entry. The user of the library is + responsible for getting and clearing it often enough if it wants + more details. 
From noreply at buildbot.pypy.org Sat Apr 26 20:58:51 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 26 Apr 2014 20:58:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Failing test for issue1743 Message-ID: <20140426185851.94A1C1C023E@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71007:a726eef8da83 Date: 2014-04-26 11:57 -0700 http://bitbucket.org/pypy/pypy/changeset/a726eef8da83/ Log: Failing test for issue1743 diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -289,6 +289,24 @@ greenlet(f).switch() + def test_exc_info_save_restore2(self): + import sys + from greenlet import greenlet + + result = [] + + def f(): + result.append(sys.exc_info()) + + g = greenlet(f) + try: + 1 / 0 + except ZeroDivisionError: + g.switch() + + assert result == [(None, None, None)] + + def test_gr_frame(self): from greenlet import greenlet import sys From noreply at buildbot.pypy.org Sat Apr 26 20:58:53 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 26 Apr 2014 20:58:53 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140426185853.BCBE41C023E@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71008:bd1d5bf88f29 Date: 2014-04-26 11:58 -0700 http://bitbucket.org/pypy/pypy/changeset/bd1d5bf88f29/ Log: merged upstream diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py --- a/dotviewer/graphserver.py +++ b/dotviewer/graphserver.py @@ -160,15 +160,14 @@ " | instructions in dotviewer/sshgraphserver.py\n") try: import pygame - except ImportError: + if isinstance(e, pygame.error): + print >> f, help + except Exception, e: f.seek(0) f.truncate() - print >> f, "ImportError" + print >> f, "%s: %s" % (e.__class__.__name__, e) print >> f, " | Pygame is not installed; either install it, or" print >> f, help - else: - if 
isinstance(e, pygame.error): - print >> f, help io.sendmsg(msgstruct.MSG_ERROR, f.getvalue()) else: listen_server(sys.argv[1]) diff --git a/lib-python/2.7/test/test_builtin.py b/lib-python/2.7/test/test_builtin.py --- a/lib-python/2.7/test/test_builtin.py +++ b/lib-python/2.7/test/test_builtin.py @@ -250,14 +250,12 @@ self.assertRaises(TypeError, compile) self.assertRaises(ValueError, compile, 'print 42\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print 42\n', '', 'single', 0xff) - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') if have_unicode: compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad') diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -479,11 +479,10 @@ def _create_file(self): if self.use_buffering: - f = open(self.filename, "w+", buffering=1024*16) + self.f = open(self.filename, "w+", buffering=1024*16) else: - f = open(self.filename, "w+") - self.f = f - self.all_files.append(f) + self.f = open(self.filename, "w+") + self.all_files.append(self.f) oldf = self.all_files.pop(0) if oldf is not None: oldf.close() diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -17,7 +18,8 @@ output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) 
- imp.load_module('_ctypes_test', fp, filename, description) + with fp: + imp.load_module('_ctypes_test', fp, filename, description) except ImportError: print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,7 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) + with fp: + imp.load_module('_testcapi', fp, filename, description) except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -583,7 +583,7 @@ Special care needs to be taken for global operator overloads in C++: first, make sure that they are actually reflected, especially for the global overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterator over a vector). + the case of gcc (note that they are not needed to iterate over a vector). Second, make sure that reflection info is loaded in the proper order. I.e. that these global overloads are available before use. 
diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -15,10 +15,11 @@ space.setitem(w_modules, w_main, mainmodule) return mainmodule + def compilecode(space, source, filename, cmd='exec'): w = space.wrap - w_code = space.builtin.call('compile', - w(source), w(filename), w(cmd), w(0), w(0)) + w_code = space.builtin.call( + 'compile', w(source), w(filename), w(cmd), w(0), w(0)) pycode = space.interp_w(eval.Code, w_code) return pycode @@ -28,7 +29,7 @@ cmd = 'eval' else: cmd = 'exec' - + try: if space is None: from pypy.objspace.std import StdObjSpace @@ -55,18 +56,22 @@ operationerr.record_interpreter_traceback() raise + def run_string(source, filename=None, space=None): _run_eval_string(source, filename, space, False) + def eval_string(source, filename=None, space=None): return _run_eval_string(source, filename, space, True) + def run_file(filename, space=None): - if __name__=='__main__': + if __name__ == '__main__': print "Running %r with %r" % (filename, space) istring = open(filename).read() run_string(istring, filename, space) + def run_module(module_name, args, space=None): """Implements PEP 338 'Executing modules as scripts', overwriting sys.argv[1:] using `args` and executing the module `module_name`. @@ -89,7 +94,6 @@ return space.call_function(w_run_module, w(module_name), space.w_None, w('__main__'), space.w_True) -# ____________________________________________________________ def run_toplevel(space, f, verbose=False): """Calls f() and handle all OperationErrors. diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -22,22 +22,6 @@ compile; if absent or zero these statements do influence the compilation, in addition to any features explicitly specified. 
""" - - ast_node = None - w_ast_type = space.gettypeobject(ast.AST.typedef) - str_ = None - if space.isinstance_w(w_source, w_ast_type): - ast_node = space.interp_w(ast.mod, w_source) - ast_node.sync_app_attrs(space) - elif space.isinstance_w(w_source, space.w_unicode): - w_utf_8_source = space.call_method(w_source, "encode", - space.wrap("utf-8")) - str_ = space.str_w(w_utf_8_source) - # This flag tells the parser to reject any coding cookies it sees. - flags |= consts.PyCF_SOURCE_IS_UTF8 - else: - str_ = space.str_w(w_source) - ec = space.getexecutioncontext() if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): @@ -53,14 +37,30 @@ space.wrap("compile() arg 3 must be 'exec' " "or 'eval' or 'single'")) - if ast_node is None: - if flags & consts.PyCF_ONLY_AST: - mod = ec.compiler.compile_to_ast(str_, filename, mode, flags) - return space.wrap(mod) - else: - code = ec.compiler.compile(str_, filename, mode, flags) + w_ast_type = space.gettypeobject(ast.AST.typedef) + if space.isinstance_w(w_source, w_ast_type): + ast_node = space.interp_w(ast.mod, w_source) + ast_node.sync_app_attrs(space) + code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + return space.wrap(code) + + if space.isinstance_w(w_source, space.w_unicode): + w_utf_8_source = space.call_method(w_source, "encode", + space.wrap("utf-8")) + str_ = space.str_w(w_utf_8_source) + # This flag tells the parser to reject any coding cookies it sees. 
+ flags |= consts.PyCF_SOURCE_IS_UTF8 else: - code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + str_ = space.readbuf_w(w_source).as_str() + + if '\x00' in str_: + raise OperationError(space.w_TypeError, space.wrap( + "compile() expected string without null bytes")) + + if flags & consts.PyCF_ONLY_AST: + code = ec.compiler.compile_to_ast(str_, filename, mode, flags) + else: + code = ec.compiler.compile(str_, filename, mode, flags) return space.wrap(code) diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -351,17 +351,17 @@ self.promote_step = promote_step def descr_new(space, w_subtype, w_start, w_stop=None, w_step=None): - start = _toint(space, w_start) + start = space.int_w(w_start) if space.is_none(w_step): # no step argument provided step = 1 promote_step = True else: - step = _toint(space, w_step) + step = space.int_w(w_step) promote_step = False if space.is_none(w_stop): # only 1 argument provided start, stop = 0, start else: - stop = _toint(space, w_stop) + stop = space.int_w(w_stop) howmany = get_len_of_range(space, start, stop, step) obj = space.allocate_instance(W_XRange, w_subtype) W_XRange.__init__(obj, space, start, howmany, step, promote_step) @@ -425,11 +425,6 @@ minint = -sys.maxint - 1 return minint if last < minint - step else last + step -def _toint(space, w_obj): - # this also supports float arguments. CPython still does, too. - # needs a bit more thinking in general... 
- return space.int_w(space.int(w_obj)) - W_XRange.typedef = TypeDef("xrange", __new__ = interp2app(W_XRange.descr_new.im_func), __repr__ = interp2app(W_XRange.descr_repr), @@ -441,6 +436,7 @@ ) W_XRange.typedef.acceptable_as_base_class = False + class W_XRangeIterator(W_Root): def __init__(self, space, current, remaining, step): self.space = space @@ -488,7 +484,10 @@ ) W_XRangeIterator.typedef.acceptable_as_base_class = False + class W_XRangeStepOneIterator(W_XRangeIterator): + _immutable_fields_ = ['stop'] + def __init__(self, space, start, stop): self.space = space self.current = start diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -311,14 +311,14 @@ def test_xrange_len(self): x = xrange(33) assert len(x) == 33 - x = xrange(33.2) - assert len(x) == 33 + exc = raises(TypeError, xrange, 33.2) + assert "integer" in str(exc.value) x = xrange(33,0,-1) assert len(x) == 33 x = xrange(33,0) assert len(x) == 0 - x = xrange(33,0.2) - assert len(x) == 0 + exc = raises(TypeError, xrange, 33, 0.2) + assert "integer" in str(exc.value) x = xrange(0,33) assert len(x) == 33 x = xrange(0,33,-1) @@ -490,6 +490,14 @@ def test_compile(self): co = compile('1+2', '?', 'eval') assert eval(co) == 3 + co = compile(buffer('1+2'), '?', 'eval') + assert eval(co) == 3 + exc = raises(TypeError, compile, chr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, unichr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, memoryview('1+2'), '?', 'eval') + assert str(exc.value) == "expected a readable buffer object" compile("from __future__ import with_statement", "", "exec") raises(SyntaxError, compile, '-', '?', 'eval') raises(ValueError, compile, '"\\xt"', '?', 'eval') diff --git 
a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -1,5 +1,4 @@ class AppTestMap: - def test_trivial_map_one_seq(self): assert map(lambda x: x+2, [1, 2, 3, 4]) == [3, 4, 5, 6] @@ -77,6 +76,7 @@ assert result == [(2, 7), (1, 6), (None, 5), (None, 4), (None, 3), (None, 2), (None, 1)] + class AppTestZip: def test_one_list(self): assert zip([1,2,3]) == [(1,), (2,), (3,)] @@ -93,6 +93,7 @@ yield None assert zip(Foo()) == [] + class AppTestReduce: def test_None(self): raises(TypeError, reduce, lambda x, y: x+y, [1,2,3], None) @@ -105,6 +106,7 @@ assert reduce(lambda x, y: x-y, [10, 2, 8]) == 0 assert reduce(lambda x, y: x-y, [2, 8], 10) == 0 + class AppTestFilter: def test_None(self): assert filter(None, ['a', 'b', 1, 0, None]) == ['a', 'b', 1] @@ -125,6 +127,7 @@ return i * 10 assert filter(lambda x: x != 20, T("abcd")) == (0, 10, 30) + class AppTestXRange: def test_xrange(self): x = xrange(2, 9, 3) @@ -155,7 +158,8 @@ assert list(xrange(0, 10, A())) == [0, 5] def test_xrange_float(self): - assert list(xrange(0.1, 2.0, 1.1)) == [0, 1] + exc = raises(TypeError, xrange, 0.1, 2.0, 1.1) + assert "integer" in str(exc.value) def test_xrange_long(self): import sys @@ -218,6 +222,7 @@ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] raises(TypeError, reversed, reversed("hello")) + class AppTestApply: def test_apply(self): def f(*args, **kw): @@ -228,6 +233,7 @@ assert apply(f, args) == (args, {}) assert apply(f, args, kw) == (args, kw) + class AppTestAllAny: """ These are copied directly and replicated from the Python 2.5 source code. 
@@ -277,6 +283,7 @@ S = [10, 20, 30] assert any([x > 42 for x in S]) == False + class AppTestMinMax: def test_min(self): assert min(1, 2) == 1 diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -25,5 +25,6 @@ assert str(b) == "\x00xy" + "\x00" * 7 b[4:8:2] = 'zw' assert str(b) == "\x00xy\x00z\x00w" + "\x00" * 3 - b[6:10] = u'#' - assert str(b) == "\x00xy\x00z\x00#" + "\x00" * 3 + r = str(buffer(u'#')) + b[6:6+len(r)] = u'#' + assert str(b[:6+len(r)]) == "\x00xy\x00z\x00" + r diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -679,7 +679,7 @@ if space.isinstance_w(w_string, space.w_unicode): return space.newtuple([w_string, space.len(w_string)]) - string = space.str_w(w_string) + string = space.readbuf_w(w_string).as_str() if len(string) == 0: return space.newtuple([space.wrap(u''), space.wrap(0)]) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -276,7 +276,7 @@ assert enc == "a\x00\x00\x00" def test_unicode_internal_decode(self): - import sys + import sys, _codecs, array if sys.maxunicode == 65535: # UCS2 build if sys.byteorder == "big": bytes = "\x00a" @@ -291,6 +291,9 @@ bytes2 = "\x98\x00\x01\x00" assert bytes2.decode("unicode_internal") == u"\U00010098" assert bytes.decode("unicode_internal") == u"a" + assert _codecs.unicode_internal_decode(array.array('c', bytes))[0] == u"a" + exc = raises(TypeError, _codecs.unicode_internal_decode, memoryview(bytes)) + assert str(exc.value) == "expected a readable buffer object" def test_raw_unicode_escape(self): assert unicode("\u0663", "raw-unicode-escape") == u"\u0663" diff --git 
a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -460,14 +460,17 @@ space = self.space self.check_closed() - w_iterator = space.iter(w_lines) - while True: - try: - w_line = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done + lines = space.fixedview(w_lines) + for i, w_line in enumerate(lines): + if not space.isinstance_w(w_line, space.w_str): + try: + line = w_line.charbuf_w(space) + except TypeError: + raise OperationError(space.w_TypeError, space.wrap( + "writelines() argument must be a sequence of strings")) + else: + lines[i] = space.wrap(line) + for w_line in lines: self.file_write(w_line) def file_readinto(self, w_rwbuffer): diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -386,6 +386,32 @@ assert len(somelines) > 200 assert somelines == lines[:len(somelines)] + def test_writelines(self): + import array + fn = self.temptestfile + with file(fn, 'w') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'r').readlines() == ['abcdef'] + + with file(fn, 'wb') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == 
['abcdef'] + + with file(fn, 'wb') as f: + exc = raises(TypeError, f.writelines, ['abc', memoryview('def')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == [] + def test_nasty_writelines(self): # The stream lock should be released between writes fn = self.temptestfile diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -193,7 +193,7 @@ def setslice(self, space, w_slice, w_value): start, stop = self.decodeslice(space, w_slice) - value = space.bufferstr_w(w_value) + value = space.str_w(w_value) if start + len(value) != stop: raise OperationError(space.w_ValueError, space.wrap("cannot resize array")) diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -2,7 +2,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError, wrap_windowserror +from pypy.interpreter.error import OperationError, wrap_windowserror, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rwinreg, rwin32 from rpython.rlib.rarithmetic import r_uint, intmask @@ -327,7 +327,14 @@ buf = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw') buf[0] = '\0' else: - value = space.bufferstr_w(w_value) + try: + value = w_value.readbuf_w(space) + except TypeError: + raise oefmt(space.w_TypeError, + "Objects of type '%T' can not be used as binary " + "registry values", w_value) + else: + value = value.as_str() buflen = len(value) buf = rffi.str2charp(value) diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ 
b/pypy/module/_winreg/test/test_winreg.py @@ -137,11 +137,15 @@ assert 0, "Did not raise" def test_SetValueEx(self): - from _winreg import CreateKey, SetValueEx + from _winreg import CreateKey, SetValueEx, REG_BINARY key = CreateKey(self.root_key, self.test_key_name) sub_key = CreateKey(key, "sub_key") for name, value, type in self.test_data: SetValueEx(sub_key, name, 0, type, value) + exc = raises(TypeError, SetValueEx, sub_key, 'test_name', None, + REG_BINARY, memoryview('abc')) + assert str(exc.value) == ("Objects of type 'memoryview' can not " + "be used as binary registry values") def test_readValues(self): from _winreg import OpenKey, EnumValue, QueryValueEx, EnumKey diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -25,6 +25,7 @@ '_init_pythonify' : 'pythonify._init_pythonify', 'load_reflection_info' : 'pythonify.load_reflection_info', 'add_pythonization' : 'pythonify.add_pythonization', + 'Template' : 'pythonify.CppyyTemplateType', } def __init__(self, space, *args): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.buffer_w(w_obj, space.BUF_SIMPLE) + buf = space.getarg_w('s*', w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.buffer_w(w_value, space.BUF_SIMPLE) + buf = space.getarg_w('s*', w_value) # TODO: report if too many items given? 
for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.buffer_w(w_value, space.BUF_SIMPLE) + buf = space.getarg_w('s*', w_value) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -450,8 +450,8 @@ class CPPConstructor(CPPMethod): """Method dispatcher that constructs new objects. This method can not have - a fast path, a the allocation of the object is currently left to the - reflection layer only, b/c the C++ class may have an overloaded operator + a fast path, as the allocation of the object is currently left to the + reflection layer only, since the C++ class may have an overloaded operator new, disallowing malloc here.""" _immutable_ = True @@ -460,8 +460,18 @@ # TODO: these casts are very, very un-pretty; need to find a way of # re-using CPPMethod's features w/o these roundabouts vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - w_result = CPPMethod.call(self, vscope, args_w) + cppinstance = None + try: + cppinstance = self.space.interp_w(W_CPPInstance, args_w[0], can_be_None=False) + use_args_w = args_w[1:] + except (OperationError, TypeError), e: + use_args_w = args_w + w_result = CPPMethod.call(self, vscope, use_args_w) newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + if cppinstance: + cppinstance._rawobject = newthis + memory_regulator.register(cppinstance) + return args_w[0] return wrap_cppobject(self.space, newthis, self.scope, do_cast=False, python_owns=True, fresh=True) @@ -1141,10 +1151,14 @@ self.objects = rweakref.RWeakValueDictionary(int, W_CPPInstance) def register(self, obj): + if not obj._rawobject: + return int_address = 
int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, obj) def unregister(self, obj): + if not obj._rawobject: + return int_address = int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, None) @@ -1194,7 +1208,7 @@ w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) # try to recycle existing object if this one is not newly created - if not fresh: + if not fresh and rawobject: obj = memory_regulator.retrieve(rawobject) if obj is not None and obj.cppclass is cppclass: return obj diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -25,9 +25,12 @@ # class CppyyClass defined in _init_pythonify() class CppyyTemplateType(object): - def __init__(self, scope, name): - self._scope = scope + def __init__(self, name, scope=None): self._name = name + if scope is None: + self._scope = gbl + else: + self._scope = scope def _arg_to_str(self, arg): if arg == str: @@ -143,7 +146,12 @@ raise TypeError(msg) else: def __new__(cls, *args): - return constructor_overload.call(None, *args) + # create a place-holder only as there may be a derived class defined + import cppyy + instance = cppyy.bind_object(0, class_name, True) + if not instance.__class__ is cls: + instance.__class__ = cls # happens for derived class + return instance return __new__ def make_pycppclass(scope, class_name, final_class_name, cppclass): @@ -206,7 +214,7 @@ return pycppclass def make_cpptemplatetype(scope, template_name): - return CppyyTemplateType(scope, template_name) + return CppyyTemplateType(template_name, scope) def get_pycppitem(scope, name): @@ -424,7 +432,9 @@ __metaclass__ = CppyyClassMeta def __init__(self, *args, **kwds): - pass # ignored, for the C++ backend, ctor == __new__ + __init__ + # self is only a placeholder; now create the actual C++ object + args = (self,) + args + self._cpp_proxy.get_overload(self._cpp_proxy.type_name).call(None, *args) # 
class generator callback cppyy._set_class_generator(clgen_callback) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -3,12 +3,23 @@ #include #include +#include #include #include #include #include +// add example01.cxx code +int globalAddOneToInt(int a); + +namespace dummy { +#include "example01.cxx" +} + +int globalAddOneToInt(int a) { + return dummy::globalAddOneToInt(a); +} /* pseudo-reflection data ------------------------------------------------- */ namespace { @@ -38,28 +49,16 @@ typedef std::map Scopes_t; static Scopes_t s_scopes; -class PseudoExample01 { -public: - PseudoExample01() : m_somedata(-99) {} - PseudoExample01(int a) : m_somedata(a) {} - PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} - PseudoExample01& operator=(const PseudoExample01& e) { - if (this != &e) m_somedata = e.m_somedata; - return *this; - } - virtual ~PseudoExample01() {} - -public: - int m_somedata; -}; - static int example01_last_static_method = 0; static int example01_last_constructor = 0; +static int payload_methods_offset = 0; struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- static long s_scope_id = 0; + + { // class example01 -- s_handles["example01"] = (cppyy_scope_t)++s_scope_id; std::vector methods; @@ -115,14 +114,81 @@ // cut-off is used in cppyy_is_constructor example01_last_constructor = methods.size(); - // (12) double addDataToDouble(double a) + // (12) int addDataToInt(int a) + argtypes.clear(); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToInt", argtypes, "int")); + + // (13) int addDataToIntConstRef(const int& a) + argtypes.clear(); + argtypes.push_back("const int&"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToIntConstRef", argtypes, "int")); + + // (14) int overloadedAddDataToInt(int a, int b) + 
argtypes.clear(); + argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + // (15) int overloadedAddDataToInt(int a) + // (16) int overloadedAddDataToInt(int a, int b, int c) + argtypes.clear(); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + + // (17) double addDataToDouble(double a) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + // (18) int addDataToAtoi(const char* str) + // (19) char* addToStringValue(const char* str) + argtypes.clear(); + argtypes.push_back("const char*"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToAtoi", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("addToStringValue", argtypes, "char*")); + + // (20) void setPayload(payload* p) + // (21) payload* cyclePayload(payload* p) + // (22) payload copyCyclePayload(payload* p) + argtypes.clear(); + argtypes.push_back("payload*"); + methods.push_back(Cppyy_PseudoMethodInfo("setPayload", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("cyclePayload", argtypes, "payload*")); + methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", argtypes, "payload")); + + payload_methods_offset = methods.size(); + Cppyy_PseudoClassInfo info(methods); s_scopes[(cppyy_scope_t)s_scope_id] = info; - // -- class example01 + } // -- class example01 + + { // class payload -- + s_handles["payload"] = (cppyy_scope_t)++s_scope_id; + + std::vector methods; + + // (23) payload(double d = 0.) 
+ std::vector argtypes; + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor")); + + // (24) double getData() + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("getData", argtypes, "double")); + + // (25) void setData(double d) + argtypes.clear(); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); + + Cppyy_PseudoClassInfo info(methods); + s_scopes[(cppyy_scope_t)s_scope_id] = info; + } // -- class payload } } _init; @@ -150,36 +216,69 @@ return s_handles[scope_name]; // lookup failure will return 0 (== error) } +cppyy_type_t cppyy_actual_class(cppyy_type_t klass, cppyy_object_t /* obj */) { + return klass; +} + /* memory management ------------------------------------------------------ */ void cppyy_destruct(cppyy_type_t handle, cppyy_object_t self) { if (handle == s_handles["example01"]) - delete (PseudoExample01*)self; + delete (dummy::example01*)self; } /* method/function dispatching -------------------------------------------- */ +void cppyy_call_v(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + switch ((long)method) { + case 5: // static void example01:;staticSetPayload(payload* p, double d) + assert(!self && nargs == 2); + dummy::example01::staticSetPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), + ((CPPYY_G__value*)args)[1].obj.d); + break; + case 9: // static void example01::setCount(int) + assert(!self && nargs == 1); + dummy::example01::setCount(((CPPYY_G__value*)args)[0].obj.in); + break; + case 20: // void example01::setPayload(payload* p); + assert(self && nargs == 1); + ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_v"); + break; + } +} + int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { int result = 0; switch ((long)method) { - case 1: // static 
int staticAddOneToInt(int) + case 1: // static int example01::staticAddOneToInt(int) assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.in + 1; + result = dummy::example01::staticAddOneToInt(((CPPYY_G__value*)args)[0].obj.in); break; - case 2: // static int staticAddOneToInt(int, int) + case 2: // static int example01::staticAddOneToInt(int, int) assert(!self && nargs == 2); - result = ((CPPYY_G__value*)args)[0].obj.in + ((CPPYY_G__value*)args)[1].obj.in + 1; + result = dummy::example01::staticAddOneToInt( + ((CPPYY_G__value*)args)[0].obj.in, ((CPPYY_G__value*)args)[1].obj.in); break; - case 3: // static int staticAtoi(const char* str) + case 3: // static int example01::staticAtoi(const char* str) assert(!self && nargs == 1); - result = ::atoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + result = dummy::example01::staticAtoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); break; - case 8: // static int getCount() - assert(!self && nargs == 0); - // can't actually call this method (would need to resolve example01::count), but - // other than the memory tests, most tests just check for 0 at the end - result = 0; - break; + case 8: // static int example01::getCount() + assert(!self && nargs == 0); + result = dummy::example01::getCount(); + break; + case 12: // int example01::addDataToInt(int a) + assert(self && nargs == 1); + result = ((dummy::example01*)self)->addDataToInt(((CPPYY_G__value*)args)[0].obj.in); + break; + case 18: // int example01::addDataToAtoi(const char* str) + assert(self && nargs == 1); + result = ((dummy::example01*)self)->addDataToAtoi( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; default: assert(!"method unknown in cppyy_call_i"); break; @@ -188,26 +287,50 @@ } long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - if ((long)method == 4) { // static char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); 
- char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return (long)strout; + long result = 0; + switch ((long)method) { + case 4: // static char* example01::staticStrcpy(const char* strin) + assert(!self && nargs == 1); + result = (long)dummy::example01::staticStrcpy( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + case 6: // static payload* example01::staticCyclePayload(payload* p, double d) + assert(!self && nargs == 2); + result = (long)dummy::example01::staticCyclePayload( + (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), + ((CPPYY_G__value*)args)[1].obj.d); + break; + case 19: // char* example01::addToStringValue(const char* str) + assert(self && nargs == 1); + result = (long)((dummy::example01*)self)->addToStringValue( + (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + case 21: // payload* example01::cyclePayload(payload* p) + assert(self && nargs == 1); + result = (long)((dummy::example01*)self)->cyclePayload( + (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_l"); + break; } - assert(!"method unknown in cppyy_call_l"); - return 0; + return result; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { double result = 0.; switch ((long)method) { - case 0: // static double staticAddToDouble(double) + case 0: // static double example01::staticAddToDouble(double) assert(!self && nargs == 1); - result = ((CPPYY_G__value*)args)[0].obj.d + 0.01; + result = dummy::example01::staticAddToDouble(((CPPYY_G__value*)args)[0].obj.d); break; - case 12: // double addDataToDouble(double a) + case 17: // double example01::addDataToDouble(double a) assert(self && nargs == 1); - result = ((PseudoExample01*)self)->m_somedata + ((CPPYY_G__value*)args)[0].obj.d; + result = ((dummy::example01*)self)->addDataToDouble(((CPPYY_G__value*)args)[0].obj.d); + break; + case 24: // double payload::getData() + assert(self 
&& nargs == 0); + result = ((dummy::payload*)self)->getData(); break; default: assert(!"method unknown in cppyy_call_d"); @@ -217,11 +340,17 @@ } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - // char* staticStrcpy(const char* strin) - const char* strin = (const char*)(*(long*)&((CPPYY_G__value*)args)[0]); - char* strout = (char*)malloc(::strlen(strin)+1); - ::strcpy(strout, strin); - return strout; + char* result = 0; + switch ((long)method) { + case 4: // static char* example01::staticStrcpy(const char* strin) + assert(!self && nargs == 1); + result = dummy::example01::staticStrcpy((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + break; + default: + assert(!"method unknown in cppyy_call_s"); + break; + } + return result; } cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { @@ -230,17 +359,27 @@ switch ((long)method) { case 10: assert(nargs == 0); - result = new PseudoExample01; + result = new dummy::example01; break; case 11: assert(nargs == 1); - result = new PseudoExample01(((CPPYY_G__value*)args)[0].obj.in); + result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); break; default: - assert(!"method unknown in cppyy_constructor"); + assert(!"method of example01 unknown in cppyy_constructor"); break; } - } + } else if (handle == s_handles["payload"]) { + switch ((long)method) { + case 23: + if (nargs == 0) result = new dummy::payload; + else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); + break; + default: + assert(!"method payload unknown in cppyy_constructor"); + break; + } + } return (cppyy_object_t)result; } @@ -346,8 +485,13 @@ return 0; } -cppyy_method_t cppyy_get_method(cppyy_scope_t /* handle */, cppyy_index_t method_index) { - return (cppyy_method_t)method_index; +cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t method_index) { + if (handle == s_handles["example01"]) + return 
(cppyy_method_t)method_index; + else if (handle == s_handles["payload"]) + return (cppyy_method_t)((long)method_index + payload_methods_offset); + assert(!"unknown class in cppyy_get_method"); + return (cppyy_method_t)0; } @@ -356,12 +500,16 @@ if (handle == s_handles["example01"]) return example01_last_static_method <= method_index && method_index < example01_last_constructor; + else if (handle == s_handles["payload"]) + return (long)method_index == 0; return 0; } int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { if (handle == s_handles["example01"]) return method_index < example01_last_static_method ? 1 : 0; + if (handle == s_handles["payload"]) + return 0; return 1; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -8,13 +8,18 @@ # run only tests that are covered by the dummy backend and tests # that do not rely on reflex if not ('test_helper.py' in item.location[0] or \ - 'test_cppyy.py' in item.location[0]): + 'test_cppyy.py' in item.location[0] or \ + 'test_pythonify.py' in item.location[0]): py.test.skip("genreflex is not installed") import re - if 'test_cppyy.py' in item.location[0] and \ - not re.search("test0[1-36]", item.location[2]): + if 'test_pythonify.py' in item.location[0] and \ + not re.search("AppTestPYTHONIFY.test0[1-6]", item.location[2]): py.test.skip("genreflex is not installed") +def pytest_ignore_collect(path, config): + if py.path.local.sysfind('genreflex') is None and config.option.runappdirect: + return True # "can't run dummy tests in -A" + def pytest_configure(config): if py.path.local.sysfind('genreflex') is None: import pypy.module.cppyy.capi.loadable_capi as lcapi @@ -22,6 +27,9 @@ import ctypes ctypes.CDLL(lcapi.reflection_library) except Exception, e: + if config.option.runappdirect: + return # "can't run dummy tests in -A" + # build dummy backend (which has reflex info and calls 
hard-wired) import os from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -32,10 +40,11 @@ pkgpath = py.path.local(__file__).dirpath().join(os.pardir) srcpath = pkgpath.join('src') incpath = pkgpath.join('include') + tstpath = pkgpath.join('test') eci = ExternalCompilationInfo( separate_module_files=[srcpath.join('dummy_backend.cxx')], - include_dirs=[incpath], + include_dirs=[incpath, tstpath], use_cpp_linker=True, ) diff --git a/pypy/module/cppyy/test/example01.cxx b/pypy/module/cppyy/test/example01.cxx --- a/pypy/module/cppyy/test/example01.cxx +++ b/pypy/module/cppyy/test/example01.cxx @@ -1,4 +1,3 @@ -#include #include #include #include diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -321,7 +321,7 @@ e = cppyy.gbl.example01(2) assert 5 == meth(e, 3) - def test01_installable_function(self): + def test15_installable_function(self): """Test installing and calling global C++ function as python method""" import cppyy @@ -332,6 +332,33 @@ assert 2 == e.fresh(1) assert 3 == e.fresh(2) + def test16_subclassing(self): + """A sub-class on the python side should have that class as type""" + + import cppyy + example01 = cppyy.gbl.example01 + + o = example01() + assert type(o) == example01 + + class MyClass1(example01): + def myfunc(self): + return 1 + + o = MyClass1() + assert type(o) == MyClass1 + assert isinstance(o, example01) + assert o.myfunc() == 1 + + class MyClass2(example01): + def __init__(self, what): + example01.__init__(self) + self.what = what + + o = MyClass2('hi') + assert type(o) == MyClass2 + assert o.what == 'hi' + class AppTestPYTHONIFY_UI: spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ 
-477,3 +477,29 @@ assert b1 == e2 assert b1 != b2 assert b1 == e2 + + +class AppTestTEMPLATE_UI: + spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + + def setup_class(cls): + cls.w_test_dct = cls.space.wrap(test_dct) + cls.w_stlstring = cls.space.appexec([], """(): + import cppyy, sys + return cppyy.load_reflection_info(%r)""" % (test_dct, )) + + def test01_explicit_templates(self): + """Explicit use of Template class""" + + import cppyy + + vector = cppyy.Template('vector', cppyy.gbl.std) + assert vector[int] == vector(int) + + v = vector[int]() + + N = 10 + v += range(N) + assert len(v) == N + for i in range(N): + assert v[i] == i diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -132,7 +132,7 @@ return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' - def buffer_w(self, w_obj): + def getarg_w(self, code, w_obj): # for retrieving buffers return FakeBuffer(w_obj) def exception_match(self, typ, sub): diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -0,0 +1,27 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestBuffers(BaseTestPyPyC): + def test_re_match(self): + def main(n): + import re + import array + p = re.compile('.+') + a = array.array('c', 'test' * 1000) + i = 0 + while i < n: + i += 1 + p.match(a) # ID: match + return i + log = self.run(main, [1000]) + assert log.result == 1000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('match', """ + guard_not_invalidated(descr=...) + i65 = getfield_gc(p18, descr=...) + i67 = int_gt(0, i65) + guard_false(i67, descr=...) + i69 = int_gt(., i65) + guard_true(i69, descr=...) 
+ --TICK-- + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -20,8 +20,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i58 = getfield_gc(p18, descr=) - i59 = getfield_gc(p18, descr=) - i60 = int_lt(i58, i59) + i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) p62 = getfield_gc(ConstPtr(ptr37), descr=) diff --git a/pypy/module/termios/interp_termios.py b/pypy/module/termios/interp_termios.py --- a/pypy/module/termios/interp_termios.py +++ b/pypy/module/termios/interp_termios.py @@ -4,7 +4,7 @@ """ from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import wrap_oserror +from pypy.interpreter.error import wrap_oserror, OperationError from rpython.rlib import rtermios import termios @@ -19,6 +19,10 @@ @unwrap_spec(when=int) def tcsetattr(space, w_fd, when, w_attributes): fd = space.c_filedescriptor_w(w_fd) + if not space.isinstance_w(w_attributes, space.w_list) or \ + space.len_w(w_attributes) != 7: + raise OperationError(space.w_TypeError, space.wrap( + "tcsetattr, arg 3: must be 7 element list")) w_iflag, w_oflag, w_cflag, w_lflag, w_ispeed, w_ospeed, w_cc = \ space.unpackiterable(w_attributes, expected_length=7) w_builtin = space.getbuiltinmodule('__builtin__') diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -86,7 +86,7 @@ child.expect('ok!') def test_ioctl_termios(self): - source = py.code.Source(""" + source = py.code.Source(r""" import termios import fcntl lgt = len(fcntl.ioctl(2, termios.TIOCGWINSZ, '\000'*8)) @@ -149,4 +149,7 @@ def test_error_tcsetattr(self): import termios - raises(ValueError, termios.tcsetattr, 0, 1, (1, 2)) + exc = raises(TypeError, termios.tcsetattr, 
0, 1, (1, 2)) + assert str(exc.value) == "tcsetattr, arg 3: must be 7 element list" + exc = raises(TypeError, termios.tcsetattr, 0, 1, (1, 2, 3, 4, 5, 6, 7)) + assert str(exc.value) == "tcsetattr, arg 3: must be 7 element list" diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -3,13 +3,12 @@ """ import operator -from rpython.rlib.buffer import Buffer, StringBuffer, SubBuffer +from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from rpython.rlib.objectmodel import compute_hash -from rpython.rlib.rstring import StringBuilder class W_Buffer(W_Root): diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -6,7 +6,7 @@ from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty diff --git a/pypy/sandbox/pypy_interact.py b/pypy/sandbox/pypy_interact.py --- a/pypy/sandbox/pypy_interact.py +++ b/pypy/sandbox/pypy_interact.py @@ -21,7 +21,7 @@ """ import sys, os -sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))) from rpython.translator.sandbox.sandlib import SimpleIOSandboxedProc from rpython.translator.sandbox.sandlib import VirtualizedSandboxedProc from rpython.translator.sandbox.vfs import Dir, RealDir, RealFile diff --git 
a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -5884,6 +5884,25 @@ """ self.optimize_loop(ops, expected) + def test_bug_unroll_with_immutables(self): + ops = """ + [p0] + i2 = getfield_gc_pure(p0, descr=immut_intval) + p1 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p1, 1242, descr=immut_intval) + jump(p1) + """ + preamble = """ + [p0] + i2 = getfield_gc_pure(p0, descr=immut_intval) + jump() + """ + expected = """ + [] + jump() + """ + self.optimize_loop(ops, expected, preamble) + def test_immutable_constantfold_recursive(self): ops = """ [] @@ -8356,6 +8375,31 @@ """ self.optimize_loop(ops, ops) + def test_unroll_failargs(self): + ops = """ + [p0, i1] + p1 = getfield_gc(p0, descr=valuedescr) + i2 = int_add(i1, 1) + i3 = int_le(i2, 13) + guard_true(i3) [p1] + jump(p0, i2) + """ + expected = """ + [p0, i1, p1] + i2 = int_add(i1, 1) + i3 = int_le(i2, 13) + guard_true(i3) [p1] + jump(p0, i2, p1) + """ + preamble = """ + [p0, i1] + p1 = getfield_gc(p0, descr=valuedescr) + i2 = int_add(i1, 1) + i3 = int_le(i2, 13) + guard_true(i3) [p1] + jump(p0, i2, p1) + """ + self.optimize_loop(ops, expected, preamble) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -180,10 +180,11 @@ self.optimizer.clear_newoperations() for i in range(len(original_jump_args)): + srcbox = jump_args[i] if values[i].is_virtual(): - values[i].force_box(self.optimizer) - if original_jump_args[i] is not jump_args[i]: - op = ResOperation(rop.SAME_AS, [jump_args[i]], original_jump_args[i]) + srcbox = values[i].force_box(self.optimizer) + if original_jump_args[i] is not 
srcbox: + op = ResOperation(rop.SAME_AS, [srcbox], original_jump_args[i]) self.optimizer.emit_operation(op) inputarg_setup_ops = self.optimizer.get_newoperations() diff --git a/rpython/jit/metainterp/test/test_string.py b/rpython/jit/metainterp/test/test_string.py --- a/rpython/jit/metainterp/test/test_string.py +++ b/rpython/jit/metainterp/test/test_string.py @@ -654,3 +654,23 @@ self.check_resops(call_pure=0, unicodesetitem=0, call=2, newunicode=0, unicodegetitem=0, copyunicodecontent=0) + + def test_string_interpolation(self): + def f(x, y): + return len('<%d %d>' % (x, y)) + res = self.interp_operations(f, [222, 3333]) + assert res == 10 + + def test_string_interpolation_constants(self): + jitdriver = JitDriver(greens=['x', 'y'], reds=['z']) + def f(x, y): + z = 0 + while z < 10: + jitdriver.jit_merge_point(x=x, y=y, z=z) + if len('<%d %d>' % (x, y)) != 10: + raise Exception + z += 1 + return 0 + self.meta_interp(f, [222, 3333]) + self.check_simple_loop({'guard_true': 1, 'int_add': 1, + 'int_lt': 1, 'jump': 1}) diff --git a/rpython/translator/platform/test/test_posix.py b/rpython/translator/platform/test/test_posix.py --- a/rpython/translator/platform/test/test_posix.py +++ b/rpython/translator/platform/test/test_posix.py @@ -9,13 +9,8 @@ res = host.execute('echo', '42 24') assert res.out == '42 24\n' - if sys.platform == 'win32': - # echo is a shell builtin on Windows - res = host.execute('cmd', ['/c', 'echo', '42', '24']) - assert res.out == '42 24\n' - else: - res = host.execute('echo', ['42', '24']) - assert res.out == '42 24\n' + res = host.execute('echo', ['42', '24']) + assert res.out == '42 24\n' class TestMakefile(object): platform = host @@ -61,8 +56,13 @@ finally: del os.environ['PYPY_LOCALBASE'] Makefile = tmpdir.join('Makefile').read() - assert 'INCLUDEDIRS = -I/foo/baz/include' in Makefile - assert 'LIBDIRS = -L/foo/baz/lib' in Makefile + include_prefix = '-I' + lib_prefix = '-L' + if self.platform.name == 'msvc': + include_prefix = '/I' + 
lib_prefix = '/LIBPATH:' + assert 'INCLUDEDIRS = %s/foo/baz/include' % include_prefix in Makefile + assert 'LIBDIRS = %s/foo/baz/lib' % lib_prefix in Makefile class TestMaemo(TestMakefile): strict_on_stderr = False diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -292,7 +292,10 @@ rel_ofiles = [rel_cfile[:rel_cfile.rfind('.')]+'.obj' for rel_cfile in rel_cfiles] m.cfiles = rel_cfiles - rel_includedirs = [rpyrel(incldir) for incldir in eci.include_dirs] + rel_includedirs = [rpyrel(incldir) for incldir in + self.preprocess_include_dirs(eci.include_dirs)] + rel_libdirs = [rpyrel(libdir) for libdir in + self.preprocess_library_dirs(eci.library_dirs)] m.comment('automatically generated makefile') definitions = [ @@ -302,7 +305,7 @@ ('SOURCES', rel_cfiles), ('OBJECTS', rel_ofiles), ('LIBS', self._libs(eci.libraries)), - ('LIBDIRS', self._libdirs(eci.library_dirs)), + ('LIBDIRS', self._libdirs(rel_libdirs)), ('INCLUDEDIRS', self._includedirs(rel_includedirs)), ('CFLAGS', self.cflags), ('CFLAGSEXTRA', list(eci.compile_extra)), diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -3,6 +3,10 @@ trampolines that marshal their input arguments, dump them to STDOUT, and wait for an answer on STDIN. Enable with 'translate.py --sandbox'. 
""" +import sys +if sys.platform == 'win32': + raise TypeError("sandbox not supported on windows") + import py from rpython.rlib import rmarshal, types diff --git a/rpython/translator/sandbox/test/test_sandbox.py b/rpython/translator/sandbox/test/test_sandbox.py --- a/rpython/translator/sandbox/test/test_sandbox.py +++ b/rpython/translator/sandbox/test/test_sandbox.py @@ -25,7 +25,20 @@ check_str_without_nul=True) return str(t.compile()) +unsupported_platform = ('False', '') +if sys.platform == 'win32': + unsupported_platform = ('True', 'sandbox not supported on this platform') + def test_unavailable(): + def entry_point(argv): + fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) + os.close(fd) + return 0 + exc = py.test.raises(TypeError, compile, entry_point) + assert str(exc).find('not supported') >= 0 +supported = py.test.mark.skipif(unsupported_platform[0], reason=unsupported_platform[1]) + + at supported def test_open_dup(): def entry_point(argv): fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) @@ -43,6 +56,7 @@ f.close() assert tail == "" + at supported def test_read_write(): def entry_point(argv): fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) @@ -65,6 +79,7 @@ f.close() assert tail == "" + at supported def test_dup2_access(): def entry_point(argv): os.dup2(34, 56) @@ -80,6 +95,7 @@ f.close() assert tail == "" + at supported def test_stat_ftruncate(): from rpython.translator.sandbox.sandlib import RESULTTYPE_STATRESULT from rpython.rlib.rarithmetic import r_longlong @@ -101,6 +117,7 @@ f.close() assert tail == "" + at supported def test_time(): def entry_point(argv): t = time.time() @@ -116,6 +133,7 @@ f.close() assert tail == "" + at supported def test_getcwd(): def entry_point(argv): t = os.getcwd() @@ -131,6 +149,7 @@ f.close() assert tail == "" + at supported def test_oserror(): def entry_point(argv): try: @@ -148,6 +167,7 @@ f.close() assert tail == "" + at supported def test_hybrid_gc(): def entry_point(argv): l = [] @@ -172,6 +192,7 @@ rescode = 
pipe.wait() assert rescode == 0 + at supported def test_segfault_1(): class A: def __init__(self, m): @@ -194,6 +215,7 @@ e.close() assert 'Invalid RPython operation' in errors + at supported def test_segfault_2(): py.test.skip("hum, this is one example, but we need to be very careful") class Base: @@ -226,6 +248,7 @@ e.close() assert '...think what kind of errors to get...' in errors + at supported def test_safe_alloc(): from rpython.rlib.rmmap import alloc, free @@ -246,6 +269,7 @@ rescode = pipe.wait() assert rescode == 0 + at supported def test_unsafe_mmap(): py.test.skip("Since this stuff is unimplemented, it won't work anyway " "however, the day it starts working, it should pass test") @@ -271,6 +295,7 @@ rescode = pipe.wait() assert rescode == 0 + at supported class TestPrintedResults: def run(self, entry_point, args, expected): diff --git a/rpython/translator/sandbox/test/test_sandlib.py b/rpython/translator/sandbox/test/test_sandlib.py --- a/rpython/translator/sandbox/test/test_sandlib.py +++ b/rpython/translator/sandbox/test/test_sandlib.py @@ -6,10 +6,10 @@ from rpython.translator.sandbox.sandlib import SimpleIOSandboxedProc from rpython.translator.sandbox.sandlib import VirtualizedSandboxedProc from rpython.translator.sandbox.sandlib import VirtualizedSocketProc -from rpython.translator.sandbox.test.test_sandbox import compile +from rpython.translator.sandbox.test.test_sandbox import compile, supported from rpython.translator.sandbox.vfs import Dir, File, RealDir, RealFile - + at supported class MockSandboxedProc(SandboxedProc): """A sandbox process wrapper that replays expected syscalls.""" @@ -35,7 +35,7 @@ do_ll_os__ll_os_write = _make_method("write") do_ll_os__ll_os_close = _make_method("close") - + at supported def test_lib(): def entry_point(argv): fd = os.open("/tmp/foobar", os.O_RDONLY, 0777) @@ -63,6 +63,7 @@ proc.handle_forever() assert proc.seen == len(proc.expected) + at supported def test_foobar(): py.test.skip("to be updated") foobar = 
rffi.llexternal("foobar", [rffi.CCHARP], rffi.LONG) @@ -79,6 +80,7 @@ proc.handle_forever() assert proc.seen == len(proc.expected) + at supported def test_simpleio(): def entry_point(argv): print "Please enter a number:" @@ -100,6 +102,7 @@ assert output == "Please enter a number:\nThe double is: 42\n" assert error == "" + at supported def test_socketio(): class SocketProc(VirtualizedSocketProc, SimpleIOSandboxedProc): def build_virtual_root(self): @@ -116,6 +119,7 @@ output, error = proc.communicate("") assert output.startswith('HTTP/1.0 503 Service Unavailable') + at supported def test_oserror(): def entry_point(argv): try: @@ -133,6 +137,7 @@ assert proc.seen == len(proc.expected) + at supported class SandboxedProcWithFiles(VirtualizedSandboxedProc, SimpleIOSandboxedProc): """A sandboxed process with a simple virtualized filesystem. @@ -145,6 +150,7 @@ 'this.pyc': RealFile(__file__), }) + at supported def test_too_many_opens(): def entry_point(argv): try: @@ -186,6 +192,7 @@ assert output == "All ok!\n" assert error == "" + at supported def test_fstat(): def compare(a, b, i): if a != b: @@ -219,6 +226,7 @@ assert output == "All ok!\n" assert error == "" + at supported def test_lseek(): def char_should_be(c, should): if c != should: @@ -248,6 +256,7 @@ assert output == "All ok!\n" assert error == "" + at supported def test_getuid(): def entry_point(argv): import os diff --git a/rpython/translator/sandbox/test/test_vfs.py b/rpython/translator/sandbox/test/test_vfs.py --- a/rpython/translator/sandbox/test/test_vfs.py +++ b/rpython/translator/sandbox/test/test_vfs.py @@ -2,10 +2,13 @@ import sys, stat, os from rpython.translator.sandbox.vfs import * from rpython.tool.udir import udir +from rpython.translator.sandbox.test.test_sandbox import unsupported_platform HASLINK = hasattr(os, 'symlink') def setup_module(mod): + if unsupported_platform[0] == 'True': + py.test.skip(unsupported_platform[1]) d = udir.ensure('test_vfs', dir=1) d.join('file1').write('somedata1') 
d.join('file2').write('somelongerdata2') From noreply at buildbot.pypy.org Sat Apr 26 21:46:42 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 26 Apr 2014 21:46:42 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add range() attributes: start, stop, step. Message-ID: <20140426194642.26B591C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r71009:135689ea903c Date: 2014-04-26 15:40 +0200 http://bitbucket.org/pypy/pypy/changeset/135689ea903c/ Log: Add range() attributes: start, stop, step. diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -7,7 +7,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.typedef import TypeDef, interp_attrproperty_w from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_uint, intmask @@ -459,6 +459,9 @@ __contains__ = interp2app(W_Range.descr_contains), count = interp2app(W_Range.descr_count), index = interp2app(W_Range.descr_index), + start = interp_attrproperty_w('w_start', cls=W_Range), + stop = interp_attrproperty_w('w_stop', cls=W_Range), + step = interp_attrproperty_w('w_step', cls=W_Range), ) W_Range.typedef.acceptable_as_base_class = False diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -449,6 +449,19 @@ x = range(0, -M, 1 - M) assert repr(x) == 'range(0, %s, %s)' % (-M, 1 - M), repr(x) + def test_range_attributes(self): + rangeobj = range(3, 4, 5) + assert rangeobj.start == 3 + assert rangeobj.stop == 4 + assert 
rangeobj.step == 5 + + raises(AttributeError, "rangeobj.start = 0") + raises(AttributeError, "rangeobj.stop = 10") + raises(AttributeError, "rangeobj.step = 1") + raises(AttributeError, "del rangeobj.start") + raises(AttributeError, "del rangeobj.stop") + raises(AttributeError, "del rangeobj.step") + class AppTestReversed: def test_reversed(self): From noreply at buildbot.pypy.org Sat Apr 26 21:46:43 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 26 Apr 2014 21:46:43 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Equality for range objects is now based on equality of the underlying sequences Message-ID: <20140426194643.59DE21C023E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r71010:b33a765ba524 Date: 2014-04-26 21:45 +0200 http://bitbucket.org/pypy/pypy/changeset/b33a765ba524/ Log: Equality for range objects is now based on equality of the underlying sequences (CPython Issue #13201) diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -447,6 +447,31 @@ w_index = space.sub(w_item, self.w_start) return space.floordiv(w_index, self.w_step) + def descr_eq(self, space, w_other): + # Compare two range objects. 
+ if space.is_w(self, w_other): + return space.w_True + if not isinstance(w_other, W_Range): + return space.w_NotImplemented + if not space.eq_w(self.w_length, w_other.w_length): + return space.w_False + if space.eq_w(self.w_length, space.wrap(0)): + return space.w_True + if not space.eq_w(self.w_start, w_other.w_start): + return space.w_False + if space.eq_w(self.w_length, space.wrap(1)): + return space.w_True + return space.eq(self.w_step, w_other.w_step) + + def descr_hash(self, space): + if space.eq_w(self.w_length, space.wrap(0)): + w_tup = space.newtuple([self.w_length, space.w_None, space.w_None]) + elif space.eq_w(self.w_length, space.wrap(0)): + w_tup = space.newtuple([self.w_length, self.w_start, space.w_None]) + else: + w_tup = space.newtuple([self.w_length, self.w_start, self.w_step]) + return space.hash(w_tup) + W_Range.typedef = TypeDef("range", __new__ = interp2app(W_Range.descr_new.im_func), @@ -457,6 +482,8 @@ __reversed__ = interp2app(W_Range.descr_reversed), __reduce__ = interp2app(W_Range.descr_reduce), __contains__ = interp2app(W_Range.descr_contains), + __eq__ = interp2app(W_Range.descr_eq), + __hash__ = interp2app(W_Range.descr_hash), count = interp2app(W_Range.descr_count), index = interp2app(W_Range.descr_index), start = interp_attrproperty_w('w_start', cls=W_Range), diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -462,6 +462,50 @@ raises(AttributeError, "del rangeobj.stop") raises(AttributeError, "del rangeobj.step") + def test_comparison(self): + test_ranges = [range(0), range(0, -1), range(1, 1, 3), + range(1), range(5, 6), range(5, 6, 2), + range(5, 7, 2), range(2), range(0, 4, 2), + range(0, 5, 2), range(0, 6, 2)] + test_tuples = list(map(tuple, test_ranges)) + + # Check that equality of ranges matches equality of the corresponding + # tuples for each pair from the 
test lists above. + ranges_eq = [a == b for a in test_ranges for b in test_ranges] + tuples_eq = [a == b for a in test_tuples for b in test_tuples] + assert ranges_eq == tuples_eq + + # Check that != correctly gives the logical negation of == + ranges_ne = [a != b for a in test_ranges for b in test_ranges] + assert ranges_ne == [not x for x in ranges_eq] + + # Equal ranges should have equal hashes. + for a in test_ranges: + for b in test_ranges: + if a == b: + assert (hash(a), hash(b)) + + # Ranges are unequal to other types (even sequence types) + assert (range(0) == ()) is False + assert (() == range(0)) is False + assert (range(2) == [0, 1]) is False + + # Huge integers aren't a problem. + assert range(0, 2**100 - 1, 2) == range(0, 2**100, 2) + assert hash(range(0, 2**100 - 1, 2)) == hash(range(0, 2**100, 2)) + assert range(0, 2**100, 2) != range(0, 2**100 + 1, 2) + assert (range(2**200, 2**201 - 2**99, 2**100) == + range(2**200, 2**201, 2**100)) + assert (hash(range(2**200, 2**201 - 2**99, 2**100)) == + hash(range(2**200, 2**201, 2**100))) + assert (range(2**200, 2**201, 2**100) != + range(2**200, 2**201 + 1, 2**100)) + + # Order comparisons are not implemented for ranges. 
+ raises(TypeError, "range(0) < range(0)") + raises(TypeError, "range(0) > range(0)") + raises(TypeError, "range(0) <= range(0)") + raises(TypeError, "range(0) >= range(0)") class AppTestReversed: def test_reversed(self): From noreply at buildbot.pypy.org Sun Apr 27 00:00:25 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 27 Apr 2014 00:00:25 +0200 (CEST) Subject: [pypy-commit] pypy default: add test for issue 1696 Message-ID: <20140426220025.3328C1C1149@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71011:5429a238b778 Date: 2014-04-27 00:56 +0300 http://bitbucket.org/pypy/pypy/changeset/5429a238b778/ Log: add test for issue 1696 diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -334,7 +334,9 @@ self.writefile("directory/package/__init__.py", "") importer = zipimport.zipimporter(self.zipfile + "/directory") l = [i for i in zipimport._zip_directory_cache] - assert len(l) + assert len(l) == 1 + k = zipimport._zip_directory_cache[l[0]].keys() + assert k[0] == os.path.sep.join(['directory','package','__init__.py']) def test_path_hooks(self): import sys From noreply at buildbot.pypy.org Sun Apr 27 10:42:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 10:42:10 +0200 (CEST) Subject: [pypy-commit] pypy default: issue1743: fix for a726eef8da83 Message-ID: <20140427084210.298241C3201@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71012:d083e472a6ab Date: 2014-04-27 10:40 +0200 http://bitbucket.org/pypy/pypy/changeset/d083e472a6ab/ Log: issue1743: fix for a726eef8da83 diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -299,9 +299,13 @@ """ self._application_traceback = traceback - at specialize.memo() + +class ClearedOpErr: + def __init__(self, space): + 
self.operr = OperationError(space.w_None, space.w_None) + def get_cleared_operation_error(space): - return OperationError(space.w_None, space.w_None) + return space.fromcache(ClearedOpErr).operr # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -205,11 +205,14 @@ def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!! """Implements sys.exc_info(). Return an OperationError instance or None.""" - frame = self.gettopframe_nohidden() + frame = self.gettopframe() while frame: if frame.last_exception is not None: - return frame.last_exception - frame = self.getnextframe_nohidden(frame) + if (not frame.hide() or + frame.last_exception is + get_cleared_operation_error(self.space)): + return frame.last_exception + frame = frame.f_backref() return None def set_sys_exc_info(self, operror): diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -1,6 +1,6 @@ from rpython.rlib.rstacklet import StackletThread from rpython.rlib import jit -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef @@ -39,6 +39,7 @@ bottomframe.locals_stack_w[1] = w_callable bottomframe.locals_stack_w[2] = w_args bottomframe.locals_stack_w[3] = w_kwds + bottomframe.last_exception = get_cleared_operation_error(space) self.bottomframe = bottomframe # global_state.origin = self diff --git 
a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -684,3 +684,17 @@ execfile(self.translated, d) d['set_fast_mode']() d['test_various_depths']() + + def test_exc_info_doesnt_follow_continuations(self): + import sys + from _continuation import continulet + # + def f1(c1): + return sys.exc_info() + # + c1 = continulet(f1) + try: + 1 // 0 + except ZeroDivisionError: + got = c1.switch() + assert got == (None, None, None) From noreply at buildbot.pypy.org Sun Apr 27 10:58:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 10:58:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a warning that it's tested with PyPy2. Message-ID: <20140427085815.084D31C3201@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71013:8d74019d7760 Date: 2014-04-27 10:57 +0200 http://bitbucket.org/pypy/pypy/changeset/8d74019d7760/ Log: Add a warning that it's tested with PyPy2. diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -42,6 +42,10 @@ use this sandboxed PyPy from a regular Python interpreter (CPython, or an unsandboxed PyPy). Contributions welcome. +.. warning:: + + Tested with PyPy2. May not work out of the box with PyPy3. 
+ Overview -------- From noreply at buildbot.pypy.org Sun Apr 27 11:02:41 2014 From: noreply at buildbot.pypy.org (kirbyfan64sos) Date: Sun, 27 Apr 2014 11:02:41 +0200 (CEST) Subject: [pypy-commit] pypy default: Fixed issue 1739 Message-ID: <20140427090241.345E21D240F@cobra.cs.uni-duesseldorf.de> Author: Ryan Gonzalez Branch: Changeset: r71014:c9b6527cd5a6 Date: 2014-04-23 20:32 +0000 http://bitbucket.org/pypy/pypy/changeset/c9b6527cd5a6/ Log: Fixed issue 1739 diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -48,8 +48,8 @@ ('r', True): O_RDWR, ('w', False): O_WRONLY | O_CREAT | O_TRUNC, ('w', True): O_RDWR | O_CREAT | O_TRUNC, - ('a', False): O_WRONLY | O_CREAT, - ('a', True): O_RDWR | O_CREAT, + ('a', False): O_WRONLY | O_CREAT | O_APPEND, + ('a', True): O_RDWR | O_CREAT | O_APPEND, } class MyNotImplementedError(Exception): From noreply at buildbot.pypy.org Sun Apr 27 11:02:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:02:42 +0200 (CEST) Subject: [pypy-commit] pypy default: Fixed issue 1739 Message-ID: <20140427090242.5F9F51D240F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71015:0db096e05bee Date: 2014-04-27 11:02 +0200 http://bitbucket.org/pypy/pypy/changeset/0db096e05bee/ Log: Fixed issue 1739 Merged in kirbyfan64sos/pypy (pull request #232). Tests missing so far, but I'm merging anyway for now. 
diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -48,8 +48,8 @@ ('r', True): O_RDWR, ('w', False): O_WRONLY | O_CREAT | O_TRUNC, ('w', True): O_RDWR | O_CREAT | O_TRUNC, - ('a', False): O_WRONLY | O_CREAT, - ('a', True): O_RDWR | O_CREAT, + ('a', False): O_WRONLY | O_CREAT | O_APPEND, + ('a', True): O_RDWR | O_CREAT | O_APPEND, } class MyNotImplementedError(Exception): From noreply at buildbot.pypy.org Sun Apr 27 11:05:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:05:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Bah. Message-ID: <20140427090505.DB42A1D240F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71016:f80ed2c760f6 Date: 2014-04-27 11:04 +0200 http://bitbucket.org/pypy/pypy/changeset/f80ed2c760f6/ Log: Bah. diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -40,7 +40,7 @@ from rpython.rlib import rposix from rpython.rlib.rstring import StringBuilder -from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC +from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC, O_APPEND O_BINARY = getattr(os, "O_BINARY", 0) # (basemode, plus) From noreply at buildbot.pypy.org Sun Apr 27 11:26:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:47 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head ef46b0734b9c on branch numpy-andrew-tests Message-ID: <20140427092647.2A55A1D29D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71017:22ab30b21fb5 Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/22ab30b21fb5/ Log: Merge closed head ef46b0734b9c on branch numpy-andrew-tests From noreply at buildbot.pypy.org Sun Apr 27 11:26:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:48 +0200 (CEST) Subject: [pypy-commit] pypy 
closed-branches: Merge closed head 28f44947550a on branch remove-num-smm Message-ID: <20140427092648.332BB1D29D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71018:bc04463672bc Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/bc04463672bc/ Log: Merge closed head 28f44947550a on branch remove-num-smm From noreply at buildbot.pypy.org Sun Apr 27 11:26:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:49 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 505a7c83ec97 on branch link-old-glibc-abi Message-ID: <20140427092649.358051D29DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71019:e8cfbea9dec3 Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/e8cfbea9dec3/ Log: Merge closed head 505a7c83ec97 on branch link-old-glibc-abi From noreply at buildbot.pypy.org Sun Apr 27 11:26:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:50 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head a1c20073e6f9 on branch bounds-int-add-or Message-ID: <20140427092650.371901D29DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71020:e87f9a40ae1a Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/e87f9a40ae1a/ Log: Merge closed head a1c20073e6f9 on branch bounds-int-add-or From noreply at buildbot.pypy.org Sun Apr 27 11:26:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:51 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 651f13647de9 on branch numpypy-remove-scalar Message-ID: <20140427092651.347A71D29DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71021:500e731dbeee Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/500e731dbeee/ Log: Merge closed head 651f13647de9 on branch numpypy-remove-scalar 
From noreply at buildbot.pypy.org Sun Apr 27 11:26:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:52 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head d19334a5b622 on branch numpypy-ellipse-indexing Message-ID: <20140427092652.353A41D29DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71022:d92ac4235536 Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/d92ac4235536/ Log: Merge closed head d19334a5b622 on branch numpypy-ellipse-indexing From noreply at buildbot.pypy.org Sun Apr 27 11:26:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:53 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head aaf78b39188c on branch numpy-refactor Message-ID: <20140427092653.45D641D29DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71023:c83ac6c33ca6 Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/c83ac6c33ca6/ Log: Merge closed head aaf78b39188c on branch numpy-refactor From noreply at buildbot.pypy.org Sun Apr 27 11:26:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:54 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head c20fe0db3889 on branch simple-range-strategy Message-ID: <20140427092654.428C11D29DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71024:2bd15d2a6966 Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/2bd15d2a6966/ Log: Merge closed head c20fe0db3889 on branch simple-range-strategy From noreply at buildbot.pypy.org Sun Apr 27 11:26:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:55 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 6247a9f002ec on branch numpypy-nditer Message-ID: <20140427092655.41A191D29DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
closed-branches Changeset: r71025:7d13f1383222 Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/7d13f1383222/ Log: Merge closed head 6247a9f002ec on branch numpypy-nditer From noreply at buildbot.pypy.org Sun Apr 27 11:26:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:56 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 5930d01b5418 on branch numpy-speed Message-ID: <20140427092656.3BAB61D29DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71026:471d41de05c8 Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/471d41de05c8/ Log: Merge closed head 5930d01b5418 on branch numpy-speed From noreply at buildbot.pypy.org Sun Apr 27 11:26:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:57 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 81323a128386 on branch openbsd-lib-prefix Message-ID: <20140427092657.3E8481D29E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71027:3b8b1636cfcf Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/3b8b1636cfcf/ Log: Merge closed head 81323a128386 on branch openbsd-lib-prefix From noreply at buildbot.pypy.org Sun Apr 27 11:26:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Apr 2014 11:26:58 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: re-close this branch Message-ID: <20140427092658.3C2961D29E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: closed-branches Changeset: r71028:34dd4da8ce57 Date: 2014-04-27 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/34dd4da8ce57/ Log: re-close this branch From noreply at buildbot.pypy.org Sun Apr 27 19:05:05 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 27 Apr 2014 19:05:05 +0200 (CEST) Subject: [pypy-commit] pypy default: don't use deprecated 'new' module Message-ID: 
<20140427170505.47D7E1C3395@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71029:26d73c5f5c38 Date: 2014-04-27 18:04 +0100 http://bitbucket.org/pypy/pypy/changeset/26d73c5f5c38/ Log: don't use deprecated 'new' module diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1,5 +1,5 @@ from __future__ import with_statement -import new +import types import py from contextlib import contextmanager @@ -943,7 +943,7 @@ def monkey_patch_code(self, code, stacksize, flags, codestring, names, varnames): c = code - return new.code(c.co_argcount, c.co_nlocals, stacksize, flags, + return types.CodeType(c.co_argcount, c.co_nlocals, stacksize, flags, codestring, c.co_consts, names, varnames, c.co_filename, c.co_name, c.co_firstlineno, c.co_lnotab) @@ -964,7 +964,7 @@ # this code is generated by pypy-c when compiling above f pypy_code = 't\x00\x00\x83\x00\x00}\x00\x00|\x00\x00\xc9\x01\x00\xca\x00\x00S' new_c = self.monkey_patch_code(f.func_code, 3, 3, pypy_code, ('X', 'x', 'm'), ('x',)) - f2 = new.function(new_c, locals(), 'f') + f2 = types.FunctionType(new_c, locals(), 'f') graph = self.codetest(f2) all_ops = self.all_operations(graph) @@ -984,7 +984,7 @@ pypy_code = 'd\x01\x00\xcb\x00\x00D]\x0c\x00}\x00\x00|\x00\x00^\x02\x00q\x07\x00S' new_c = self.monkey_patch_code(f.func_code, 3, 67, pypy_code, (), ('i',)) - f2 = new.function(new_c, locals(), 'f') + f2 = types.FunctionType(new_c, locals(), 'f') graph = self.codetest(f2) all_ops = self.all_operations(graph) diff --git a/rpython/tool/sourcetools.py b/rpython/tool/sourcetools.py --- a/rpython/tool/sourcetools.py +++ b/rpython/tool/sourcetools.py @@ -6,7 +6,7 @@ # XXX We should try to generalize and single out one approach to dynamic # XXX code compilation. 
-import sys, os, inspect, new +import sys, os, inspect, types import py def render_docstr(func, indent_str='', closing_str=''): @@ -127,7 +127,7 @@ for name in names: if name not in kwargs: kwargs[name] = getattr(fromcode, name) - return new.code( + return types.CodeType( kwargs['co_argcount'], kwargs['co_nlocals'], kwargs['co_stacksize'], @@ -218,9 +218,8 @@ """Make a renamed copy of a function.""" if globals is None: globals = func.func_globals - f = new.function(func.func_code, globals, - newname, func.func_defaults, - func.func_closure) + f = types.FunctionType(func.func_code, globals, newname, + func.func_defaults, func.func_closure) if func.func_dict: f.func_dict = {} f.func_dict.update(func.func_dict) From noreply at buildbot.pypy.org Sun Apr 27 21:46:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 27 Apr 2014 21:46:32 +0200 (CEST) Subject: [pypy-commit] pypy default: fix _zip_directory_cache Message-ID: <20140427194632.6FE6D1C244E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71030:5a387dadf128 Date: 2014-04-27 22:44 +0300 http://bitbucket.org/pypy/pypy/changeset/5a387dadf128/ Log: fix _zip_directory_cache diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py --- a/pypy/module/zipimport/interp_zipimport.py +++ b/pypy/module/zipimport/interp_zipimport.py @@ -56,6 +56,8 @@ w = space.wrap w_d = space.newdict() for key, info in w_zipimporter.zip_file.NameToInfo.iteritems(): + if ZIPSEP != os.path.sep: + key = key.replace(ZIPSEP, os.path.sep) space.setitem(w_d, w(key), space.newtuple([ w(info.filename), w(info.compress_type), w(info.compress_size), w(info.file_size), w(info.file_offset), w(info.dostime), diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -157,7 +157,6 @@ import sys, os self.writefile("uuu.py", "def f(x): 
return x") mod = __import__('uuu', globals(), locals(), []) - print mod assert mod.f(3) == 3 expected = { '__doc__' : None, From noreply at buildbot.pypy.org Sun Apr 27 22:22:07 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 27 Apr 2014 22:22:07 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test collection crash on msvc Message-ID: <20140427202207.446BA1D2932@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71031:83ace05284a9 Date: 2014-04-27 23:00 +0300 http://bitbucket.org/pypy/pypy/changeset/83ace05284a9/ Log: fix test collection crash on msvc diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -521,6 +521,15 @@ /* misc helpers ----------------------------------------------------------- */ +#if defined(_MSC_VER) +long long cppyy_strtoll(const char* str) { + return _strtoi64(str, NULL, 0); +} + +extern "C" unsigned long long cppyy_strtoull(const char* str) { + return _strtoui64(str, NULL, 0); +} +#else long long cppyy_strtoll(const char* str) { return strtoll(str, NULL, 0); } @@ -528,6 +537,7 @@ extern "C" unsigned long long cppyy_strtoull(const char* str) { return strtoull(str, NULL, 0); } +#endif void cppyy_free(void* ptr) { free(ptr); From noreply at buildbot.pypy.org Mon Apr 28 02:15:38 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 28 Apr 2014 02:15:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Use collections.MutableMapping instead of deprecated UserDict.DictMixin Message-ID: <20140428001538.1312A1C01CB@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71032:744ec0879eb5 Date: 2014-04-28 01:14 +0100 http://bitbucket.org/pypy/pypy/changeset/744ec0879eb5/ Log: Use collections.MutableMapping instead of deprecated UserDict.DictMixin diff --git a/rpython/tool/identity_dict.py b/rpython/tool/identity_dict.py --- a/rpython/tool/identity_dict.py +++ 
b/rpython/tool/identity_dict.py @@ -3,15 +3,15 @@ except ImportError: idict = None -from UserDict import DictMixin +from collections import MutableMapping -class IdentityDictPurePython(object, DictMixin): +class IdentityDictPurePython(MutableMapping): __slots__ = "_dict _keys".split() def __init__(self): self._dict = {} - self._keys = {} # id(obj) -> obj + self._keys = {} # id(obj) -> obj def __getitem__(self, arg): return self._dict[id(arg)] @@ -24,8 +24,11 @@ del self._keys[id(arg)] del self._dict[id(arg)] - def keys(self): - return self._keys.values() + def __iter__(self): + return self._keys.itervalues() + + def __len__(self): + return len(self._keys) def __contains__(self, arg): return id(arg) in self._dict @@ -37,8 +40,7 @@ return d -class IdentityDictPyPy(object, DictMixin): - __slots__ = ["_dict"] +class IdentityDictPyPy(MutableMapping): def __init__(self): self._dict = idict() @@ -52,8 +54,11 @@ def __delitem__(self, arg): del self._dict[arg] - def keys(self): - return self._dict.keys() + def __iter__(self): + return iter(self._dict.keys()) + + def __len__(self): + return len(self._dict) def __contains__(self, arg): return arg in self._dict @@ -64,8 +69,10 @@ assert len(d) == len(self) return d + def __nonzero__(self): + return bool(self._dict) + if idict is None: identity_dict = IdentityDictPurePython else: identity_dict = IdentityDictPyPy - From noreply at buildbot.pypy.org Mon Apr 28 06:18:48 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 28 Apr 2014 06:18:48 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test for cpython and pypy Message-ID: <20140428041848.278C11C01CB@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71033:6b69d6b92167 Date: 2014-04-28 07:17 +0300 http://bitbucket.org/pypy/pypy/changeset/6b69d6b92167/ Log: fix test for cpython and pypy diff --git a/pypy/module/zipimport/test/test_undocumented.py b/pypy/module/zipimport/test/test_undocumented.py --- 
a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -135,8 +135,7 @@ importer = zipimport.zipimporter(os.path.join(zip_path, '_pkg')) assert zip_path in zipimport._zip_directory_cache file_set = set(zipimport._zip_directory_cache[zip_path].iterkeys()) - compare_set = set(path.replace(os.path.sep, '/') + '.py' - for path in self.created_paths) + compare_set = set(path + '.py' for path in self.created_paths) assert file_set == compare_set finally: self.cleanup_zipfile(self.created_paths) From noreply at buildbot.pypy.org Mon Apr 28 09:35:22 2014 From: noreply at buildbot.pypy.org (kirbyfan64sos) Date: Mon, 28 Apr 2014 09:35:22 +0200 (CEST) Subject: [pypy-commit] pypy default: Added test for fix for issue 1739 Message-ID: <20140428073522.3577B1D25F6@cobra.cs.uni-duesseldorf.de> Author: Ryan Gonzalez Branch: Changeset: r71034:bc7d31eac294 Date: 2014-04-27 22:30 +0000 http://bitbucket.org/pypy/pypy/changeset/bc7d31eac294/ Log: Added test for fix for issue 1739 diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -1104,6 +1104,25 @@ finally: signal(SIGALRM, SIG_DFL) + def test_append_mode(self): + try: + fo = streamio.open_file_as_stream # shorthand + x = fo('.test.file', 'w') + x.write('abc123') + x.close() + + x = fo('.test.file', 'a') + x.write('456') + x.close() + x = fo('.test.file', 'r') + assert x.read() == 'abc123456' + x.close() + except: + raise + finally: + if os.path.exists('.test.file'): + os.remove('.test.file') + # Speed test From noreply at buildbot.pypy.org Mon Apr 28 09:35:23 2014 From: noreply at buildbot.pypy.org (kirbyfan64sos) Date: Mon, 28 Apr 2014 09:35:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed redundant 'except' clause Message-ID: <20140428073523.61FFC1D25F6@cobra.cs.uni-duesseldorf.de> Author: Ryan Gonzalez Branch: Changeset: r71035:f1b863443b2d Date: 
2014-04-27 23:16 +0000 http://bitbucket.org/pypy/pypy/changeset/f1b863443b2d/ Log: Removed redundant 'except' clause diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -1117,8 +1117,6 @@ x = fo('.test.file', 'r') assert x.read() == 'abc123456' x.close() - except: - raise finally: if os.path.exists('.test.file'): os.remove('.test.file') From noreply at buildbot.pypy.org Mon Apr 28 09:35:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 09:35:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the test: Message-ID: <20140428073524.863AB1D25F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71036:d63b49940eff Date: 2014-04-28 09:30 +0200 http://bitbucket.org/pypy/pypy/changeset/d63b49940eff/ Log: Fix the test: - make it pass (O_APPEND missing) - make it fail before the O_APPEND change diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -40,7 +40,7 @@ from rpython.rlib import rposix from rpython.rlib.rstring import StringBuilder -from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC +from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC, O_APPEND O_BINARY = getattr(os, "O_BINARY", 0) # (basemode, plus) diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -1105,21 +1105,19 @@ signal(SIGALRM, SIG_DFL) def test_append_mode(self): - try: - fo = streamio.open_file_as_stream # shorthand - x = fo('.test.file', 'w') - x.write('abc123') - x.close() + tfn = str(udir.join('streamio-append-mode')) + fo = streamio.open_file_as_stream # shorthand + x = fo(tfn, 'w') + x.write('abc123') + x.close() - x = fo('.test.file', 'a') - x.write('456') - x.close() - x = fo('.test.file', 'r') - assert x.read() == 'abc123456' - 
x.close() - finally: - if os.path.exists('.test.file'): - os.remove('.test.file') + x = fo(tfn, 'a') + x.seek(0, 0) + x.write('456') + x.close() + x = fo(tfn, 'r') + assert x.read() == 'abc123456' + x.close() # Speed test From noreply at buildbot.pypy.org Mon Apr 28 09:35:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 09:35:29 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140428073529.24C5A1D25F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71037:da4834c4bc34 Date: 2014-04-28 09:30 +0200 http://bitbucket.org/pypy/pypy/changeset/da4834c4bc34/ Log: merge heads diff too long, truncating to 2000 out of 11379 lines diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py --- a/dotviewer/graphserver.py +++ b/dotviewer/graphserver.py @@ -160,15 +160,14 @@ " | instructions in dotviewer/sshgraphserver.py\n") try: import pygame - except ImportError: + if isinstance(e, pygame.error): + print >> f, help + except Exception, e: f.seek(0) f.truncate() - print >> f, "ImportError" + print >> f, "%s: %s" % (e.__class__.__name__, e) print >> f, " | Pygame is not installed; either install it, or" print >> f, help - else: - if isinstance(e, pygame.error): - print >> f, help io.sendmsg(msgstruct.MSG_ERROR, f.getvalue()) else: listen_server(sys.argv[1]) diff --git a/lib-python/2.7/test/test_array.py b/lib-python/2.7/test/test_array.py --- a/lib-python/2.7/test/test_array.py +++ b/lib-python/2.7/test/test_array.py @@ -298,6 +298,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a + b + with self.assertRaises(TypeError): a + 'bad' @@ -320,6 +321,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a += b + with self.assertRaises(TypeError): a += 'bad' diff --git a/lib-python/2.7/test/test_builtin.py b/lib-python/2.7/test/test_builtin.py --- a/lib-python/2.7/test/test_builtin.py +++ b/lib-python/2.7/test/test_builtin.py @@ -250,14 +250,12 @@ 
self.assertRaises(TypeError, compile) self.assertRaises(ValueError, compile, 'print 42\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print 42\n', '', 'single', 0xff) - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') if have_unicode: compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad') diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -479,11 +479,10 @@ def _create_file(self): if self.use_buffering: - f = open(self.filename, "w+", buffering=1024*16) + self.f = open(self.filename, "w+", buffering=1024*16) else: - f = open(self.filename, "w+") - self.f = f - self.all_files.append(f) + self.f = open(self.filename, "w+") + self.all_files.append(self.f) oldf = self.all_files.pop(0) if oldf is not None: oldf.close() diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = 
combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -115,8 +115,8 @@ self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object - self.assertRaises((ValueError, TypeError), setitem, 0, b"") - self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") + self.assertRaises(ValueError, setitem, 0, b"") + self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") @@ -166,18 +166,11 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - if test_support.check_impl_detail(cpython=True): - # what is supported and what is not supported by memoryview is - # very inconsisten on CPython. In PyPy, memoryview supports - # the buffer interface, and thus the following comparison - # succeeds. 
See also the comment in - # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer - # - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef") + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -17,7 +18,8 @@ output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) + with fp: + imp.load_module('_ctypes_test', fp, filename, description) except ImportError: print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,7 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) + with fp: + imp.load_module('_testcapi', fp, filename, description) except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -560,6 +560,12 @@ Fixing these bootstrap problems is on the TODO list. The global namespace is ``cppyy.gbl``. 
+* **NULL**: Is represented as ``cppyy.gbl.nullptr``. + In C++11, the keyword ``nullptr`` is used to represent ``NULL``. + For clarity of intent, it is recommended to use this instead of ``None`` + (or the integer ``0``, which can serve in some cases), as ``None`` is better + understood as ``void`` in C++. + * **operator conversions**: If defined in the C++ class and a python equivalent exists (i.e. all builtin integer and floating point types, as well as ``bool``), it will map onto that python conversion. @@ -577,7 +583,7 @@ Special care needs to be taken for global operator overloads in C++: first, make sure that they are actually reflected, especially for the global overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterator over a vector). + the case of gcc (note that they are not needed to iterate over a vector). Second, make sure that reflection info is loaded in the proper order. I.e. that these global overloads are available before use. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -66,58 +66,26 @@ Reflex ====== -This method is still experimental. It adds the `cppyy`_ module. -The method works by using the `Reflex package`_ to provide reflection -information of the C++ code, which is then used to automatically generate -bindings at runtime. -From a python standpoint, there is no difference between generating bindings -at runtime, or having them "statically" generated and available in scripts -or compiled into extension modules: python classes and functions are always -runtime structures, created when a script or module loads. +The builtin `cppyy`_ module uses reflection information, provided by +`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +automatically generate bindings at runtime. 
+In Python, classes and functions are always runtime structures, so when they +are generated matters not for performance. However, if the backend itself is capable of dynamic behavior, it is a much -better functional match to python, allowing tighter integration and more -natural language mappings. -Full details are `available here`_. +better functional match, allowing tighter integration and more natural +language mappings. + +The `cppyy`_ module is written in RPython, thus PyPy's JIT is able to remove +most cross-language call overhead. + +`Full details`_ are `available here`_. .. _`cppyy`: cppyy.html -.. _`reflex-support`: cppyy.html -.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex +.. _`installed separately`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 +.. _`Reflex`: http://root.cern.ch/drupal/content/reflex +.. _`Full details`: cppyy.html .. _`available here`: cppyy.html -Pros ----- - -The cppyy module is written in RPython, which makes it possible to keep the -code execution visible to the JIT all the way to the actual point of call into -C++, thus allowing for a very fast interface. -Reflex is currently in use in large software environments in High Energy -Physics (HEP), across many different projects and packages, and its use can be -virtually completely automated in a production environment. -One of its uses in HEP is in providing language bindings for CPython. -Thus, it is possible to use Reflex to have bound code work on both CPython and -on PyPy. -In the medium-term, Reflex will be replaced by `cling`_, which is based on -`llvm`_. -This will affect the backend only; the python-side interface is expected to -remain the same, except that cling adds a lot of dynamic behavior to C++, -enabling further language integration. - -.. _`cling`: http://root.cern.ch/drupal/content/cling -.. _`llvm`: http://llvm.org/ - -Cons ----- - -C++ is a large language, and cppyy is not yet feature-complete. 
-Still, the experience gained in developing the equivalent bindings for CPython -means that adding missing features is a simple matter of engineering, not a -question of research. -The module is written so that currently missing features should do no harm if -you don't use them, if you do need a particular feature, it may be necessary -to work around it in python or with a C++ helper function. -Although Reflex works on various platforms, the bindings with PyPy have only -been tested on Linux. - RPython Mixed Modules ===================== diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,88 @@ +======================================= +PyPy 2.3 - XXXX TODO +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. We showed quite a bit of progress on all three projects (see below) +and we're slowly running out of funds. +Please consider donating more so we can finish those projects! The three +projects are: + +* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. + +* STM (software transactional memory): a preview will be released very soon, + as soon as we fix a few bugs + +* NumPy: the work done is included in the PyPy 2.2 release. More details below. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. 
+ +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* Our Garbage Collector is now "incremental". It should avoid almost + all pauses due to a major collection taking place. Previously, it + would pause the program (rarely) to walk all live objects, which + could take arbitrarily long if your process is using a whole lot of + RAM. Now the same work is done in steps. This should make PyPy + more responsive, e.g. in games. There are still other pauses, from + the GC and the JIT, but they should be on the order of 5 + milliseconds each. + +* The JIT counters for hot code were never reset, which meant that a + process running for long enough would eventually JIT-compile more + and more rarely executed code. Not only is it useless to compile + such code, but as more compiled code means more memory used, this + gives the impression of a memory leak. This has been tentatively + fixed by decreasing the counters from time to time. + +* NumPy has been split: now PyPy only contains the core module, called + ``_numpypy``. The ``numpy`` module itself has been moved to + ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. + You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. 
+ +* non-inlined calls have less overhead + +* Things that use ``sys.set_trace`` are now JITted (like coverage) + +* JSON decoding is now very fast (JSON encoding was already very fast) + +* various buffer copying methods experience speedups (like list-of-ints to + ``int[]`` buffer from cffi) + +* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, + including ``os.startfile()`` on Windows and a handful of rare ones + on Posix. + +* numpy has a rudimentary C API that cooperates with ``cpyext`` + +Cheers, +Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -42,6 +42,10 @@ use this sandboxed PyPy from a regular Python interpreter (CPython, or an unsandboxed PyPy). Contributions welcome. +.. warning:: + + Tested with PyPy2. May not work out of the box with PyPy3. + Overview -------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,7 +5,10 @@ .. this is a revision shortly after release-2.3.x .. startrev: ba569fe1efdb - - .. branch: small-unroll-improvements Improve optimiziation of small allocation-heavy loops in the JIT + +.. branch: reflex-support + +.. 
branch: refactor-buffer-api +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -5,7 +5,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id) + compute_unique_id, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX @@ -194,13 +194,37 @@ def immutable_unique_id(self, space): return None - def buffer_w(self, space): + def buffer_w(self, space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): - return w_result.buffer_w(space) - self._typed_unwrap_error(space, "buffer") + return w_result.buffer_w(space, flags) + raise TypeError + + def readbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.readbuf_w(space) + raise TypeError + + def writebuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.writebuf_w(space) + raise TypeError + + def charbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.charbuf_w(space) + raise TypeError def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -1340,25 +1364,111 @@ self.wrap('cannot convert negative 
integer ' 'to unsigned int')) - def buffer_w(self, w_obj): - return w_obj.buffer_w(self) + BUF_SIMPLE = 0x0000 + BUF_WRITABLE = 0x0001 + BUF_FORMAT = 0x0004 + BUF_ND = 0x0008 + BUF_STRIDES = 0x0010 | BUF_ND + BUF_INDIRECT = 0x0100 | BUF_STRIDES - def rwbuffer_w(self, w_obj): - # returns a RWBuffer instance - from pypy.interpreter.buffer import RWBuffer - buffer = self.buffer_w(w_obj) - if not isinstance(buffer, RWBuffer): - raise OperationError(self.w_TypeError, - self.wrap('read-write buffer expected')) - return buffer + BUF_CONTIG_RO = BUF_ND + BUF_CONTIG = BUF_ND | BUF_WRITABLE - def bufferstr_new_w(self, w_obj): - # Implement the "new buffer interface" (new in Python 2.7) - # returning an unwrapped string. It doesn't accept unicode - # strings - buffer = self.buffer_w(w_obj) - return buffer.as_str() + BUF_FULL_RO = BUF_INDIRECT | BUF_FORMAT + BUF_FULL = BUF_INDIRECT | BUF_FORMAT | BUF_WRITABLE + def check_buf_flags(self, flags, readonly): + if readonly and flags & self.BUF_WRITABLE == self.BUF_WRITABLE: + raise oefmt(self.w_BufferError, "Object is not writable.") + + def buffer_w(self, w_obj, flags): + # New buffer interface, returns a buffer based on flags (PyObject_GetBuffer) + try: + return w_obj.buffer_w(self, flags) + except TypeError: + raise oefmt(self.w_TypeError, + "'%T' does not have the buffer interface", w_obj) + + def readbuf_w(self, w_obj): + # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) + try: + return w_obj.readbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a readable buffer object") + + def writebuf_w(self, w_obj): + # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) + try: + return w_obj.writebuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a writeable buffer object") + + def charbuf_w(self, w_obj): + # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) + try: + return w_obj.charbuf_w(self) + except TypeError: + 
raise oefmt(self.w_TypeError, + "expected a character buffer object") + + def _getarg_error(self, expected, w_obj): + if self.is_none(w_obj): + name = "None" + else: + name = self.type(w_obj).get_module_type_name() + raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + + @specialize.arg(1) + def getarg_w(self, code, w_obj): + if code == 'z*': + if self.is_none(w_obj): + return None + code = 's*' + if code == 's*': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.readbuf_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).readbuf_w(self) + try: + return w_obj.buffer_w(self, 0) + except TypeError: + pass + try: + return w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + elif code == 's#': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.str_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).str_w(self) + try: + return w_obj.readbuf_w(self).as_str() + except TypeError: + self._getarg_error("string or read-only buffer", w_obj) + elif code == 'w*': + try: + try: + return w_obj.buffer_w(self, self.BUF_WRITABLE) + except OperationError: + self._getarg_error("read-write buffer", w_obj) + except TypeError: + pass + try: + return w_obj.writebuf_w(self) + except TypeError: + self._getarg_error("read-write buffer", w_obj) + elif code == 't#': + try: + return w_obj.charbuf_w(self) + except TypeError: + self._getarg_error("string or read-only character buffer", w_obj) + else: + assert False + + # XXX rename/replace with code more like CPython getargs for buffers def bufferstr_w(self, w_obj): # Directly returns an interp-level str. 
Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): @@ -1373,8 +1483,18 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - buffer = self.buffer_w(w_obj) - return buffer.as_str() + try: + buf = w_obj.buffer_w(self, 0) + except TypeError: + pass + else: + return buf.as_str() + try: + buf = w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + else: + return buf.as_str() def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): @@ -1721,6 +1841,7 @@ 'AssertionError', 'AttributeError', 'BaseException', + 'BufferError', 'DeprecationWarning', 'EOFError', 'EnvironmentError', diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py deleted file mode 100644 --- a/pypy/interpreter/buffer.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Buffer protocol support. -""" -from rpython.rlib.objectmodel import import_from_mixin - - -class Buffer(object): - """Abstract base class for buffers.""" - __slots__ = [] - - def getlength(self): - raise NotImplementedError - - def as_str(self): - "Returns an interp-level string with the whole content of the buffer." - # May be overridden. - return self.getslice(0, self.getlength(), 1, self.getlength()) - - def getitem(self, index): - "Returns the index'th character in the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def getslice(self, start, stop, step, size): - # May be overridden. No bounds checks. - return ''.join([self.getitem(i) for i in range(start, stop, step)]) - - def get_raw_address(self): - raise ValueError("no raw buffer") - - def is_writable(self): - return False - - -class RWBuffer(Buffer): - """Abstract base class for read-write buffers.""" - __slots__ = [] - - def is_writable(self): - return True - - def setitem(self, index, char): - "Write a character into the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. 
- - def setslice(self, start, string): - # May be overridden. No bounds checks. - for i in range(len(string)): - self.setitem(start + i, string[i]) - - -class StringBuffer(Buffer): - __slots__ = ['value'] - - def __init__(self, value): - self.value = value - - def getlength(self): - return len(self.value) - - def as_str(self): - return self.value - - def getitem(self, index): - return self.value[index] - - def getslice(self, start, stop, step, size): - if size == 0: - return "" - if step == 1: - assert 0 <= start <= stop - return self.value[start:stop] - return "".join([self.value[start + i*step] for i in xrange(size)]) -# ____________________________________________________________ - - -class SubBufferMixin(object): - _attrs_ = ['buffer', 'offset', 'size'] - - def __init__(self, buffer, offset, size): - self.buffer = buffer - self.offset = offset - self.size = size - - def getlength(self): - at_most = self.buffer.getlength() - self.offset - if 0 <= self.size <= at_most: - return self.size - elif at_most >= 0: - return at_most - else: - return 0 - - def getitem(self, index): - return self.buffer.getitem(self.offset + index) - - def getslice(self, start, stop, step, size): - if start == stop: - return '' # otherwise, adding self.offset might make them - # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, - step, size) - - -class SubBuffer(Buffer): - import_from_mixin(SubBufferMixin) - - -class RWSubBuffer(RWBuffer): - import_from_mixin(SubBufferMixin) - - def setitem(self, index, char): - self.buffer.setitem(self.offset + index, char) - - def setslice(self, start, string): - if len(string) == 0: - return # otherwise, adding self.offset might make 'start' - # out of bounds - self.buffer.setslice(self.offset + start, string) diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -299,9 +299,13 @@ """ self._application_traceback = traceback - at 
specialize.memo() + +class ClearedOpErr: + def __init__(self, space): + self.operr = OperationError(space.w_None, space.w_None) + def get_cleared_operation_error(space): - return OperationError(space.w_None, space.w_None) + return space.fromcache(ClearedOpErr).operr # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -205,11 +205,14 @@ def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!! """Implements sys.exc_info(). Return an OperationError instance or None.""" - frame = self.gettopframe_nohidden() + frame = self.gettopframe() while frame: if frame.last_exception is not None: - return frame.last_exception - frame = self.getnextframe_nohidden(frame) + if (not frame.hide() or + frame.last_exception is + get_cleared_operation_error(self.space)): + return frame.last_exception + frame = frame.f_backref() return None def set_sys_exc_info(self, operror): diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -15,10 +15,11 @@ space.setitem(w_modules, w_main, mainmodule) return mainmodule + def compilecode(space, source, filename, cmd='exec'): w = space.wrap - w_code = space.builtin.call('compile', - w(source), w(filename), w(cmd), w(0), w(0)) + w_code = space.builtin.call( + 'compile', w(source), w(filename), w(cmd), w(0), w(0)) pycode = space.interp_w(eval.Code, w_code) return pycode @@ -28,7 +29,7 @@ cmd = 'eval' else: cmd = 'exec' - + try: if space is None: from pypy.objspace.std import StdObjSpace @@ -55,18 +56,22 @@ operationerr.record_interpreter_traceback() raise + def run_string(source, filename=None, space=None): _run_eval_string(source, filename, space, False) + def eval_string(source, filename=None, space=None): 
return _run_eval_string(source, filename, space, True) + def run_file(filename, space=None): - if __name__=='__main__': + if __name__ == '__main__': print "Running %r with %r" % (filename, space) istring = open(filename).read() run_string(istring, filename, space) + def run_module(module_name, args, space=None): """Implements PEP 338 'Executing modules as scripts', overwriting sys.argv[1:] using `args` and executing the module `module_name`. @@ -89,7 +94,6 @@ return space.call_function(w_run_module, w(module_name), space.w_None, w('__main__'), space.w_True) -# ____________________________________________________________ def run_toplevel(space, f, verbose=False): """Calls f() and handle all OperationErrors. diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -881,8 +881,8 @@ def LOAD_NAME(self, nameindex, next_instr): if self.w_locals is not self.w_globals: - w_varname = self.getname_w(nameindex) - w_value = self.space.finditem(self.w_locals, w_varname) + varname = self.getname_u(nameindex) + w_value = self.space.finditem_str(self.w_locals, varname) if w_value is not None: self.pushvalue(w_value) return diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py deleted file mode 100644 --- a/pypy/interpreter/test/test_buffer.py +++ /dev/null @@ -1,43 +0,0 @@ -import py -from rpython.tool.udir import udir - -testdir = udir.ensure('test_buffer', dir=1) - - -class TestBuffer: - def test_buffer_w(self): - space = self.space - w_hello = space.wrap('hello world') - buf = space.buffer_w(w_hello) - assert buf.getlength() == 11 - assert buf.as_str() == 'hello world' - assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.newbuffer(buf)) is buf - assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' - space.raises_w(space.w_TypeError, space.buffer_w, 
space.wrap(5)) - - def test_file_write(self): - space = self.space - w_buffer = space.newbuffer(space.buffer_w(space.wrap('hello world'))) - filename = str(testdir.join('test_file_write')) - space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): - f = open(filename, 'wb') - f.write(buffer) - f.close() - """) - f = open(filename, 'rb') - data = f.read() - f.close() - assert data == 'hello world' - - def test_unicode(self): - space = self.space - s = space.bufferstr_w(space.wrap(u'hello')) - assert type(s) is str - assert s == 'hello' - space.raises_w(space.w_UnicodeEncodeError, - space.bufferstr_w, space.wrap(u'\xe9')) - - -# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -22,22 +22,6 @@ compile; if absent or zero these statements do influence the compilation, in addition to any features explicitly specified. """ - - ast_node = None - w_ast_type = space.gettypeobject(ast.AST.typedef) - str_ = None - if space.isinstance_w(w_source, w_ast_type): - ast_node = space.interp_w(ast.mod, w_source) - ast_node.sync_app_attrs(space) - elif space.isinstance_w(w_source, space.w_unicode): - w_utf_8_source = space.call_method(w_source, "encode", - space.wrap("utf-8")) - str_ = space.str_w(w_utf_8_source) - # This flag tells the parser to reject any coding cookies it sees. 
- flags |= consts.PyCF_SOURCE_IS_UTF8 - else: - str_ = space.str_w(w_source) - ec = space.getexecutioncontext() if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): @@ -53,14 +37,30 @@ space.wrap("compile() arg 3 must be 'exec' " "or 'eval' or 'single'")) - if ast_node is None: - if flags & consts.PyCF_ONLY_AST: - mod = ec.compiler.compile_to_ast(str_, filename, mode, flags) - return space.wrap(mod) - else: - code = ec.compiler.compile(str_, filename, mode, flags) + w_ast_type = space.gettypeobject(ast.AST.typedef) + if space.isinstance_w(w_source, w_ast_type): + ast_node = space.interp_w(ast.mod, w_source) + ast_node.sync_app_attrs(space) + code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + return space.wrap(code) + + if space.isinstance_w(w_source, space.w_unicode): + w_utf_8_source = space.call_method(w_source, "encode", + space.wrap("utf-8")) + str_ = space.str_w(w_utf_8_source) + # This flag tells the parser to reject any coding cookies it sees. 
+ flags |= consts.PyCF_SOURCE_IS_UTF8 else: - code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + str_ = space.readbuf_w(w_source).as_str() + + if '\x00' in str_: + raise OperationError(space.w_TypeError, space.wrap( + "compile() expected string without null bytes")) + + if flags & consts.PyCF_ONLY_AST: + code = ec.compiler.compile_to_ast(str_, filename, mode, flags) + else: + code = ec.compiler.compile(str_, filename, mode, flags) return space.wrap(code) diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -351,17 +351,17 @@ self.promote_step = promote_step def descr_new(space, w_subtype, w_start, w_stop=None, w_step=None): - start = _toint(space, w_start) + start = space.int_w(w_start) if space.is_none(w_step): # no step argument provided step = 1 promote_step = True else: - step = _toint(space, w_step) + step = space.int_w(w_step) promote_step = False if space.is_none(w_stop): # only 1 argument provided start, stop = 0, start else: - stop = _toint(space, w_stop) + stop = space.int_w(w_stop) howmany = get_len_of_range(space, start, stop, step) obj = space.allocate_instance(W_XRange, w_subtype) W_XRange.__init__(obj, space, start, howmany, step, promote_step) @@ -425,11 +425,6 @@ minint = -sys.maxint - 1 return minint if last < minint - step else last + step -def _toint(space, w_obj): - # this also supports float arguments. CPython still does, too. - # needs a bit more thinking in general... 
- return space.int_w(space.int(w_obj)) - W_XRange.typedef = TypeDef("xrange", __new__ = interp2app(W_XRange.descr_new.im_func), __repr__ = interp2app(W_XRange.descr_repr), @@ -441,6 +436,7 @@ ) W_XRange.typedef.acceptable_as_base_class = False + class W_XRangeIterator(W_Root): def __init__(self, space, current, remaining, step): self.space = space @@ -488,7 +484,10 @@ ) W_XRangeIterator.typedef.acceptable_as_base_class = False + class W_XRangeStepOneIterator(W_XRangeIterator): + _immutable_fields_ = ['stop'] + def __init__(self, space, start, stop): self.space = space self.current = start diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -311,14 +311,14 @@ def test_xrange_len(self): x = xrange(33) assert len(x) == 33 - x = xrange(33.2) - assert len(x) == 33 + exc = raises(TypeError, xrange, 33.2) + assert "integer" in str(exc.value) x = xrange(33,0,-1) assert len(x) == 33 x = xrange(33,0) assert len(x) == 0 - x = xrange(33,0.2) - assert len(x) == 0 + exc = raises(TypeError, xrange, 33, 0.2) + assert "integer" in str(exc.value) x = xrange(0,33) assert len(x) == 33 x = xrange(0,33,-1) @@ -490,6 +490,14 @@ def test_compile(self): co = compile('1+2', '?', 'eval') assert eval(co) == 3 + co = compile(buffer('1+2'), '?', 'eval') + assert eval(co) == 3 + exc = raises(TypeError, compile, chr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, unichr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, memoryview('1+2'), '?', 'eval') + assert str(exc.value) == "expected a readable buffer object" compile("from __future__ import with_statement", "", "exec") raises(SyntaxError, compile, '-', '?', 'eval') raises(ValueError, compile, '"\\xt"', '?', 'eval') diff --git 
a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -1,5 +1,4 @@ class AppTestMap: - def test_trivial_map_one_seq(self): assert map(lambda x: x+2, [1, 2, 3, 4]) == [3, 4, 5, 6] @@ -77,6 +76,7 @@ assert result == [(2, 7), (1, 6), (None, 5), (None, 4), (None, 3), (None, 2), (None, 1)] + class AppTestZip: def test_one_list(self): assert zip([1,2,3]) == [(1,), (2,), (3,)] @@ -93,6 +93,7 @@ yield None assert zip(Foo()) == [] + class AppTestReduce: def test_None(self): raises(TypeError, reduce, lambda x, y: x+y, [1,2,3], None) @@ -105,6 +106,7 @@ assert reduce(lambda x, y: x-y, [10, 2, 8]) == 0 assert reduce(lambda x, y: x-y, [2, 8], 10) == 0 + class AppTestFilter: def test_None(self): assert filter(None, ['a', 'b', 1, 0, None]) == ['a', 'b', 1] @@ -125,6 +127,7 @@ return i * 10 assert filter(lambda x: x != 20, T("abcd")) == (0, 10, 30) + class AppTestXRange: def test_xrange(self): x = xrange(2, 9, 3) @@ -155,7 +158,8 @@ assert list(xrange(0, 10, A())) == [0, 5] def test_xrange_float(self): - assert list(xrange(0.1, 2.0, 1.1)) == [0, 1] + exc = raises(TypeError, xrange, 0.1, 2.0, 1.1) + assert "integer" in str(exc.value) def test_xrange_long(self): import sys @@ -218,6 +222,7 @@ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] raises(TypeError, reversed, reversed("hello")) + class AppTestApply: def test_apply(self): def f(*args, **kw): @@ -228,6 +233,7 @@ assert apply(f, args) == (args, {}) assert apply(f, args, kw) == (args, kw) + class AppTestAllAny: """ These are copied directly and replicated from the Python 2.5 source code. 
@@ -277,6 +283,7 @@ S = [10, 20, 30] assert any([x > 42 for x in S]) == False + class AppTestMinMax: def test_min(self): assert min(1, 2) == 1 diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -2,13 +2,16 @@ # A convenient read-write buffer. Located here for want of a better place. # -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from pypy.interpreter.gateway import unwrap_spec -class ByteBuffer(RWBuffer): +class ByteBuffer(Buffer): + _immutable_ = True + def __init__(self, len): self.data = ['\x00'] * len + self.readonly = False def getlength(self): return len(self.data) diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -13,3 +13,18 @@ assert b[-1] == '*' assert b[-2] == '-' assert b[-3] == '+' + exc = raises(TypeError, "b[3] = 'abc'") + assert str(exc.value) == "right operand must be a single byte" + exc = raises(TypeError, "b[3:5] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + exc = raises(TypeError, "b[3:7:2] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + + b = bytebuffer(10) + b[1:3] = 'xy' + assert str(b) == "\x00xy" + "\x00" * 7 + b[4:8:2] = 'zw' + assert str(b) == "\x00xy\x00z\x00w" + "\x00" * 3 + r = str(buffer(u'#')) + b[6:6+len(r)] = u'#' + assert str(b[:6+len(r)]) == "\x00xy\x00z\x00" + r diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,21 +1,22 @@ -from pypy.interpreter.buffer import RWBuffer -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.gateway import 
unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer +from rpython.rlib.buffer import Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw -class LLBuffer(RWBuffer): +class LLBuffer(Buffer): _immutable_ = True def __init__(self, raw_cdata, size): self.raw_cdata = raw_cdata self.size = size + self.readonly = False def getlength(self): return self.size @@ -32,7 +33,7 @@ def getslice(self, start, stop, step, size): if step == 1: return rffi.charpsize2str(rffi.ptradd(self.raw_cdata, start), size) - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) @@ -46,6 +47,14 @@ W_Buffer.__init__(self, buffer) self.keepalive = keepalive + def descr_setitem(self, space, w_index, w_obj): + try: + W_Buffer.descr_setitem(self, space, w_index, w_obj) + except OperationError as e: + if e.match(space, space.w_TypeError): + e.w_type = space.w_ValueError + raise + MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -72,8 +72,8 @@ 'utf_32_le_decode' : 'interp_codecs.utf_32_le_decode', 'utf_32_le_encode' : 'interp_codecs.utf_32_le_encode', 'utf_32_ex_decode' : 'interp_codecs.utf_32_ex_decode', - 'charbuffer_encode': 'interp_codecs.buffer_encode', - 'readbuffer_encode': 'interp_codecs.buffer_encode', + 'charbuffer_encode': 'interp_codecs.charbuffer_encode', + 'readbuffer_encode': 'interp_codecs.readbuffer_encode', 'charmap_decode' : 'interp_codecs.charmap_decode', 
'charmap_encode' : 'interp_codecs.charmap_encode', 'escape_encode' : 'interp_codecs.escape_encode', diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -321,8 +321,14 @@ w_res = space.call_function(w_encoder, w_obj, space.wrap(errors)) return space.getitem(w_res, space.wrap(0)) - at unwrap_spec(s='bufferstr', errors='str_or_None') -def buffer_encode(space, s, errors='strict'): + at unwrap_spec(errors='str_or_None') +def readbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('s#', w_data) + return space.newtuple([space.wrap(s), space.wrap(len(s))]) + + at unwrap_spec(errors='str_or_None') +def charbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('t#', w_data) return space.newtuple([space.wrap(s), space.wrap(len(s))]) @unwrap_spec(errors=str) @@ -673,7 +679,7 @@ if space.isinstance_w(w_string, space.w_unicode): return space.newtuple([w_string, space.len(w_string)]) - string = space.str_w(w_string) + string = space.readbuf_w(w_string).as_str() if len(string) == 0: return space.newtuple([space.wrap(u''), space.wrap(0)]) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -276,7 +276,7 @@ assert enc == "a\x00\x00\x00" def test_unicode_internal_decode(self): - import sys + import sys, _codecs, array if sys.maxunicode == 65535: # UCS2 build if sys.byteorder == "big": bytes = "\x00a" @@ -291,6 +291,9 @@ bytes2 = "\x98\x00\x01\x00" assert bytes2.decode("unicode_internal") == u"\U00010098" assert bytes.decode("unicode_internal") == u"a" + assert _codecs.unicode_internal_decode(array.array('c', bytes))[0] == u"a" + exc = raises(TypeError, _codecs.unicode_internal_decode, memoryview(bytes)) + assert str(exc.value) == "expected a readable buffer object" def 
test_raw_unicode_escape(self): assert unicode("\u0663", "raw-unicode-escape") == u"\u0663" @@ -420,9 +423,13 @@ for (i, line) in enumerate(reader): assert line == s[i] - def test_array(self): + def test_buffer_encode(self): import _codecs, array - _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + assert _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + exc = raises(TypeError, _codecs.charbuffer_encode, array.array('c', 'spam')) + assert str(exc.value) == "must be string or read-only character buffer, not array.array" + assert _codecs.readbuffer_encode(u"test") == ('test', 4) + assert _codecs.charbuffer_encode(u"test") == ('test', 4) def test_utf8sig(self): import codecs diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -1,6 +1,6 @@ from rpython.rlib.rstacklet import StackletThread from rpython.rlib import jit -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef @@ -39,6 +39,7 @@ bottomframe.locals_stack_w[1] = w_callable bottomframe.locals_stack_w[2] = w_args bottomframe.locals_stack_w[3] = w_kwds + bottomframe.last_exception = get_cleared_operation_error(space) self.bottomframe = bottomframe # global_state.origin = self diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -684,3 +684,17 @@ execfile(self.translated, d) d['set_fast_mode']() d['test_various_depths']() + + def test_exc_info_doesnt_follow_continuations(self): + import sys + from _continuation 
import continulet + # + def f1(c1): + return sys.exc_info() + # + c1 = continulet(f1) + try: + 1 // 0 + except ZeroDivisionError: + got = c1.switch() + assert got == (None, None, None) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -267,9 +267,14 @@ def direct_write(self, w_data): space = self.space - if not self.binary and space.isinstance_w(w_data, space.w_unicode): - w_data = space.call_method(w_data, "encode", space.wrap(self.encoding), space.wrap(self.errors)) - data = space.bufferstr_w(w_data) + if self.binary: + data = space.getarg_w('s*', w_data).as_str() + else: + if space.isinstance_w(w_data, space.w_unicode): + w_data = space.call_method(w_data, "encode", + space.wrap(self.encoding), + space.wrap(self.errors)) + data = space.charbuf_w(w_data) self.do_direct_write(data) def do_direct_write(self, data): @@ -455,21 +460,24 @@ space = self.space self.check_closed() - w_iterator = space.iter(w_lines) - while True: - try: - w_line = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done + lines = space.fixedview(w_lines) + for i, w_line in enumerate(lines): + if not space.isinstance_w(w_line, space.w_str): + try: + line = w_line.charbuf_w(space) + except TypeError: + raise OperationError(space.w_TypeError, space.wrap( + "writelines() argument must be a sequence of strings")) + else: + lines[i] = space.wrap(line) + for w_line in lines: self.file_write(w_line) def file_readinto(self, w_rwbuffer): """readinto() -> Undocumented. 
Don't use this; it may go away.""" # XXX not the most efficient solution as it doesn't avoid the copying space = self.space - rwbuffer = space.rwbuffer_w(w_rwbuffer) + rwbuffer = space.writebuf_w(w_rwbuffer) w_data = self.file_read(rwbuffer.getlength()) data = space.str_w(w_data) rwbuffer.setslice(0, data) diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -386,6 +386,32 @@ assert len(somelines) > 200 assert somelines == lines[:len(somelines)] + def test_writelines(self): + import array + fn = self.temptestfile + with file(fn, 'w') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'r').readlines() == ['abcdef'] + + with file(fn, 'wb') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == ['abcdef'] + + with file(fn, 'wb') as f: + exc = raises(TypeError, f.writelines, ['abc', memoryview('def')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == [] + def test_nasty_writelines(self): # The stream lock should be released between writes fn = self.temptestfile diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ 
b/pypy/module/_io/interp_bufferedio.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -80,7 +80,7 @@ self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() w_data = space.call_method(self, "read", space.wrap(length)) @@ -101,11 +101,14 @@ readinto = interp2app(W_BufferedIOBase.readinto_w), ) -class RawBuffer(RWBuffer): +class RawBuffer(Buffer): + _immutable_ = True + def __init__(self, buf, start, length): self.buf = buf self.start = start self.length = length + self.readonly = False def getlength(self): return self.length @@ -698,7 +701,7 @@ def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() size = len(data) with self.lock: diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -41,7 +41,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) size = rwbuffer.getlength() output = self.read(size) @@ -50,10 +50,7 @@ def write_w(self, space, w_data): self._check_closed(space) - if space.isinstance_w(w_data, space.w_unicode): - raise OperationError(space.w_TypeError, space.wrap( - "bytes string of buffer expected")) - buf = space.bufferstr_w(w_data) + buf = space.buffer_w(w_data, space.BUF_CONTIG_RO).as_str() length = 
len(buf) if length <= 0: return space.wrap(0) diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -333,7 +333,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() try: n = os.write(self.fd, data) @@ -366,7 +366,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) self._check_readable(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() try: buf = os.read(self.fd, length) diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,6 +139,14 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\ncxxxxx' @@ -235,7 +243,8 @@ import _io raw = _io.FileIO(self.tmpfile, 'w') f = _io.BufferedWriter(raw) - f.write("abcd") + f.write("ab") + f.write(u"cd") f.close() assert self.readfile() == "abcd" diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -38,6 +38,8 @@ f = _io.BytesIO() assert f.write("") == 0 assert f.write("hello") == 5 + exc = 
raises(TypeError, f.write, u"lo") + assert str(exc.value) == "'unicode' does not have the buffer interface" import gc; gc.collect() assert f.getvalue() == "hello" f.close() @@ -97,6 +99,14 @@ a2 = bytearray('testing') assert b.readinto(a1) == 1 assert b.readinto(a2) == 4 + exc = raises(TypeError, b.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, b.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" b.close() assert a1 == "h" assert a2 == "elloing" diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -82,7 +82,8 @@ import _io filename = self.tmpfile + '_w' f = _io.FileIO(filename, 'wb') - f.write("test") + f.write("te") + f.write(u"st") # try without flushing f2 = _io.FileIO(filename, 'rb') assert f2.read() == "test" @@ -135,6 +136,14 @@ a = bytearray('x' * 10) f = _io.FileIO(self.tmpfile, 'r+') assert f.readinto(a) == 10 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\nc\0\0\0\0\0' # diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- 
a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -80,8 +80,9 @@ raise OperationError(space.w_IOError, space.wrap("connection is read-only")) - @unwrap_spec(buf='bufferstr', offset='index', size='index') - def send_bytes(self, space, buf, offset=0, size=PY_SSIZE_T_MIN): + @unwrap_spec(offset='index', size='index') + def send_bytes(self, space, w_buf, offset=0, size=PY_SSIZE_T_MIN): + buf = space.getarg_w('s*', w_buf).as_str() length = len(buf) self._check_writable(space) if offset < 0: @@ -122,7 +123,7 @@ @unwrap_spec(offset='index') def recv_bytes_into(self, space, w_buffer, offset=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.writebuf_w(w_buffer) length = rwbuffer.getlength() res, newbuf = self.do_recv_string( @@ -149,7 +150,7 @@ w_pickled = space.call_method( w_picklemodule, "dumps", w_obj, w_protocol) - buf = space.bufferstr_w(w_pickled) + buf = space.str_w(w_pickled) self.do_send_string(space, buf, 0, len(buf)) def recv(self, space): diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -193,7 +193,7 @@ def setslice(self, space, w_slice, w_value): start, stop = self.decodeslice(space, w_slice) - value = space.bufferstr_w(w_value) + value = space.str_w(w_value) if start + len(value) != stop: raise OperationError(space.w_ValueError, space.wrap("cannot resize array")) diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,12 +1,14 @@ -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer # XXX not the most efficient implementation -class RawFFIBuffer(RWBuffer): +class RawFFIBuffer(Buffer): + _immutable_ = True def __init__(self, datainstance): self.datainstance = datainstance + self.readonly = False def getlength(self): return self.datainstance.getrawsize() diff --git 
a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -16,6 +16,7 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.rarithmetic import intmask, r_uint +from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker TYPEMAP = { @@ -352,8 +353,13 @@ lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) - def buffer_w(self, space): - from pypy.module._rawffi.buffer import RawFFIBuffer + def buffer_w(self, space, flags): + return RawFFIBuffer(self) + + def readbuf_w(self, space): + return RawFFIBuffer(self) + + def writebuf_w(self, space): return RawFFIBuffer(self) def getrawsize(self): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1100,6 +1100,12 @@ assert a[3] == 'z' assert a[4] == 't' + b = memoryview(a) + assert len(b) == 10 + assert b[3] == 'z' + b[3] = 'x' + assert b[3] == 'x' + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -419,7 +419,7 @@ @unwrap_spec(nbytes=int, flags=int) def recv_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt @@ -430,7 +430,7 @@ @unwrap_spec(nbytes=int, flags=int) def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt diff --git a/pypy/module/_socket/test/test_sock_app.py 
b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -545,8 +545,12 @@ s.connect(("www.python.org", 80)) except _socket.gaierror, ex: skip("GAIError - probably no connection: %s" % str(ex.args)) + exc = raises(TypeError, s.send, None) + assert str(exc.value) == "must be string or buffer, not None" assert s.send(buffer('')) == 0 assert s.sendall(buffer('')) is None + assert s.send(memoryview('')) == 0 + assert s.sendall(memoryview('')) is None assert s.send(u'') == 0 assert s.sendall(u'') is None raises(UnicodeEncodeError, s.send, u'\xe9') @@ -678,6 +682,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes = cli.recv_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_recvfrom_into(self): import socket import array @@ -693,6 +704,13 @@ msg = buf.tostring()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes, addr = cli.recvfrom_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_family(self): import socket cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -34,8 +34,8 @@ def slice_w(space, ctx, start, end, w_default): if 0 <= start <= end: - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrap(ctx._string[start:end]) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,7 +98,7 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a StrMatchContext or a UnicodeMatchContext for searching + 
"""Make a BufMatchContext or a UnicodeMatchContext for searching in the given w_string object.""" space = self.space if pos < 0: @@ -114,12 +114,14 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - str = space.bufferstr_w(w_string) - if pos > len(str): - pos = len(str) - if endpos > len(str): - endpos = len(str) - return rsre_core.StrMatchContext(self.code, str, + buf = space.readbuf_w(w_string) + size = buf.getlength() + assert size >= 0 + if pos > size: + pos = size + if endpos > size: + endpos = size + return rsre_core.BufMatchContext(self.code, buf, pos, endpos, self.flags) def getmatch(self, ctx, found): @@ -477,8 +479,8 @@ def fget_string(self, space): ctx = self.ctx - if isinstance(ctx, rsre_core.StrMatchContext): - return space.wrap(ctx._string) + if isinstance(ctx, rsre_core.BufMatchContext): + return space.wrap(ctx._buffer.as_str()) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -2,7 +2,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError, wrap_windowserror +from pypy.interpreter.error import OperationError, wrap_windowserror, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rwinreg, rwin32 from rpython.rlib.rarithmetic import r_uint, intmask @@ -327,7 +327,14 @@ buf = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw') buf[0] = '\0' else: - value = space.bufferstr_w(w_value) + try: + value = w_value.readbuf_w(space) + except TypeError: + raise oefmt(space.w_TypeError, + "Objects of type '%T' can not be used as binary " + "registry values", w_value) + else: + value = value.as_str() buflen 
= len(value) buf = rffi.str2charp(value) diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -137,11 +137,15 @@ assert 0, "Did not raise" def test_SetValueEx(self): - from _winreg import CreateKey, SetValueEx + from _winreg import CreateKey, SetValueEx, REG_BINARY key = CreateKey(self.root_key, self.test_key_name) sub_key = CreateKey(key, "sub_key") for name, value, type in self.test_data: SetValueEx(sub_key, name, 0, type, value) + exc = raises(TypeError, SetValueEx, sub_key, 'test_name', None, + REG_BINARY, memoryview('abc')) + assert str(exc.value) == ("Objects of type 'memoryview' can not " + "be used as binary registry values") def test_readValues(self): from _winreg import OpenKey, EnumValue, QueryValueEx, EnumKey diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -1,6 +1,7 @@ from __future__ import with_statement from rpython.rlib import jit +from rpython.rlib.buffer import Buffer from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable @@ -9,7 +10,6 @@ from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( interp2app, interpindirect2app, unwrap_spec) @@ -42,7 +42,7 @@ if len(__args__.arguments_w) > 0: w_initializer = __args__.arguments_w[0] if space.type(w_initializer) is space.w_str: - a.descr_fromstring(space, space.str_w(w_initializer)) + a.descr_fromstring(space, w_initializer) elif space.type(w_initializer) is space.w_list: a.descr_fromlist(space, w_initializer) else: @@ -132,8 +132,11 @@ self.len = 0 
self.allocated = 0 - def buffer_w(self, space): - return ArrayBuffer(self) + def readbuf_w(self, space): + return ArrayBuffer(self, True) + + def writebuf_w(self, space): + return ArrayBuffer(self, False) def descr_append(self, space, w_x): """ append(x) @@ -229,13 +232,13 @@ self._charbuf_stop() return self.space.wrap(s) - @unwrap_spec(s=str) - def descr_fromstring(self, space, s): + def descr_fromstring(self, space, w_s): """ fromstring(string) Appends items from the string, interpreting it as an array of machine values,as if it had been read from a file using the fromfile() method). """ + s = space.getarg_w('s#', w_s) if len(s) % self.itemsize != 0: msg = 'string length not a multiple of item size' raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) @@ -267,10 +270,10 @@ elems = max(0, len(item) - (len(item) % self.itemsize)) if n != 0: item = item[0:elems] - self.descr_fromstring(space, item) + self.descr_fromstring(space, space.wrap(item)) msg = "not enough items in file" raise OperationError(space.w_EOFError, space.wrap(msg)) - self.descr_fromstring(space, item) + self.descr_fromstring(space, w_item) @unwrap_spec(w_f=W_File) def descr_tofile(self, space, w_f): @@ -583,9 +586,12 @@ v.typecode = k unroll_typecodes = unrolling_iterable(types.keys()) -class ArrayBuffer(RWBuffer): - def __init__(self, array): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, array, readonly): self.array = array + self.readonly = readonly def getlength(self): return self.array.len * self.array.itemsize diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -155,6 +155,11 @@ a.fromstring('Hi!') assert a[0] == 'H' and a[1] == 'i' and a[2] == '!' 
and len(a) == 3 a = self.array('c') + a.fromstring(buffer('xyz')) + exc = raises(TypeError, a.fromstring, memoryview('xyz')) + assert str(exc.value) == "must be string or read-only buffer, not memoryview" + assert a[0] == 'x' and a[1] == 'y' and a[2] == 'z' and len(a) == 3 + a = self.array('c') a.fromstring('') assert not len(a) @@ -421,12 +426,8 @@ def test_buffer_write(self): a = self.array('c', 'hello') buf = buffer(a) - print repr(buf) - try: From noreply at buildbot.pypy.org Mon Apr 28 11:07:55 2014 From: noreply at buildbot.pypy.org (asmosoinio) Date: Mon, 28 Apr 2014 11:07:55 +0200 (CEST) Subject: [pypy-commit] pypy asmosoinio/fixed-pip-installation-url-github-githu-1398674840188: fixed pip installation URL (github => githubusercontent) Message-ID: <20140428090755.B606A1C0705@cobra.cs.uni-duesseldorf.de> Author: Asmo Soinio Branch: asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 Changeset: r71038:79ed26738adf Date: 2014-04-28 09:03 +0000 http://bitbucket.org/pypy/pypy/changeset/79ed26738adf/ Log: fixed pip installation URL (github => githubusercontent) diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -76,7 +76,7 @@ .. 
code-block:: console $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py + $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py $ ./pypy-2.1/bin/pypy distribute_setup.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example From noreply at buildbot.pypy.org Mon Apr 28 11:07:56 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 28 Apr 2014 11:07:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in asmosoinio/pypy/asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 (pull request #235) Message-ID: <20140428090756.DFA1D1C0705@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71039:554df1802294 Date: 2014-04-28 02:06 -0700 http://bitbucket.org/pypy/pypy/changeset/554df1802294/ Log: Merged in asmosoinio/pypy/asmosoinio/fixed-pip-installation-url- github-githu-1398674840188 (pull request #235) fixed pip installation URL (github => githubusercontent) diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -76,7 +76,7 @@ .. 
code-block:: console $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py + $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py $ ./pypy-2.1/bin/pypy distribute_setup.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example From noreply at buildbot.pypy.org Mon Apr 28 13:48:55 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 28 Apr 2014 13:48:55 +0200 (CEST) Subject: [pypy-commit] benchmarks default: tweak multithreaded raytrace a bit Message-ID: <20140428114855.8F6771C01F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r253:bec3da064c27 Date: 2014-04-24 15:04 +0200 http://bitbucket.org/pypy/benchmarks/changeset/bec3da064c27/ Log: tweak multithreaded raytrace a bit diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ b/multithread/raytrace/raytrace.py @@ -1,7 +1,7 @@ # From http://www.reddit.com/r/tinycode/comments/169ri9/ray_tracer_in_140_sloc_of_python_with_picture/ # Date: 14.03.2013 -from math import sqrt, pow, pi +from math import sqrt, pi from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool import time @@ -125,14 +125,14 @@ -def task(x, h, cameraPos, objs, lightSource): - time.sleep(0) # XXX - with atomic: - for y in range(h): +def task(img, x, h, cameraPos, objs, lightSource): + line = img[x] + for y in range(h): + with atomic: ray = Ray(cameraPos, (Vector(x/50.0-5,y/50.0-5,0)-cameraPos).normal()) - trace(ray, objs, lightSource, 10) - time.sleep(0) # XXX + col = trace(ray, objs, lightSource, 10) + line[y] = (col.x + col.y + col.z) / 3.0 return x @@ -157,9 +157,11 @@ lightSource = Vector(0,10,0) cameraPos = Vector(0,0,20) - + img = [] for x in range(w): - future_dispatcher(ths, x, h, cameraPos, objs, lightSource) + img.append([0.0] * h) + for x in range(w): + future_dispatcher(ths, img, x, h, 
cameraPos, objs, lightSource) for f in futures: print f() From noreply at buildbot.pypy.org Mon Apr 28 13:48:56 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 28 Apr 2014 13:48:56 +0200 (CEST) Subject: [pypy-commit] benchmarks default: a few tweaks Message-ID: <20140428114856.E78851C01F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r254:de389b63b99c Date: 2014-04-28 13:50 +0200 http://bitbucket.org/pypy/benchmarks/changeset/de389b63b99c/ Log: a few tweaks diff --git a/multithread/btree/btree.py b/multithread/btree/btree.py --- a/multithread/btree/btree.py +++ b/multithread/btree/btree.py @@ -5,8 +5,6 @@ import random -thread_local = threading.local() - import bisect @@ -195,6 +193,9 @@ def insert(self, item): ancestors = self._path_to(item) + if self._present(item, ancestors): + return False + node, index = ancestors[-1] while getattr(node, "children", None): node = node.children[index] @@ -202,6 +203,7 @@ ancestors.append((node, index)) node, index = ancestors.pop() node.insert(index, item, ancestors) + return True def remove(self, item): ancestors = self._path_to(item) @@ -301,18 +303,22 @@ self._root = self.BRANCH(self, contents=seps, children=levels[-1]) +###################################################################### +###################################################################### +###################################################################### + OPS = [BTree.__contains__] * 98 + [BTree.insert, BTree.remove] +ITEM_RANGE = 10000 def task(id, tree, ops): print "start task with %s ops" % ops r = random.Random() r.seed(id) - thread_local.rnd = r for _ in xrange(ops): op = r.choice(OPS) - elem = r.randint(1, 10000) + elem = r.randint(1, ITEM_RANGE) with atomic: op(tree, elem) @@ -331,11 +337,10 @@ operations = int(operations) set_thread_pool(ThreadPool(threads)) - thread_local.rnd = random tree = BTree(20) for _ in xrange(1000): - tree.insert(random.randint(1, 1000)) + tree.insert(random.randint(1, ITEM_RANGE)) 
c_len = operations // threads fs = [] diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -1,6 +1,6 @@ from Queue import Queue, Empty, Full -from threading import Thread, Condition, Lock -import thread, atexit, sys +from threading import Thread, Condition, Lock, local +import thread, atexit, sys, time try: from __pypy__.thread import atomic, getsegmentlimit @@ -10,6 +10,31 @@ return 1 +class TLQueue(object): + def __init__(self): + self.items = [] + self._new_items = Condition() + + def put(self, v): + self.items.append(v) + with self._new_items: + self._new_items.notify_all() + + def get(self): + items = self.items + with atomic: + if items: + return items.pop() + + while True: + with self._new_items: + with atomic: + if items: + return items.pop() + + self._new_items.wait() + + class Worker(Thread): """Thread executing tasks from a given tasks queue""" def __init__(self, queue): @@ -29,7 +54,7 @@ class ThreadPool(object): def __init__(self, n_workers=None): - self.input_queue = Queue() + self.input_queue = TLQueue() if n_workers is None: n_workers = getsegmentlimit() self.workers = [Worker(self.input_queue) for i in range(n_workers)] diff --git a/multithread/skiplist/skiplist.py b/multithread/skiplist/skiplist.py --- a/multithread/skiplist/skiplist.py +++ b/multithread/skiplist/skiplist.py @@ -86,6 +86,7 @@ OPS = [SkipList.find] * 98 + [SkipList.insert, SkipList.remove] +ITEM_RANGE = 10000 def task(id, slist, ops): print "start task with %s ops" % ops @@ -95,7 +96,7 @@ for _ in xrange(ops): op = r.choice(OPS) - elem = r.randint(1, 10000) + elem = r.randint(1, ITEM_RANGE) with atomic: op(slist, elem) @@ -118,7 +119,7 @@ slist = SkipList() for _ in xrange(1000): - slist.insert(random.randint(1, 1000)) + slist.insert(random.randint(1, ITEM_RANGE)) c_len = operations // threads fs = [] From noreply at buildbot.pypy.org Mon Apr 
28 16:04:23 2014 From: noreply at buildbot.pypy.org (xando) Date: Mon, 28 Apr 2014 16:04:23 +0200 (CEST) Subject: [pypy-commit] pypy default: rlib parsing token_class extended with token_position_class Message-ID: <20140428140423.DE6291D2BBD@cobra.cs.uni-duesseldorf.de> Author: Sebastian Pawlu? Branch: Changeset: r71040:01046366784a Date: 2014-04-28 15:48 +0200 http://bitbucket.org/pypy/pypy/changeset/01046366784a/ Log: rlib parsing token_class extended with token_position_class diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -107,7 +107,7 @@ self.matcher = matcher self.lineno = 0 self.columnno = 0 - + def find_next_token(self): while 1: self.state = 0 @@ -126,8 +126,8 @@ i = ~i stop = self.last_matched_index + 1 assert stop >= 0 - if start == stop: - source_pos = SourcePos(i - 1, self.lineno, self.columnno) + if start == stop: + source_pos = self.token_position_class(i - 1, self.lineno, self.columnno) raise deterministic.LexerError(self.text, self.state, source_pos) source = self.text[start:stop] @@ -147,7 +147,7 @@ else: raise StopIteration return result - source_pos = SourcePos(i - 1, self.lineno, self.columnno) + source_pos = self.token_position_class(i - 1, self.lineno, self.columnno) raise deterministic.LexerError(self.text, self.state, source_pos) def adjust_position(self, token): @@ -158,7 +158,7 @@ self.columnno += len(token) else: self.columnno = token.rfind("\n") - + # def inner_loop(self, i): # while i < len(self.text): # char = self.text[i] @@ -186,10 +186,15 @@ class LexingDFARunner(AbstractLexingDFARunner): def __init__(self, matcher, automaton, text, ignore, eof=False, token_class=None): - if token_class is None: + + if not token_class: self.token_class = Token + self.token_position_class = SourcePos + else: self.token_class = token_class + self.token_position_class = token_class.source_position_class + AbstractLexingDFARunner.__init__(self, 
matcher, automaton, text, eof) self.ignore = ignore @@ -198,7 +203,8 @@ def make_token(self, index, state, text, eof=False): assert (eof and state == -1) or 0 <= state < len(self.automaton.names) - source_pos = SourcePos(index, self.lineno, self.columnno) + + source_pos = self.token_position_class(index, self.lineno, self.columnno) if eof: return self.token_class("EOF", "EOF", source_pos) return self.token_class(self.automaton.names[self.last_matched_state], From noreply at buildbot.pypy.org Mon Apr 28 16:04:25 2014 From: noreply at buildbot.pypy.org (xando) Date: Mon, 28 Apr 2014 16:04:25 +0200 (CEST) Subject: [pypy-commit] pypy lexer_token_position_class: new line Message-ID: <20140428140425.11DB71D2BBD@cobra.cs.uni-duesseldorf.de> Author: Sebastian Pawlu? Branch: lexer_token_position_class Changeset: r71041:401c05e2904d Date: 2014-04-28 15:55 +0200 http://bitbucket.org/pypy/pypy/changeset/401c05e2904d/ Log: new line diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -207,5 +207,6 @@ source_pos = self.token_position_class(index, self.lineno, self.columnno) if eof: return self.token_class("EOF", "EOF", source_pos) + return self.token_class(self.automaton.names[self.last_matched_state], text, source_pos) From noreply at buildbot.pypy.org Mon Apr 28 16:04:26 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 28 Apr 2014 16:04:26 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in xando/pypy/lexer_token_position_class (pull request #236) Message-ID: <20140428140426.2AF031D2BBD@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71042:f725787e0d52 Date: 2014-04-28 16:02 +0200 http://bitbucket.org/pypy/pypy/changeset/f725787e0d52/ Log: Merged in xando/pypy/lexer_token_position_class (pull request #236) Extending LexingDFARunner with custom token_position_class diff --git a/rpython/rlib/parsing/lexer.py 
b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -107,7 +107,7 @@ self.matcher = matcher self.lineno = 0 self.columnno = 0 - + def find_next_token(self): while 1: self.state = 0 @@ -126,8 +126,8 @@ i = ~i stop = self.last_matched_index + 1 assert stop >= 0 - if start == stop: - source_pos = SourcePos(i - 1, self.lineno, self.columnno) + if start == stop: + source_pos = self.token_position_class(i - 1, self.lineno, self.columnno) raise deterministic.LexerError(self.text, self.state, source_pos) source = self.text[start:stop] @@ -147,7 +147,7 @@ else: raise StopIteration return result - source_pos = SourcePos(i - 1, self.lineno, self.columnno) + source_pos = self.token_position_class(i - 1, self.lineno, self.columnno) raise deterministic.LexerError(self.text, self.state, source_pos) def adjust_position(self, token): @@ -158,7 +158,7 @@ self.columnno += len(token) else: self.columnno = token.rfind("\n") - + # def inner_loop(self, i): # while i < len(self.text): # char = self.text[i] @@ -186,10 +186,15 @@ class LexingDFARunner(AbstractLexingDFARunner): def __init__(self, matcher, automaton, text, ignore, eof=False, token_class=None): - if token_class is None: + + if not token_class: self.token_class = Token + self.token_position_class = SourcePos + else: self.token_class = token_class + self.token_position_class = token_class.source_position_class + AbstractLexingDFARunner.__init__(self, matcher, automaton, text, eof) self.ignore = ignore @@ -198,8 +203,10 @@ def make_token(self, index, state, text, eof=False): assert (eof and state == -1) or 0 <= state < len(self.automaton.names) - source_pos = SourcePos(index, self.lineno, self.columnno) + + source_pos = self.token_position_class(index, self.lineno, self.columnno) if eof: return self.token_class("EOF", "EOF", source_pos) + return self.token_class(self.automaton.names[self.last_matched_state], text, source_pos) From noreply at buildbot.pypy.org Mon Apr 28 
17:04:36 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 28 Apr 2014 17:04:36 +0200 (CEST) Subject: [pypy-commit] benchmarks default: more tweaks Message-ID: <20140428150436.7AF7A1C01CB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r255:2193a2976d5b Date: 2014-04-28 16:35 +0200 http://bitbucket.org/pypy/benchmarks/changeset/2193a2976d5b/ Log: more tweaks diff --git a/multithread/bench.py b/multithread/bench.py --- a/multithread/bench.py +++ b/multithread/bench.py @@ -34,7 +34,7 @@ def get_error(times): ts = sorted(times)[:args.k] best = float(ts[0]) - + return max((t / best) - 1.0 for t in ts) def within_error(args, times): @@ -51,6 +51,7 @@ test = import_file(os.path.basename(args.file)) times = [] + results = [] k = 1 try: while True: @@ -60,14 +61,15 @@ test_time = time.time() if args.p: - test.run(*args.more) + results.append(test.run(*args.more)) else: with nostdout(): - test.run(*args.more) + results.append(test.run(*args.more)) times.append(time.time() - test_time) if not args.q: print "took {} s".format(times[-1]) + print "returned", results[-1] if k >= args.k: if within_error(args, times): @@ -83,7 +85,9 @@ k += 1 finally: if not args.q: - print "times:", times + print "== times ==\n", "\n".join(map(str, times)) + print "== reported results ==\n", "\n".join( + map(str, filter(None, results))) if times: times = sorted(times)[:args.k] diff --git a/multithread/btree/btree.py b/multithread/btree/btree.py --- a/multithread/btree/btree.py +++ b/multithread/btree/btree.py @@ -344,16 +344,18 @@ c_len = operations // threads fs = [] + parallel_time = time.time() for i in xrange(threads): fs.append(Future(task, i, tree, c_len)) for f in fs: f() - + parallel_time = time.time() - parallel_time # print "tree:" # print tree # shutdown current pool set_thread_pool(None) + return parallel_time diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ 
b/multithread/common/abstract_threading.py @@ -10,6 +10,49 @@ return 1 +class TLQueue_concurrent(object): + def __init__(self): + my_id = thread.get_ident() + self._tl_items = {my_id : []} + self._new_items = Condition() + self._c = 0 + + def put(self, v): + # conflicts with any put() and get()s from + # the chosen queue: + c = (id(v) // 5) % len(self._tl_items) + items = self._tl_items.values()[c] + + with self._new_items: + items.append(v) + self._new_items.notify_all() + + def _get_my_items(self): + my_id = thread.get_ident() + try: + items = self._tl_items[my_id] + except KeyError: + items = [] + self._tl_items[my_id] = items + return items + + def get(self): + # tries first to get item from its + # own thread-local queue + items = self._get_my_items() + with atomic: + if items: + return items.pop() + + while True: + with self._new_items: + # steal from other queues + for its in self._tl_items.values(): + with atomic: + if its: + return its.pop() + self._new_items.wait() + class TLQueue(object): def __init__(self): self.items = [] diff --git a/multithread/mandelbrot/mandelbrot.py b/multithread/mandelbrot/mandelbrot.py --- a/multithread/mandelbrot/mandelbrot.py +++ b/multithread/mandelbrot/mandelbrot.py @@ -1,5 +1,5 @@ from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool -import sys +import sys, time def calculate(a, b, im_size, max_iter=255): @@ -11,8 +11,7 @@ real_step = (br - ar) / (width - 1) print "real/width:%s, imag/height:%s" % (real_step, imag_step) - with atomic: - result = [[0] * width for y in xrange(height)] + result = [[0] * width for y in xrange(height)] for y in xrange(height): zi = ai + y * imag_step for x in xrange(width): @@ -64,6 +63,7 @@ res = [] ai = -1.5 bi = ai + step + parallel_time = time.time() for i in xrange(threads): res.append(Future(calculate, a=(ar, ai + i * step), @@ -72,9 +72,11 @@ )) res = [f() for f in res] + parallel_time = time.time() - parallel_time set_thread_pool(None) - return 
merge_imgs(res) + merge_imgs(res) + return parallel_time diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ b/multithread/raytrace/raytrace.py @@ -160,15 +160,18 @@ img = [] for x in range(w): img.append([0.0] * h) + parallel_time = time.time() for x in range(w): future_dispatcher(ths, img, x, h, cameraPos, objs, lightSource) for f in futures: print f() del futures[:] + parallel_time = time.time() - parallel_time # shutdown current pool set_thread_pool(None) + return parallel_time diff --git a/multithread/skiplist/skiplist.py b/multithread/skiplist/skiplist.py --- a/multithread/skiplist/skiplist.py +++ b/multithread/skiplist/skiplist.py @@ -123,16 +123,19 @@ c_len = operations // threads fs = [] + parallel_time = time.time() for i in xrange(threads): fs.append(Future(task, i, slist, c_len)) for f in fs: f() + parallel_time = time.time() - parallel_time # print "list:" # slist.printList() # shutdown current pool set_thread_pool(None) + return parallel_time diff --git a/multithread/threadworms/threadworms.py b/multithread/threadworms/threadworms.py --- a/multithread/threadworms/threadworms.py +++ b/multithread/threadworms/threadworms.py @@ -157,6 +157,7 @@ # Create the worm objects. 
worms = [] # a list that contains all the worm objects + parallel_time = time.time() for i in range(NUM_WORMS): worms.append(Worm()) for w in worms: @@ -164,6 +165,8 @@ for t in worms: t.join() + parallel_time = time.time() - parallel_time + return parallel_time From noreply at buildbot.pypy.org Mon Apr 28 17:16:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 17:16:15 +0200 (CEST) Subject: [pypy-commit] stmgc marker: in-progress Message-ID: <20140428151615.F09FC1D2933@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1186:a8f77868840a Date: 2014-04-28 15:40 +0200 http://bitbucket.org/pypy/stmgc/changeset/a8f77868840a/ Log: in-progress diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -196,6 +196,10 @@ /* We have to signal the other thread to abort, and wait until it does. */ contmgr.other_pseg->pub.nursery_end = abort_category; + if (kind == WRITE_WRITE_CONTENTION) { + //marker_fetch_obj_write(contmgr.other_pseg->pub.segment_num, + // obj, contmgr.other_pseg->...); + } int sp = contmgr.other_pseg->safe_point; switch (sp) { diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -174,7 +174,7 @@ /* Temporarily stores the marker information */ char marker_self[_STM_MARKER_LEN]; char marker_other[_STM_MARKER_LEN]; - uintptr_t marker_inev[2]; + uintptr_t marker_inev[2]; /* marker where this thread became inevitable */ }; enum /* safe_point */ { diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -32,10 +32,9 @@ static void marker_expand(uintptr_t marker[2], char *segment_base, char *outmarker) { + outmarker[0] = 0; if (marker[0] == 0) return; /* no marker entry found */ - if (outmarker[0] != 0) - return; /* already collected an entry */ if (stmcb_expand_marker != NULL) { stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], outmarker, _STM_MARKER_LEN); @@ -44,9 +43,13 @@ 
static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) { + if (pseg->marker_self[0] != 0) + return; /* already collected an entry */ + uintptr_t marker[2]; marker_fetch(pseg->pub.running_thread, marker); marker_expand(marker, pseg->pub.segment_base, pseg->marker_self); + pseg->marker_other[0] = 0; } char *_stm_expand_marker(void) @@ -85,33 +88,11 @@ pseg->marker_other[0] = 0; } -static void marker_lookup_from_thread(struct stm_priv_segment_info_s *pseg, - object_t *obj, char *outmarker) +static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, + uintptr_t marker[2]) { - outmarker[0] = 0; - - long i; - struct list_s *mlst = pseg->modified_old_objects; - struct list_s *mlstm = pseg->modified_old_objects_markers; - for (i = list_count(mlst); --i >= 0; ) { - if (list_item(mlst, i) == (uintptr_t)obj) { - uintptr_t marker[2]; - assert(list_count(mlstm) == 2 * list_count(mlst)); - marker[0] = list_item(mlstm, i * 2 + 0); - marker[1] = list_item(mlstm, i * 2 + 1); - - marker_expand(marker, pseg->pub.segment_base, outmarker); - break; - } - } -} - -static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, - object_t *obj) -{ - struct stm_priv_segment_info_s *my_pseg, *other_pseg; - char *other_segment_base = get_segment_base(other_segment_num); - acquire_segment_lock(other_segment_base); + char *segment_base = get_segment_base(in_segment_num); + acquire_segment_lock(segment_base); assert(_has_mutex()); /* here, we acquired the other thread's segment_lock, which means that: @@ -122,12 +103,35 @@ (2) it is not mutating 'modified_old_objects' right now (we have the global mutex_lock at this point too). 
*/ + long i; + struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num); + struct list_s *mlst = pseg->modified_old_objects; + struct list_s *mlstm = pseg->modified_old_objects_markers; + for (i = list_count(mlst); --i >= 0; ) { + if (list_item(mlst, i) == (uintptr_t)obj) { + assert(list_count(mlstm) == 2 * list_count(mlst)); + marker[0] = list_item(mlstm, i * 2 + 0); + marker[1] = list_item(mlstm, i * 2 + 1); + goto done; + } + } + marker[0] = 0; + marker[1] = 0; + done: + release_segment_lock(segment_base); +} + +static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, + object_t *obj) +{ + uintptr_t marker[2]; + marker_fetch_obj_write(other_segment_num, obj, marker); + + struct stm_priv_segment_info_s *my_pseg, *other_pseg; + other_pseg = get_priv_segment(other_segment_num); my_pseg = get_priv_segment(STM_SEGMENT->segment_num); - other_pseg = get_priv_segment(other_segment_num); - - marker_lookup_from_thread(other_pseg, obj, my_pseg->marker_other); - - release_segment_lock(other_segment_base); + my_pseg->marker_other[0] = 0; + marker_expand(marker, other_pseg->pub.segment_base, my_pseg->marker_other); } static void marker_lookup_other_thread_inev(uint8_t other_segment_num) @@ -144,10 +148,14 @@ static void marker_lookup_same_thread_write_read(object_t *obj) { + uintptr_t marker[2]; + marker_fetch_obj_write(STM_SEGMENT->segment_num, obj, marker); + struct stm_priv_segment_info_s *my_pseg; - my_pseg = get_priv_segment(STM_SEGMENT->segment_num); - marker_lookup_from_thread(my_pseg, obj, my_pseg->marker_self); + my_pseg->marker_self[0] = 0; + my_pseg->marker_other[0] = 0; + marker_expand(marker, STM_SEGMENT->segment_base, my_pseg->marker_self); } static void marker_fetch_inev(void) diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -6,6 +6,8 @@ static void marker_copy(stm_thread_local_t *tl, struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time); +static void 
marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, + uintptr_t marker[2]); static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, object_t *obj); static void marker_lookup_other_thread_inev(uint8_t other_segment_num); diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -159,7 +159,7 @@ @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") def expand_marker(base, number, ptr, outbuf, outbufsize): seen.append(number) - s = '%d %r\x00' % (number, ptr) + s = '%d %r\x00' % (number, ptr == ffi.NULL) assert len(s) <= outbufsize outbuf[0:len(s)] = s seen = [] @@ -172,7 +172,7 @@ self.push_root(ffi.cast("object_t *", 29)) self.push_root(ffi.cast("object_t *", ffi.NULL)) raw = lib._stm_expand_marker() - assert ffi.string(raw).startswith('29 ') + assert ffi.string(raw) == '29 True' assert seen == [29] def test_double_abort_markers_cb_write_write(self): @@ -192,6 +192,7 @@ self.pop_root() self.push_root(ffi.cast("object_t *", 17)) self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_minor_collect() # self.switch(1) self.start_transaction() @@ -220,6 +221,7 @@ self.pop_root() self.push_root(ffi.cast("object_t *", 17)) self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_minor_collect() # self.switch(1) self.start_transaction() @@ -259,3 +261,41 @@ assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ assert ffi.string(tl.longest_marker_self) == '19' assert ffi.string(tl.longest_marker_other) == '' + + def test_double_remote_markers_cb_write_write(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + p = stm_allocate_old(16) + # + self.start_transaction() + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", 
ffi.NULL)) + stm_set_char(p, 'A') + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + tl0 = self.get_stm_thread_local() + # + self.switch(1) + self.start_transaction() + self.become_inevitable() + self.push_root(ffi.cast("object_t *", 21)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'B') # aborts in #0 + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 23)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + # + py.test.raises(Conflict, self.switch, 0) + # + tl = self.get_stm_thread_local() + assert tl is tl0 + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE + assert ffi.string(tl.longest_marker_self) == '19' + assert ffi.string(tl.longest_marker_other) == '21' From noreply at buildbot.pypy.org Mon Apr 28 17:16:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 17:16:17 +0200 (CEST) Subject: [pypy-commit] stmgc marker: hg merge default Message-ID: <20140428151617.370291D2933@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1187:06fa05eeb305 Date: 2014-04-28 17:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/06fa05eeb305/ Log: hg merge default diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -44,6 +44,8 @@ visit((object_t **)&n->next); } +void stmcb_commit_soon() {} + static void expand_marker(char *base, uintptr_t odd_number, object_t *following_object, char *outputbuf, size_t outputbufsize) diff --git a/c7/demo/demo_largemalloc.c b/c7/demo/demo_largemalloc.c --- a/c7/demo/demo_largemalloc.c +++ b/c7/demo/demo_largemalloc.c @@ -23,6 +23,8 @@ abort(); } +void stmcb_commit_soon() {} + /************************************************************/ #define ARENA_SIZE (1024*1024*1024) diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -79,6 +79,8 @@ 
assert(n->next == *last_next); } +void stmcb_commit_soon() {} + int get_rand(int max) { if (max == 0) diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c --- a/c7/demo/demo_simple.c +++ b/c7/demo/demo_simple.c @@ -39,6 +39,8 @@ visit((object_t **)&n->next); } +void stmcb_commit_soon() {} + static sem_t done; diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -165,7 +165,8 @@ change_timing_state(wait_category); - /* XXX should also tell other_pseg "please commit soon" */ + /* tell the other to commit ASAP */ + signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("pausing...\n")); cond_signal(C_AT_SAFE_POINT); @@ -181,6 +182,9 @@ } else if (!contmgr.abort_other) { + /* tell the other to commit ASAP, since it causes aborts */ + signal_other_to_commit_soon(contmgr.other_pseg); + dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; if (kind == WRITE_WRITE_CONTENTION) @@ -267,6 +271,13 @@ abort_data_structures_from_segment_num(other_segment_num); } dprintf(("killed other thread\n")); + + /* we should commit soon, we caused an abort */ + //signal_other_to_commit_soon(get_priv_segment(STM_SEGMENT->segment_num)); + if (!STM_PSEGMENT->signalled_to_commit_soon) { + STM_PSEGMENT->signalled_to_commit_soon = true; + stmcb_commit_soon(); + } } } diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -6,7 +6,8 @@ static void inevitable_contention_management(uint8_t other_segment_num); static inline bool is_abort(uintptr_t nursery_end) { - return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE); + return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE + && nursery_end != NSE_SIGCOMMITSOON); } static inline bool is_aborting_now(uint8_t other_segment_num) { diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -14,13 +14,10 @@ #define EVENTUALLY(condition) \ { \ if 
(!(condition)) { \ - int _i; \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_acquire(lock_pages_privatizing[_i]); \ + acquire_privatization_lock(); \ if (!(condition)) \ stm_fatalerror("fails: " #condition); \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_release(lock_pages_privatizing[_i]); \ + release_privatization_lock(); \ } \ } #endif @@ -78,11 +75,11 @@ if (write_locks[lock_idx] == 0) { /* A lock to prevent reading garbage from lookup_other_thread_recorded_marker() */ - acquire_segment_lock(STM_SEGMENT->segment_base); + acquire_marker_lock(STM_SEGMENT->segment_base); if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], 0, lock_num))) { - release_segment_lock(STM_SEGMENT->segment_base); + release_marker_lock(STM_SEGMENT->segment_base); goto retry; } @@ -99,7 +96,7 @@ list_append2(STM_PSEGMENT->modified_old_objects_markers, marker[0], marker[1]); - release_segment_lock(STM_SEGMENT->segment_base); + release_marker_lock(STM_SEGMENT->segment_base); /* We need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only @@ -210,6 +207,7 @@ assert(STM_PSEGMENT->transaction_state == TS_NONE); change_timing_state(STM_TIME_RUN_CURRENT); STM_PSEGMENT->start_time = tl->_timing_cur_start; + STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; #ifndef NDEBUG STM_PSEGMENT->marker_inev[1] = 99999999999999999L; @@ -362,9 +360,12 @@ /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other segments' own private pages. + + Must be called with the privatization lock acquired. 
*/ assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + assert(STM_PSEGMENT->privatization_lock == 1); uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; @@ -406,26 +407,9 @@ memcpy(dst, src, copy_size); } else { - EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ + assert(memcmp(dst, src, copy_size) == 0); /* same page */ } - /* Do a full memory barrier. We must make sure that other - CPUs see the changes we did to the shared page ("S", - above) before we check the other segments below with - is_private_page(). Otherwise, we risk the following: - this CPU writes "S" but the writes are not visible yet; - then it checks is_private_page() and gets false, and does - nothing more; just afterwards another CPU sets its own - private_page bit and copies the page; but it risks doing - so before seeing the "S" writes. - - XXX what is the cost of this? If it's high, then we - should reorganize the code so that we buffer the second - parts and do them by bunch of N, after just one call to - __sync_synchronize()... 
- */ - __sync_synchronize(); - for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; @@ -442,7 +426,7 @@ memcpy(dst, src, copy_size); } else { - EVENTUALLY(!memcmp(dst, src, copy_size)); /* same page */ + assert(!memcmp(dst, src, copy_size)); /* same page */ } } @@ -456,12 +440,15 @@ if (STM_PSEGMENT->large_overflow_objects == NULL) return; + acquire_privatization_lock(); LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, synchronize_object_now(item)); + release_privatization_lock(); } static void push_modified_to_other_segments(void) { + acquire_privatization_lock(); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, @@ -481,6 +468,7 @@ private pages as needed */ synchronize_object_now(item); })); + release_privatization_lock(); list_clear(STM_PSEGMENT->modified_old_objects); list_clear(STM_PSEGMENT->modified_old_objects_markers); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -156,9 +156,21 @@ /* For sleeping contention management */ bool signal_when_done; - /* When we mutate 'modified_old_objects' but we don't have the - global mutex, we must acquire this lock. */ - uint8_t segment_lock; + /* This lock is acquired when that segment calls synchronize_object_now. + On the rare event of a page_privatize(), the latter will acquire + all the locks in all segments. Otherwise, for the common case, + it's cheap. (The set of all 'privatization_lock' in all segments + works like one single read-write lock, with page_privatize() acquiring + the write lock; but this variant is more efficient for the case of + many reads / rare writes.) */ + uint8_t privatization_lock; + + /* This lock is acquired when we mutate 'modified_old_objects' but + we don't have the global mutex. It is also acquired during minor + collection. It protects against a different thread that tries to + get this segment's marker corresponding to some object, or to + expand the marker into a full description. 
*/ + uint8_t marker_lock; /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. */ @@ -166,6 +178,9 @@ object_t *threadlocal_at_start_of_transaction; struct stm_shadowentry_s *shadowstack_at_abort; + /* Already signalled to commit soon: */ + bool signalled_to_commit_soon; + /* For debugging */ #ifndef NDEBUG pthread_t running_pthread; @@ -245,16 +260,30 @@ static void copy_object_to_shared(object_t *obj, int source_segment_num); static void synchronize_object_now(object_t *obj); -static inline void acquire_segment_lock(char *segment_base) +static inline void acquire_privatization_lock(void) { - uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, - &STM_PSEGMENT->segment_lock); + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); spinlock_acquire(*lock); } -static inline void release_segment_lock(char *segment_base) +static inline void release_privatization_lock(void) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); + spinlock_release(*lock); +} + +static inline void acquire_marker_lock(char *segment_base) { uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, - &STM_PSEGMENT->segment_lock); + &STM_PSEGMENT->marker_lock); + spinlock_acquire(*lock); +} + +static inline void release_marker_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->marker_lock); spinlock_release(*lock); } diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -92,17 +92,20 @@ /* uncommon case: need to initialize some more pages */ spinlock_acquire(lock_growth_large); - if (addr + size > uninitialized_page_start) { + char *start = uninitialized_page_start; + if (addr + size > start) { uintptr_t npages; - npages = (addr + size - uninitialized_page_start) / 4096UL; + npages = (addr + size - start) / 4096UL; npages += GCPAGE_NUM_PAGES; - if 
(uninitialized_page_stop - uninitialized_page_start < - npages * 4096UL) { + if (uninitialized_page_stop - start < npages * 4096UL) { stm_fatalerror("out of memory!"); /* XXX */ } - setup_N_pages(uninitialized_page_start, npages); - __sync_synchronize(); - uninitialized_page_start += npages * 4096UL; + setup_N_pages(start, npages); + if (!__sync_bool_compare_and_swap(&uninitialized_page_start, + start, + start + npages * 4096UL)) { + stm_fatalerror("uninitialized_page_start changed?"); + } } spinlock_release(lock_growth_large); return addr; diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -353,6 +353,9 @@ mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); +#ifndef NDEBUG + memset((char *)&mscan->d, 0xda, request_size); +#endif lm_unlock(); diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -92,10 +92,10 @@ uintptr_t marker[2]) { char *segment_base = get_segment_base(in_segment_num); - acquire_segment_lock(segment_base); + acquire_marker_lock(segment_base); assert(_has_mutex()); - /* here, we acquired the other thread's segment_lock, which means that: + /* here, we acquired the other thread's marker_lock, which means that: (1) it has finished filling 'modified_old_objects' after it sets up the write_locks[] value that we're conflicting with @@ -118,7 +118,7 @@ marker[0] = 0; marker[1] = 0; done: - release_segment_lock(segment_base); + release_marker_lock(segment_base); } static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -215,7 +215,9 @@ content); or add the object to 'large_overflow_objects'. 
*/ if (STM_PSEGMENT->minor_collect_will_commit_now) { + acquire_privatization_lock(); synchronize_object_now(obj); + release_privatization_lock(); } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); @@ -293,6 +295,8 @@ dprintf(("minor_collection commit=%d\n", (int)commit)); + acquire_marker_lock(STM_SEGMENT->segment_base); + STM_PSEGMENT->minor_collect_will_commit_now = commit; if (!commit) { /* 'STM_PSEGMENT->overflow_number' is used now by this collection, @@ -336,6 +340,8 @@ assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); + + release_marker_lock(STM_SEGMENT->segment_base); } static void minor_collection(bool commit) diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,6 +1,7 @@ /* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ #define NSE_SIGPAUSE STM_TIME_WAIT_OTHER +#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON static uint32_t highest_overflow_number; diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -108,18 +108,20 @@ { /* check this thread's 'pages_privatized' bit */ uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); - struct page_shared_s *ps = &pages_privatized[pagenum - PAGE_FLAG_START]; + volatile struct page_shared_s *ps = (volatile struct page_shared_s *) + &pages_privatized[pagenum - PAGE_FLAG_START]; if (ps->by_segment & bitmask) { /* the page is already privatized; nothing to do */ return; } -#ifndef NDEBUG - spinlock_acquire(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + spinlock_acquire(get_priv_segment(i)->privatization_lock); + } /* add this thread's 'pages_privatized' bit */ - __sync_fetch_and_add(&ps->by_segment, bitmask); + ps->by_segment |= bitmask; /* "unmaps" the page to make the address space location correspond again to its underlying file offset (XXX later we should again @@ -133,9 
+135,9 @@ /* copy the content from the shared (segment 0) source */ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); -#ifndef NDEBUG - spinlock_release(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + for (i = NB_SEGMENTS; i >= 1; i--) { + spinlock_release(get_priv_segment(i)->privatization_lock); + } } static void _page_do_reshare(long segnum, uintptr_t pagenum) diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -34,20 +34,6 @@ }; static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; -/* Rules for concurrent access to this array, possibly with is_private_page(): - - - we clear bits only during major collection, when all threads are - synchronized anyway - - - we set only the bit corresponding to our segment number, using - an atomic addition; and we do it _before_ we actually make the - page private. - - - concurrently, other threads checking the bits might (rarely) - get the answer 'true' to is_private_page() even though it is not - actually private yet. This inconsistency is in the direction - that we want for synchronize_object_now(). 
-*/ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); @@ -72,7 +58,3 @@ if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) page_reshare(pagenum); } - -#ifndef NDEBUG -static char lock_pages_privatizing[NB_SEGMENTS + 1] = { 0 }; -#endif diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -2,6 +2,10 @@ #include #include +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + /* Each segment can be in one of three possible states, described by the segment variable 'safe_point': @@ -260,6 +264,18 @@ static bool _safe_points_requested = false; #endif +static void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg) +{ + assert(_has_mutex()); + /* never overwrite abort signals or safepoint requests + (too messy to deal with) */ + if (!other_pseg->signalled_to_commit_soon + && !is_abort(other_pseg->pub.nursery_end) + && !pause_signalled) { + other_pseg->pub.nursery_end = NSE_SIGCOMMITSOON; + } +} + static void signal_everybody_to_pause_running(void) { assert(_safe_points_requested == false); @@ -323,7 +339,21 @@ if (STM_SEGMENT->nursery_end == NURSERY_END) break; /* no safe point requested */ + if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) { + if (previous_state == -1) { + previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); + } + + STM_PSEGMENT->signalled_to_commit_soon = true; + stmcb_commit_soon(); + if (!pause_signalled) { + STM_SEGMENT->nursery_end = NURSERY_END; + break; + } + STM_SEGMENT->nursery_end = NSE_SIGPAUSE; + } assert(STM_SEGMENT->nursery_end == NSE_SIGPAUSE); + assert(pause_signalled); /* If we are requested to enter a safe-point, we cannot proceed now. Wait until the safe-point request is removed for us. 
*/ diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -58,6 +58,7 @@ "wait write read", "wait inevitable", "wait other", + "sync commit soon", "bookkeeping", "minor gc", "major gc", diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -66,6 +66,7 @@ STM_TIME_WAIT_WRITE_READ, STM_TIME_WAIT_INEVITABLE, STM_TIME_WAIT_OTHER, + STM_TIME_SYNC_COMMIT_SOON, STM_TIME_BOOKKEEPING, STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, @@ -216,9 +217,13 @@ The "size rounded up" must be a multiple of 8 and at least 16. "Tracing" an object means enumerating all GC references in it, by invoking the callback passed as argument. + stmcb_commit_soon() is called when it is advised to commit + the transaction as soon as possible in order to avoid conflicts + or improve performance in general. */ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +extern void stmcb_commit_soon(void); /* Allocate an object of the given size, which must be a multiple diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -302,6 +302,9 @@ STM_POP_MARKER(*tl); } +void stmcb_commit_soon() +{ +} ''', sources=source_files, define_macros=[('STM_TESTS', '1'), ('STM_LARGEMALLOC_TEST', '1'), From noreply at buildbot.pypy.org Mon Apr 28 17:53:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 17:53:24 +0200 (CEST) Subject: [pypy-commit] stmgc marker: in-progress: clean-up Message-ID: <20140428155324.7CFB61C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1188:fe38fc7a7c18 Date: 2014-04-28 17:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/fe38fc7a7c18/ Log: in-progress: clean-up diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -187,12 +187,7 @@ dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; - if (kind 
== WRITE_WRITE_CONTENTION) - marker_lookup_other_thread_write_write(other_segment_num, obj); - else if (kind == INEVITABLE_CONTENTION) - marker_lookup_other_thread_inev(other_segment_num); - else if (kind == WRITE_READ_CONTENTION) - marker_lookup_same_thread_write_read(obj); + marker_contention_abort_self(abort_category, other_segment_num, obj); abort_with_mutex(); } diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -635,8 +635,9 @@ (int)pseg->transaction_state); } - /* look up and preserve the marker information as a string */ - marker_fetch_expand(pseg); + /* if we don't have marker information already, look up and preserve + the marker information from the shadowstack as a string */ + marker_default_for_abort(pseg); /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -13,25 +13,40 @@ static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) { + /* fetch the current marker from the tl's shadow stack, + and return it in 'marker[2]'. */ struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; - /* stop walking just before shadowstack_base, which contains - STM_STACK_MARKER_OLD which shouldn't be expanded */ - while (--current > base) { - if (((uintptr_t)current->ss) & 1) { - /* found the odd marker */ - marker[0] = (uintptr_t)current[0].ss; - marker[1] = (uintptr_t)current[1].ss; - return; - } + + /* The shadowstack_base contains STM_STACK_MARKER_OLD, which is + a convenient stopper for the loop below but which shouldn't + be returned. 
*/ + assert(base->ss == (object_t *)STM_STACK_MARKER_OLD); + + while (!(((uintptr_t)current->ss) & 1)) { + current--; + assert(current >= base); } - marker[0] = 0; - marker[1] = 0; + if (current != base) { + /* found the odd marker */ + marker[0] = (uintptr_t)current[0].ss; + marker[1] = (uintptr_t)current[1].ss; + } + else { + /* no marker found */ + marker[0] = 0; + marker[1] = 0; + } } static void marker_expand(uintptr_t marker[2], char *segment_base, char *outmarker) { + /* Expand the marker given by 'marker[2]' into a full string. This + works assuming that the marker was produced inside the segment + given by 'segment_base'. If that's from a different thread, you + must first acquire the corresponding 'marker_lock'. */ + assert(_has_mutex()); outmarker[0] = 0; if (marker[0] == 0) return; /* no marker entry found */ @@ -41,7 +56,7 @@ } } -static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg) { if (pseg->marker_self[0] != 0) return; /* already collected an entry */ @@ -58,8 +73,10 @@ static char _result[_STM_MARKER_LEN]; uintptr_t marker[2]; _result[0] = 0; + s_mutex_lock(); marker_fetch(STM_SEGMENT->running_thread, marker); marker_expand(marker, STM_SEGMENT->segment_base, _result); + s_mutex_unlock(); return _result; } @@ -91,8 +108,6 @@ static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, uintptr_t marker[2]) { - char *segment_base = get_segment_base(in_segment_num); - acquire_marker_lock(segment_base); assert(_has_mutex()); /* here, we acquired the other thread's marker_lock, which means that: @@ -112,50 +127,58 @@ assert(list_count(mlstm) == 2 * list_count(mlst)); marker[0] = list_item(mlstm, i * 2 + 0); marker[1] = list_item(mlstm, i * 2 + 1); - goto done; + return; } } marker[0] = 0; marker[1] = 0; - done: - release_marker_lock(segment_base); } -static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, - object_t *obj) 
+static void marker_contention_abort_self(int category, + uint8_t other_segment_num, + object_t *obj) { - uintptr_t marker[2]; - marker_fetch_obj_write(other_segment_num, obj, marker); + uintptr_t self_marker[2]; + uintptr_t other_marker[2]; + struct stm_priv_segment_info_s *my_pseg, *other_pseg; - struct stm_priv_segment_info_s *my_pseg, *other_pseg; + my_pseg = get_priv_segment(STM_SEGMENT->segment_num); other_pseg = get_priv_segment(other_segment_num); - my_pseg = get_priv_segment(STM_SEGMENT->segment_num); - my_pseg->marker_other[0] = 0; - marker_expand(marker, other_pseg->pub.segment_base, my_pseg->marker_other); -} -static void marker_lookup_other_thread_inev(uint8_t other_segment_num) -{ - /* same as marker_lookup_other_thread_write_write(), but for - an inevitable contention instead of a write-write contention */ - struct stm_priv_segment_info_s *my_pseg, *other_pseg; - assert(_has_mutex()); - other_pseg = get_priv_segment(other_segment_num); - my_pseg = get_priv_segment(STM_SEGMENT->segment_num); - marker_expand(other_pseg->marker_inev, other_pseg->pub.segment_base, - my_pseg->marker_other); -} + char *my_segment_base = STM_SEGMENT->segment_base; + char *other_segment_base = get_segment_base(other_segment_num); -static void marker_lookup_same_thread_write_read(object_t *obj) -{ - uintptr_t marker[2]; - marker_fetch_obj_write(STM_SEGMENT->segment_num, obj, marker); + /* I'm aborting. Collect the location for myself. It's usually + the current location, except in a write-read abort, in which + case it's the older location of the write. 
*/ + if (category == STM_TIME_RUN_ABORTED_WRITE_READ) + marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); + else + marker_fetch(my_pseg->pub.running_thread, self_marker); - struct stm_priv_segment_info_s *my_pseg; - my_pseg = get_priv_segment(STM_SEGMENT->segment_num); - my_pseg->marker_self[0] = 0; - my_pseg->marker_other[0] = 0; - marker_expand(marker, STM_SEGMENT->segment_base, my_pseg->marker_self); + marker_expand(self_marker, my_segment_base, my_pseg->marker_self); + + /* For some categories, we can also collect the relevant information + for the other segment. */ + acquire_marker_lock(other_segment_base); + + switch (category) { + case STM_TIME_RUN_ABORTED_WRITE_WRITE: + marker_fetch_obj_write(other_segment_num, obj, other_marker); + break; + case STM_TIME_RUN_ABORTED_INEVITABLE: + other_marker[0] = other_pseg->marker_inev[0]; + other_marker[1] = other_pseg->marker_inev[1]; + break; + default: + other_marker[0] = 0; + other_marker[1] = 0; + break; + } + + marker_expand(other_marker, other_segment_base, my_pseg->marker_other); + + release_marker_lock(other_segment_base); } static void marker_fetch_inev(void) diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -1,15 +1,13 @@ static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); +static void marker_fetch_inev(void); static void marker_expand(uintptr_t marker[2], char *segment_base, char *outmarker); -static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg); +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); static void marker_copy(stm_thread_local_t *tl, struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time); -static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, - uintptr_t marker[2]); -static void marker_lookup_other_thread_write_write(uint8_t other_segment_num, - object_t *obj); -static void marker_lookup_other_thread_inev(uint8_t 
other_segment_num); -static void marker_lookup_same_thread_write_read(object_t *obj); -static void marker_fetch_inev(void); + +static void marker_contention_abort_self(int category, + uint8_t other_segment_num, + object_t *obj); From noreply at buildbot.pypy.org Mon Apr 28 18:03:52 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 28 Apr 2014 18:03:52 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add paper outline Message-ID: <20140428160352.387A71D24A5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5198:656d221a75a6 Date: 2014-04-28 18:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/656d221a75a6/ Log: add paper outline diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex new file mode 100644 --- /dev/null +++ b/talk/icooolps2014/position-paper.tex @@ -0,0 +1,163 @@ + +\documentclass{sigplanconf} + +% The following \documentclass options may be useful: + +% preprint Remove this option only once the paper is in final form. +% 10pt To set in 10-point type instead of 9-point. +% 11pt To set in 11-point type instead of 9-point. +% authoryear To obtain author/year citation style instead of numeric. + +\usepackage[utf8]{inputenc} + +\usepackage{amsmath} + + +\begin{document} + +\special{papersize=8.5in,11in} +\setlength{\pdfpageheight}{\paperheight} +\setlength{\pdfpagewidth}{\paperwidth} + +\conferenceinfo{ICOOOLPS workshop 2014}{July 28th, 2014, Uppsala, Sweden} +\copyrightyear{2014} +\copyrightdata{978-1-nnnn-nnnn-n/yy/mm} +\doi{nnnnnnn.nnnnnnn} + +% Uncomment one of the following two, if you are not going for the +% traditional copyright transfer agreement. 
+ +%\exclusivelicense % ACM gets exclusive license to publish, + % you retain copyright + +%\permissiontopublish % ACM gets nonexclusive license to publish + % (paid open-access papers, + % short abstracts) + +%% \titlebanner{banner above paper title} % These are ignored unless +%% \preprintfooter{short description of paper} % 'preprint' option specified. + +\title{Title Text} +\subtitle{Position Paper, ICOOOLPS'14} + +\authorinfo{Remi Meier} + {Department of Computer Science\\ ETH Zürich} + {remi.meier at inf.ethz.ch} +\authorinfo{Armin Rigo} + {www.pypy.org} + {arigo at tunes.org} + +\maketitle + +\begin{abstract} +This is the text of the abstract. +\end{abstract} + +\category{CR-number}{subcategory}{third-level} + +% general terms are not compulsory anymore, +% you may leave them out +%% \terms +%% term1, term2 + +\keywords +transactional memory, dynamic languages, parallelism, global interpreter lock + +\section{Introduction} + +\subsection*{Issue} +efficiently supporting multi-CPU usage on dynamic languages that were designed with GIL semantics in +mind + +(supporting (large) atomic blocks for synchronization) + +\subsection*{Our Position} +Current solutions for replacing the GIL include STM, HTM, and +fine-grained locking. STM is usually too slow, HTM very limited, and +locking suffers from complexity that makes it hard to implement +correctly. We argue that the best way forward is still STM and that +its performance problem can be solved. + +%% Current solutions like STM, HTM, and fine-grained locking are slow, hard +%% to implement correctly, and don't fit the specific problems of dynamic +%% language. STM is the best way forward but has bad performance, so we +%% fix that. 
+ +\section{Discussion} +\paragraph{dynamic language VM problems} + +- high allocation rate (short lived objects)\\ +- (don't know anything about the program that runs until it actually runs: arbitrary atomic block size) + +\paragraph{GIL} + +- nice semantics\\ +- easy support of atomic blocks\\ +- no parallelism + +\paragraph{fine-grained locking} + +- support of atomic blocks?\\ +- hard to get right (deadlocks, performance, lock-granularity)\\ +- very hard to get right for a large language\\ +- hard to retro-fit, as all existing code assumes GIL semantics\\ +- (there are some semantic differences, right? not given perfect lock-placement, but well) +( http://www.jython.org/jythonbook/en/1.0/Concurrency.html ) + +\paragraph{multiprocessing / no-sharing models} + +- often needs major restructuring of programs (explicit data exchange)\\ +- sometimes communication overhead is too large\\ +- shared memory is a problem, copies of memory are too expensive + +\paragraph{HTM} + +- false-sharing on cache-line level\\ +- limited capacity (caches, undocumented)\\ +- random aborts (haswell)\\ +- generally: transaction-length limited (no atomic blocks) + +\paragraph{STM} + +- overhead (100-1000\%) (barrier reference resolution, kills performance on low \#cpu) +(FastLane: low overhead, not much gain)\\ +- unlimited transaction length (easy atomic blocks) + +\section{Potential Approach} +possible solution:\\ +- use virtual memory paging to somehow lower the STM overhead\\ +- tight integration with GC and jit? + + +\appendix +\section{Appendix Title} + +This is the text of the appendix, if you need one. + +\acks + +Acknowledgments, if needed. + +% We recommend abbrvnat bibliography style. + +\bibliographystyle{abbrvnat} + +% The bibliography should be embedded for final submission. + +\begin{thebibliography}{} +\softraggedright + +\bibitem[Smith et~al.(2009)Smith, Jones]{smith02} +P. Q. Smith, and X. Y. Jones. ...reference text... 
+ +\end{thebibliography} + + +\end{document} + +% Revision History +% -------- ------- +% Date Person Ver. Change +% ---- ------ ---- ------ + +% 2013.06.29 TU 0.1--4 comments on permission/copyright notices diff --git a/talk/icooolps2014/sigplanconf.cls b/talk/icooolps2014/sigplanconf.cls new file mode 100644 --- /dev/null +++ b/talk/icooolps2014/sigplanconf.cls @@ -0,0 +1,1311 @@ +%----------------------------------------------------------------------------- +% +% LaTeX Class/Style File +% +% Name: sigplanconf.cls +% +% Purpose: A LaTeX 2e class file for SIGPLAN conference proceedings. +% This class file supercedes acm_proc_article-sp, +% sig-alternate, and sigplan-proc. +% +% Author: Paul C. Anagnostopoulos +% Windfall Software +% 978 371-2316 +% paul [atsign] windfall.com +% +% Created: 12 September 2004 +% +% Revisions: See end of file. +% +% This work is licensed under the Creative Commons Attribution License. +% To view a copy of this license, visit +% http://creativecommons.org/licenses/by/3.0/ +% or send a letter to Creative Commons, 171 2nd Street, Suite 300, +% San Francisco, California, 94105, U.S.A. +% +%----------------------------------------------------------------------------- + + +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{sigplanconf}[2013/07/02 v2.8 ACM SIGPLAN Proceedings] + +% The following few pages contain LaTeX programming extensions adapted +% from the ZzTeX macro package. + +% Token Hackery +% ----- ------- + + +\def \@expandaftertwice {\expandafter\expandafter\expandafter} +\def \@expandafterthrice {\expandafter\expandafter\expandafter\expandafter + \expandafter\expandafter\expandafter} + +% This macro discards the next token. + +\def \@discardtok #1{}% token + +% This macro removes the `pt' following a dimension. + +{\catcode `\p = 12 \catcode `\t = 12 + +\gdef \@remover #1pt{#1} + +} % \catcode + +% This macro extracts the contents of a macro and returns it as plain text. 
+% Usage: \expandafter\@defof \meaning\macro\@mark + +\def \@defof #1:->#2\@mark{#2} + +% Control Sequence Names +% ------- -------- ----- + + +\def \@name #1{% {\tokens} + \csname \expandafter\@discardtok \string#1\endcsname} + +\def \@withname #1#2{% {\command}{\tokens} + \expandafter#1\csname \expandafter\@discardtok \string#2\endcsname} + +% Flags (Booleans) +% ----- ---------- + +% The boolean literals \@true and \@false are appropriate for use with +% the \if command, which tests the codes of the next two characters. + +\def \@true {TT} +\def \@false {FL} + +\def \@setflag #1=#2{\edef #1{#2}}% \flag = boolean + +% IF and Predicates +% -- --- ---------- + +% A "predicate" is a macro that returns \@true or \@false as its value. +% Such values are suitable for use with the \if conditional. For example: +% +% \if \@oddp{\x} \else \fi + +% A predicate can be used with \@setflag as follows: +% +% \@setflag \flag = {} + +% Here are the predicates for TeX's repertoire of conditional +% commands. These might be more appropriately interspersed with +% other definitions in this module, but what the heck. +% Some additional "obvious" predicates are defined. 
+ +\def \@eqlp #1#2{\ifnum #1 = #2\@true \else \@false \fi} +\def \@neqlp #1#2{\ifnum #1 = #2\@false \else \@true \fi} +\def \@lssp #1#2{\ifnum #1 < #2\@true \else \@false \fi} +\def \@gtrp #1#2{\ifnum #1 > #2\@true \else \@false \fi} +\def \@zerop #1{\ifnum #1 = 0\@true \else \@false \fi} +\def \@onep #1{\ifnum #1 = 1\@true \else \@false \fi} +\def \@posp #1{\ifnum #1 > 0\@true \else \@false \fi} +\def \@negp #1{\ifnum #1 < 0\@true \else \@false \fi} +\def \@oddp #1{\ifodd #1\@true \else \@false \fi} +\def \@evenp #1{\ifodd #1\@false \else \@true \fi} +\def \@rangep #1#2#3{\if \@orp{\@lssp{#1}{#2}}{\@gtrp{#1}{#3}}\@false \else + \@true \fi} +\def \@tensp #1{\@rangep{#1}{10}{19}} + +\def \@dimeqlp #1#2{\ifdim #1 = #2\@true \else \@false \fi} +\def \@dimneqlp #1#2{\ifdim #1 = #2\@false \else \@true \fi} +\def \@dimlssp #1#2{\ifdim #1 < #2\@true \else \@false \fi} +\def \@dimgtrp #1#2{\ifdim #1 > #2\@true \else \@false \fi} +\def \@dimzerop #1{\ifdim #1 = 0pt\@true \else \@false \fi} +\def \@dimposp #1{\ifdim #1 > 0pt\@true \else \@false \fi} +\def \@dimnegp #1{\ifdim #1 < 0pt\@true \else \@false \fi} + +\def \@vmodep {\ifvmode \@true \else \@false \fi} +\def \@hmodep {\ifhmode \@true \else \@false \fi} +\def \@mathmodep {\ifmmode \@true \else \@false \fi} +\def \@textmodep {\ifmmode \@false \else \@true \fi} +\def \@innermodep {\ifinner \@true \else \@false \fi} + +\long\def \@codeeqlp #1#2{\if #1#2\@true \else \@false \fi} + +\long\def \@cateqlp #1#2{\ifcat #1#2\@true \else \@false \fi} + +\long\def \@tokeqlp #1#2{\ifx #1#2\@true \else \@false \fi} +\long\def \@xtokeqlp #1#2{\expandafter\ifx #1#2\@true \else \@false \fi} + +\long\def \@definedp #1{% + \expandafter\ifx \csname \expandafter\@discardtok \string#1\endcsname + \relax \@false \else \@true \fi} + +\long\def \@undefinedp #1{% + \expandafter\ifx \csname \expandafter\@discardtok \string#1\endcsname + \relax \@true \else \@false \fi} + +\def \@emptydefp #1{\ifx #1\@empty \@true \else \@false \fi}% {\name} + 
+\let \@emptylistp = \@emptydefp + +\long\def \@emptyargp #1{% {#n} + \@empargp #1\@empargq\@mark} +\long\def \@empargp #1#2\@mark{% + \ifx #1\@empargq \@true \else \@false \fi} +\def \@empargq {\@empargq} + +\def \@emptytoksp #1{% {\tokenreg} + \expandafter\@emptoksp \the#1\@mark} + +\long\def \@emptoksp #1\@mark{\@emptyargp{#1}} + +\def \@voidboxp #1{\ifvoid #1\@true \else \@false \fi} +\def \@hboxp #1{\ifhbox #1\@true \else \@false \fi} +\def \@vboxp #1{\ifvbox #1\@true \else \@false \fi} + +\def \@eofp #1{\ifeof #1\@true \else \@false \fi} + + +% Flags can also be used as predicates, as in: +% +% \if \flaga \else \fi + + +% Now here we have predicates for the common logical operators. + +\def \@notp #1{\if #1\@false \else \@true \fi} + +\def \@andp #1#2{\if #1% + \if #2\@true \else \@false \fi + \else + \@false + \fi} + +\def \@orp #1#2{\if #1% + \@true + \else + \if #2\@true \else \@false \fi + \fi} + +\def \@xorp #1#2{\if #1% + \if #2\@false \else \@true \fi + \else + \if #2\@true \else \@false \fi + \fi} + +% Arithmetic +% ---------- + +\def \@increment #1{\advance #1 by 1\relax}% {\count} + +\def \@decrement #1{\advance #1 by -1\relax}% {\count} + +% Options +% ------- + + +\@setflag \@authoryear = \@false +\@setflag \@blockstyle = \@false +\@setflag \@copyrightwanted = \@true +\@setflag \@explicitsize = \@false +\@setflag \@mathtime = \@false +\@setflag \@natbib = \@true +\@setflag \@ninepoint = \@true +\newcount{\@numheaddepth} \@numheaddepth = 3 +\@setflag \@onecolumn = \@false +\@setflag \@preprint = \@false +\@setflag \@reprint = \@false +\@setflag \@tenpoint = \@false +\@setflag \@times = \@false + +% Note that all the dangerous article class options are trapped. 
+ +\DeclareOption{9pt}{\@setflag \@ninepoint = \@true + \@setflag \@explicitsize = \@true} + +\DeclareOption{10pt}{\PassOptionsToClass{10pt}{article}% + \@setflag \@ninepoint = \@false + \@setflag \@tenpoint = \@true + \@setflag \@explicitsize = \@true} + +\DeclareOption{11pt}{\PassOptionsToClass{11pt}{article}% + \@setflag \@ninepoint = \@false + \@setflag \@explicitsize = \@true} + +\DeclareOption{12pt}{\@unsupportedoption{12pt}} + +\DeclareOption{a4paper}{\@unsupportedoption{a4paper}} + +\DeclareOption{a5paper}{\@unsupportedoption{a5paper}} + +\DeclareOption{authoryear}{\@setflag \@authoryear = \@true} + +\DeclareOption{b5paper}{\@unsupportedoption{b5paper}} + +\DeclareOption{blockstyle}{\@setflag \@blockstyle = \@true} + +\DeclareOption{cm}{\@setflag \@times = \@false} + +\DeclareOption{computermodern}{\@setflag \@times = \@false} + +\DeclareOption{executivepaper}{\@unsupportedoption{executivepaper}} + +\DeclareOption{indentedstyle}{\@setflag \@blockstyle = \@false} + +\DeclareOption{landscape}{\@unsupportedoption{landscape}} + +\DeclareOption{legalpaper}{\@unsupportedoption{legalpaper}} + +\DeclareOption{letterpaper}{\@unsupportedoption{letterpaper}} + +\DeclareOption{mathtime}{\@setflag \@mathtime = \@true} + +\DeclareOption{natbib}{\@setflag \@natbib = \@true} + +\DeclareOption{nonatbib}{\@setflag \@natbib = \@false} + +\DeclareOption{nocopyrightspace}{\@setflag \@copyrightwanted = \@false} + +\DeclareOption{notitlepage}{\@unsupportedoption{notitlepage}} + +\DeclareOption{numberedpars}{\@numheaddepth = 4} + +\DeclareOption{numbers}{\@setflag \@authoryear = \@false} + +%%%\DeclareOption{onecolumn}{\@setflag \@onecolumn = \@true} + +\DeclareOption{preprint}{\@setflag \@preprint = \@true} + +\DeclareOption{reprint}{\@setflag \@reprint = \@true} + +\DeclareOption{times}{\@setflag \@times = \@true} + +\DeclareOption{titlepage}{\@unsupportedoption{titlepage}} + +\DeclareOption{twocolumn}{\@setflag \@onecolumn = \@false} + 
+\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}} + +\ExecuteOptions{9pt,indentedstyle,times} +\@setflag \@explicitsize = \@false +\ProcessOptions + +\if \@onecolumn + \if \@notp{\@explicitsize}% + \@setflag \@ninepoint = \@false + \PassOptionsToClass{11pt}{article}% + \fi + \PassOptionsToClass{twoside,onecolumn}{article} +\else + \PassOptionsToClass{twoside,twocolumn}{article} +\fi +\LoadClass{article} + +\def \@unsupportedoption #1{% + \ClassError{proc}{The standard '#1' option is not supported.}} + +% This can be used with the 'reprint' option to get the final folios. + +\def \setpagenumber #1{% + \setcounter{page}{#1}} + +\AtEndDocument{\label{sigplanconf at finalpage}} + +% Utilities +% --------- + + +\newcommand{\setvspace}[2]{% + #1 = #2 + \advance #1 by -1\parskip} + +% Document Parameters +% -------- ---------- + + +% Page: + +\setlength{\hoffset}{-1in} +\setlength{\voffset}{-1in} + +\setlength{\topmargin}{1in} +\setlength{\headheight}{0pt} +\setlength{\headsep}{0pt} + +\if \@onecolumn + \setlength{\evensidemargin}{.75in} + \setlength{\oddsidemargin}{.75in} +\else + \setlength{\evensidemargin}{.75in} + \setlength{\oddsidemargin}{.75in} +\fi + +% Text area: + +\newdimen{\standardtextwidth} +\setlength{\standardtextwidth}{42pc} + +\if \@onecolumn + \setlength{\textwidth}{40.5pc} +\else + \setlength{\textwidth}{\standardtextwidth} +\fi + +\setlength{\topskip}{8pt} +\setlength{\columnsep}{2pc} +\setlength{\textheight}{54.5pc} + +% Running foot: + +\setlength{\footskip}{30pt} + +% Paragraphs: + +\if \@blockstyle + \setlength{\parskip}{5pt plus .1pt minus .5pt} + \setlength{\parindent}{0pt} +\else + \setlength{\parskip}{0pt} + \setlength{\parindent}{12pt} +\fi + +\setlength{\lineskip}{.5pt} +\setlength{\lineskiplimit}{\lineskip} + +\frenchspacing +\pretolerance = 400 +\tolerance = \pretolerance +\setlength{\emergencystretch}{5pt} +\clubpenalty = 10000 +\widowpenalty = 10000 +\setlength{\hfuzz}{.5pt} + +% Standard vertical spaces: + 
+\newskip{\standardvspace} +\setvspace{\standardvspace}{5pt plus 1pt minus .5pt} + +% Margin paragraphs: + +\setlength{\marginparwidth}{36pt} +\setlength{\marginparsep}{2pt} +\setlength{\marginparpush}{8pt} + + +\setlength{\skip\footins}{8pt plus 3pt minus 1pt} +\setlength{\footnotesep}{9pt} + +\renewcommand{\footnoterule}{% + \hrule width .5\columnwidth height .33pt depth 0pt} + +\renewcommand{\@makefntext}[1]{% + \noindent \@makefnmark \hspace{1pt}#1} + +% Floats: + +\setcounter{topnumber}{4} +\setcounter{bottomnumber}{1} +\setcounter{totalnumber}{4} + +\renewcommand{\fps at figure}{tp} +\renewcommand{\fps at table}{tp} +\renewcommand{\topfraction}{0.90} +\renewcommand{\bottomfraction}{0.30} +\renewcommand{\textfraction}{0.10} +\renewcommand{\floatpagefraction}{0.75} + +\setcounter{dbltopnumber}{4} + +\renewcommand{\dbltopfraction}{\topfraction} +\renewcommand{\dblfloatpagefraction}{\floatpagefraction} + +\setlength{\floatsep}{18pt plus 4pt minus 2pt} +\setlength{\textfloatsep}{18pt plus 4pt minus 3pt} +\setlength{\intextsep}{10pt plus 4pt minus 3pt} + +\setlength{\dblfloatsep}{18pt plus 4pt minus 2pt} +\setlength{\dbltextfloatsep}{20pt plus 4pt minus 3pt} + +% Miscellaneous: + +\errorcontextlines = 5 + +% Fonts +% ----- + + +\if \@times + \renewcommand{\rmdefault}{ptm}% + \if \@mathtime + \usepackage[mtbold,noTS1]{mathtime}% + \else +%%% \usepackage{mathptm}% + \fi +\else + \relax +\fi + +\if \@ninepoint + +\renewcommand{\normalsize}{% + \@setfontsize{\normalsize}{9pt}{10pt}% + \setlength{\abovedisplayskip}{5pt plus 1pt minus .5pt}% + \setlength{\belowdisplayskip}{\abovedisplayskip}% + \setlength{\abovedisplayshortskip}{3pt plus 1pt minus 2pt}% + \setlength{\belowdisplayshortskip}{\abovedisplayshortskip}} + +\renewcommand{\tiny}{\@setfontsize{\tiny}{5pt}{6pt}} + +\renewcommand{\scriptsize}{\@setfontsize{\scriptsize}{7pt}{8pt}} + +\renewcommand{\small}{% + \@setfontsize{\small}{8pt}{9pt}% + \setlength{\abovedisplayskip}{4pt plus 1pt minus 1pt}% + 
\setlength{\belowdisplayskip}{\abovedisplayskip}% + \setlength{\abovedisplayshortskip}{2pt plus 1pt}% + \setlength{\belowdisplayshortskip}{\abovedisplayshortskip}} + +\renewcommand{\footnotesize}{% + \@setfontsize{\footnotesize}{8pt}{9pt}% + \setlength{\abovedisplayskip}{4pt plus 1pt minus .5pt}% + \setlength{\belowdisplayskip}{\abovedisplayskip}% + \setlength{\abovedisplayshortskip}{2pt plus 1pt}% + \setlength{\belowdisplayshortskip}{\abovedisplayshortskip}} + +\renewcommand{\large}{\@setfontsize{\large}{11pt}{13pt}} + +\renewcommand{\Large}{\@setfontsize{\Large}{14pt}{18pt}} + +\renewcommand{\LARGE}{\@setfontsize{\LARGE}{18pt}{20pt}} + +\renewcommand{\huge}{\@setfontsize{\huge}{20pt}{25pt}} + +\renewcommand{\Huge}{\@setfontsize{\Huge}{25pt}{30pt}} + +\else\if \@tenpoint + +\relax + +\else + +\relax + +\fi\fi + +% Abstract +% -------- + + +\renewenvironment{abstract}{% + \section*{Abstract}% + \normalsize}{% + } + +% Bibliography +% ------------ + + +\renewenvironment{thebibliography}[1] + {\section*{\refname + \@mkboth{\MakeUppercase\refname}{\MakeUppercase\refname}}% + \list{\@biblabel{\@arabic\c at enumiv}}% + {\settowidth\labelwidth{\@biblabel{#1}}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \@openbib at code + \usecounter{enumiv}% + \let\p at enumiv\@empty + \renewcommand\theenumiv{\@arabic\c at enumiv}}% + \bibfont + \clubpenalty4000 + \@clubpenalty \clubpenalty + \widowpenalty4000% + \sfcode`\.\@m} + {\def\@noitemerr + {\@latex at warning{Empty `thebibliography' environment}}% + \endlist} + +\if \@natbib + +\if \@authoryear + \typeout{Using natbib package with 'authoryear' citation style.} + \usepackage[authoryear,square]{natbib} + \bibpunct{(}{)}{;}{a}{}{,} % Change fences to parentheses; + % citation separator to semicolon; + % eliminate comma between author and year. 
+ \let \cite = \citep +\else + \typeout{Using natbib package with 'numbers' citation style.} + \usepackage[numbers,sort&compress,square]{natbib} +\fi +\setlength{\bibsep}{3pt plus .5pt minus .25pt} + +\fi + +\def \bibfont {\small} + +% Categories +% ---------- + + +\@setflag \@firstcategory = \@true + +\newcommand{\category}[3]{% + \if \@firstcategory + \paragraph*{Categories and Subject Descriptors}% + \@setflag \@firstcategory = \@false + \else + \unskip ;\hspace{.75em}% + \fi + \@ifnextchar [{\@category{#1}{#2}{#3}}{\@category{#1}{#2}{#3}[]}} + +\def \@category #1#2#3[#4]{% + {\let \and = \relax + #1 [\textit{#2}]% + \if \@emptyargp{#4}% + \if \@notp{\@emptyargp{#3}}: #3\fi + \else + :\space + \if \@notp{\@emptyargp{#3}}#3---\fi + \textrm{#4}% + \fi}} + +% Copyright Notice +% --------- ------ + + +\def \ftype at copyrightbox {8} +\def \@toappear {} +\def \@permission {} +\def \@reprintprice {} + +\def \@copyrightspace {% + \@float{copyrightbox}[b]% + \vbox to 1.2in{% + \vfill + \parbox[b]{20pc}{% + \scriptsize + \if \@preprint + [Copyright notice will appear here + once 'preprint' option is removed.]\par + \else + \@toappear + \fi + \if \@reprint + \noindent Reprinted from \@conferencename, + \@proceedings, + \@conferenceinfo, + pp.~\number\thepage--\pageref{sigplanconf at finalpage}.\par + \fi}}% + \end at float} + +\newcommand{\reprintprice}[1]{% + \gdef \@reprintprice {#1}} + +\reprintprice{\$15.00} + +\long\def \toappear #1{% + \def \@toappear {#1}} + +\toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}, \quad \@conferenceinfo. 
\par + \noindent Copyright \copyright\ \@copyrightyear\ ACM \@copyrightdata + \dots \@reprintprice.\par + \noindent http://dx.doi.org/10.1145/\@doi } + +\newcommand{\permission}[1]{% + \gdef \@permission {#1}} + +\permission{% + Permission to make digital or hard copies of all or part of this work for + personal or classroom use is granted without fee provided that copies are + not made or distributed for profit or commercial advantage and that copies + bear this notice and the full citation on the first page. Copyrights for + components of this work owned by others than ACM must be honored. + Abstracting with credit is permitted. To copy otherwise, or republish, to + post on servers or to redistribute to lists, requires prior specific + permission and/or a fee. Request permissions from permissions at acm.org.} + +% These are two new rights management and bibstrip text blocks. + +\newcommand{\exclusivelicense}{% + \permission{% + Permission to make digital or hard copies of all or part of this work for + personal or classroom use is granted without fee provided that copies are + not made or distributed for profit or commercial advantage and that copies + bear this notice and the full citation on the first page. Copyrights for + components of this work owned by others than the author(s) must be honored. + Abstracting with credit is permitted. To copy otherwise, or republish, to + post on servers or to redistribute to lists, requires prior specific + permission and/or a fee. Request permissions from permissions at acm.org.} + \toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}, \quad \@conferenceinfo. \par + \noindent Copyright is held by the owner/author(s). Publication rights licensed to ACM. 
\par + \noindent ACM \@copyrightdata \dots \@reprintprice.\par + \noindent http://dx.doi.org/10.1145/\@doi}} + +\newcommand{\permissiontopublish}{% + \permission{% + Permission to make digital or hard copies of part or all of this work for + personal or classroom use is granted without fee provided that copies are + not made or distributed for profit or commercial advantage and that copies + bear this notice and the full citation on the first page. Copyrights for + third-party components of this work must be honored. + For all other uses, contact the owner/author(s).}% + \toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}, \quad \@conferenceinfo. \par + \noindent Copyright is held by the owner/author(s). \par + \noindent ACM \@copyrightdata.\par + \noindent http://dx.doi.org/10.1145/\@doi}} + +% The following permission notices are +% for the traditional copyright transfer agreement option. + +% Exclusive license and permission-to-publish +% give more complicated permission notices. +% These are not covered here. + +\newcommand{\ACMCanadapermission}{% + \permission{% + ACM acknowledges that this contribution was authored or + co-authored by an affiliate of the Canadian National + Government. As such, the Crown in Right of Canada retains an equal + interest in the copyright. Reprint requests should be forwarded to + ACM.}} + +\newcommand{\ACMUSpermission}{% + \permission{% + ACM acknowledges that this contribution was authored or + co-authored by a contractor or affiliate of the United States + Government. As such, the United States Government retains a + nonexclusive, royalty-free right to publish or reproduce this + article, or to allow others to do so, for Government purposes + only.}} + +\newcommand{\USpublicpermission}{% + \permission{% + This paper is authored by an employee(s) of the United States + Government and is in the public domain. 
Non-exclusive copying or + redistribution is allowed, provided that the article citation is + given and the authors and the agency are clearly identified as its + source.}% + \toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}, \quad \@conferenceinfo. \par + \noindent ACM \@copyrightdata.\par + \noindent http://dx.doi.org/10.1145/\@doi}} + +\newcommand{\authorversion}[4]{% + \permission{% + Copyright \copyright\ ACM, #1. This is the author's version of the work. + It is posted here by permission of ACM for your personal use. + Not for redistribution. The definitive version was published in + #2, #3, http://dx.doi.org/10.1145/#4.}} + +% Enunciations +% ------------ + + +\def \@begintheorem #1#2{% {name}{number} + \trivlist + \item[\hskip \labelsep \textsc{#1 #2.}]% + \itshape\selectfont + \ignorespaces} + +\def \@opargbegintheorem #1#2#3{% {name}{number}{title} + \trivlist + \item[% + \hskip\labelsep \textsc{#1\ #2}% + \if \@notp{\@emptyargp{#3}}\nut (#3).\fi]% + \itshape\selectfont + \ignorespaces} + +% Figures +% ------- + + +\@setflag \@caprule = \@true + +\long\def \@makecaption #1#2{% + \addvspace{4pt} + \if \@caprule + \hrule width \hsize height .33pt + \vspace{4pt} + \fi + \setbox \@tempboxa = \hbox{\@setfigurenumber{#1.}\nut #2}% + \if \@dimgtrp{\wd\@tempboxa}{\hsize}% + \noindent \@setfigurenumber{#1.}\nut #2\par + \else + \centerline{\box\@tempboxa}% + \fi} + +\newcommand{\nocaptionrule}{% + \@setflag \@caprule = \@false} + +\def \@setfigurenumber #1{% + {\rmfamily \bfseries \selectfont #1}} + +% Hierarchy +% --------- + + +\setcounter{secnumdepth}{\@numheaddepth} + +\newskip{\@sectionaboveskip} +\setvspace{\@sectionaboveskip}{10pt plus 3pt minus 2pt} + +\newskip{\@sectionbelowskip} +\if \@blockstyle + \setlength{\@sectionbelowskip}{0.1pt}% +\else + \setlength{\@sectionbelowskip}{4pt}% +\fi + +\renewcommand{\section}{% + \@startsection + {section}% + {1}% + {0pt}% + {-\@sectionaboveskip}% + {\@sectionbelowskip}% + 
{\large \bfseries \raggedright}} + +\newskip{\@subsectionaboveskip} +\setvspace{\@subsectionaboveskip}{8pt plus 2pt minus 2pt} + +\newskip{\@subsectionbelowskip} +\if \@blockstyle + \setlength{\@subsectionbelowskip}{0.1pt}% +\else + \setlength{\@subsectionbelowskip}{4pt}% +\fi + +\renewcommand{\subsection}{% + \@startsection% + {subsection}% + {2}% + {0pt}% + {-\@subsectionaboveskip}% + {\@subsectionbelowskip}% + {\normalsize \bfseries \raggedright}} + +\renewcommand{\subsubsection}{% + \@startsection% + {subsubsection}% + {3}% + {0pt}% + {-\@subsectionaboveskip} + {\@subsectionbelowskip}% + {\normalsize \bfseries \raggedright}} + +\newskip{\@paragraphaboveskip} +\setvspace{\@paragraphaboveskip}{6pt plus 2pt minus 2pt} + +\renewcommand{\paragraph}{% + \@startsection% + {paragraph}% + {4}% + {0pt}% + {\@paragraphaboveskip} + {-1em}% + {\normalsize \bfseries \if \@times \itshape \fi}} + +\renewcommand{\subparagraph}{% + \@startsection% + {subparagraph}% + {4}% + {0pt}% + {\@paragraphaboveskip} + {-1em}% + {\normalsize \itshape}} + +% Standard headings: + +\newcommand{\acks}{\section*{Acknowledgments}} + +\newcommand{\keywords}{\paragraph*{Keywords}} + +\newcommand{\terms}{\paragraph*{General Terms}} + +% Identification +% -------------- + + +\def \@conferencename {} +\def \@conferenceinfo {} +\def \@copyrightyear {} +\def \@copyrightdata {[to be supplied]} +\def \@proceedings {[Unknown Proceedings]} + + +\newcommand{\conferenceinfo}[2]{% + \gdef \@conferencename {#1}% + \gdef \@conferenceinfo {#2}} + +\newcommand{\copyrightyear}[1]{% + \gdef \@copyrightyear {#1}} + +\let \CopyrightYear = \copyrightyear + +\newcommand{\copyrightdata}[1]{% + \gdef \@copyrightdata {#1}} + +\let \crdata = \copyrightdata + +\newcommand{\doi}[1]{% + \gdef \@doi {#1}} + +\newcommand{\proceedings}[1]{% + \gdef \@proceedings {#1}} + +% Lists +% ----- + + +\setlength{\leftmargini}{13pt} +\setlength\leftmarginii{13pt} +\setlength\leftmarginiii{13pt} +\setlength\leftmarginiv{13pt} 
+\setlength{\labelsep}{3.5pt} + +\setlength{\topsep}{\standardvspace} +\if \@blockstyle + \setlength{\itemsep}{1pt} + \setlength{\parsep}{3pt} +\else + \setlength{\itemsep}{1pt} + \setlength{\parsep}{3pt} +\fi + +\renewcommand{\labelitemi}{{\small \centeroncapheight{\textbullet}}} +\renewcommand{\labelitemii}{\centeroncapheight{\rule{2.5pt}{2.5pt}}} +\renewcommand{\labelitemiii}{$-$} +\renewcommand{\labelitemiv}{{\Large \textperiodcentered}} + +\renewcommand{\@listi}{% + \leftmargin = \leftmargini + \listparindent = 0pt} +%%% \itemsep = 1pt +%%% \parsep = 3pt} +%%% \listparindent = \parindent} + +\let \@listI = \@listi + +\renewcommand{\@listii}{% + \leftmargin = \leftmarginii + \topsep = 1pt + \labelwidth = \leftmarginii + \advance \labelwidth by -\labelsep + \listparindent = \parindent} + +\renewcommand{\@listiii}{% + \leftmargin = \leftmarginiii + \labelwidth = \leftmarginiii + \advance \labelwidth by -\labelsep + \listparindent = \parindent} + +\renewcommand{\@listiv}{% + \leftmargin = \leftmarginiv + \labelwidth = \leftmarginiv + \advance \labelwidth by -\labelsep + \listparindent = \parindent} + +% Mathematics +% ----------- + + +\def \theequation {\arabic{equation}} + +% Miscellaneous +% ------------- + + +\newcommand{\balancecolumns}{% + \vfill\eject + \global\@colht = \textheight + \global\ht\@cclv = \textheight} + +\newcommand{\nut}{\hspace{.5em}} + +\newcommand{\softraggedright}{% + \let \\ = \@centercr + \leftskip = 0pt + \rightskip = 0pt plus 10pt} + +% Program Code +% ------- ---- + + +\newcommand{\mono}[1]{% + {\@tempdima = \fontdimen2\font + \texttt{\spaceskip = 1.1\@tempdima #1}}} + +% Running Heads and Feet +% ------- ----- --- ---- + + +\def \@preprintfooter {} + +\newcommand{\preprintfooter}[1]{% + \gdef \@preprintfooter {#1}} + +\if \@preprint + +\def \ps at plain {% + \let \@mkboth = \@gobbletwo + \let \@evenhead = \@empty + \def \@evenfoot {\scriptsize + \rlap{\textit{\@preprintfooter}}\hfil + \thepage \hfil + \llap{\textit{\@formatyear}}}% + 
\let \@oddhead = \@empty + \let \@oddfoot = \@evenfoot} + +\else\if \@reprint + +\def \ps at plain {% + \let \@mkboth = \@gobbletwo + \let \@evenhead = \@empty + \def \@evenfoot {\scriptsize \hfil \thepage \hfil}% + \let \@oddhead = \@empty + \let \@oddfoot = \@evenfoot} + +\else + +\let \ps at plain = \ps at empty +\let \ps at headings = \ps at empty +\let \ps at myheadings = \ps at empty + +\fi\fi + +\def \@formatyear {% + \number\year/\number\month/\number\day} + +% Special Characters +% ------- ---------- + + +\DeclareRobustCommand{\euro}{% + \protect{\rlap{=}}{\sf \kern .1em C}} + +% Title Page +% ----- ---- + + +\@setflag \@addauthorsdone = \@false + +\def \@titletext {\@latex at error{No title was provided}{}} +\def \@subtitletext {} + +\newcount{\@authorcount} + +\newcount{\@titlenotecount} +\newtoks{\@titlenotetext} + +\def \@titlebanner {} + +\renewcommand{\title}[1]{% + \gdef \@titletext {#1}} + +\newcommand{\subtitle}[1]{% + \gdef \@subtitletext {#1}} + +\newcommand{\authorinfo}[3]{% {names}{affiliation}{email/URL} + \global\@increment \@authorcount + \@withname\gdef {\@authorname\romannumeral\@authorcount}{#1}% + \@withname\gdef {\@authoraffil\romannumeral\@authorcount}{#2}% + \@withname\gdef {\@authoremail\romannumeral\@authorcount}{#3}} + +\renewcommand{\author}[1]{% + \@latex at error{The \string\author\space command is obsolete; + use \string\authorinfo}{}} + +\newcommand{\titlebanner}[1]{% + \gdef \@titlebanner {#1}} + +\renewcommand{\maketitle}{% + \pagestyle{plain}% + \if \@onecolumn + {\hsize = \standardtextwidth + \@maketitle}% + \else + \twocolumn[\@maketitle]% + \fi + \@placetitlenotes + \if \@copyrightwanted \@copyrightspace \fi} + +\def \@maketitle {% + \begin{center} + \@settitlebanner + \let \thanks = \titlenote + {\leftskip = 0pt plus 0.25\linewidth + \rightskip = 0pt plus 0.25 \linewidth + \parfillskip = 0pt + \spaceskip = .7em + \noindent \LARGE \bfseries \@titletext \par} + \vskip 6pt + \noindent \Large \@subtitletext \par + \vskip 
12pt + \ifcase \@authorcount + \@latex at error{No authors were specified for this paper}{}\or + \@titleauthors{i}{}{}\or + \@titleauthors{i}{ii}{}\or + \@titleauthors{i}{ii}{iii}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\@titleauthors{x}{}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\@titleauthors{x}{xi}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\@titleauthors{x}{xi}{xii}% + \else + \@latex at error{Cannot handle more than 12 authors}{}% + \fi + \vspace{1.75pc} + \end{center}} + +\def \@settitlebanner {% + \if \@andp{\@preprint}{\@notp{\@emptydefp{\@titlebanner}}}% + \vbox to 0pt{% + \vskip -32pt + \noindent \textbf{\@titlebanner}\par + \vss}% + \nointerlineskip + \fi} + +\def \@titleauthors #1#2#3{% + \if \@andp{\@emptyargp{#2}}{\@emptyargp{#3}}% + \noindent \@setauthor{40pc}{#1}{\@false}\par + \else\if \@emptyargp{#3}% + \noindent \@setauthor{17pc}{#1}{\@false}\hspace{3pc}% + \@setauthor{17pc}{#2}{\@false}\par + \else + \noindent \@setauthor{12.5pc}{#1}{\@false}\hspace{2pc}% + \@setauthor{12.5pc}{#2}{\@false}\hspace{2pc}% + \@setauthor{12.5pc}{#3}{\@true}\par + \relax + \fi\fi + \vspace{20pt}} + +\def \@setauthor #1#2#3{% {width}{text}{unused} + \vtop{% + \def \and {% + \hspace{16pt}} + \hsize = #1 + \normalfont + \centering + \large \@name{\@authorname#2}\par + \vspace{5pt} + \normalsize \@name{\@authoraffil#2}\par + \vspace{2pt} + \textsf{\@name{\@authoremail#2}}\par}} + +\def \@maybetitlenote #1{% 
+ \if \@andp{#1}{\@gtrp{\@authorcount}{3}}% + \titlenote{See page~\pageref{@addauthors} for additional authors.}% + \fi} + +\newtoks{\@fnmark} + +\newcommand{\titlenote}[1]{% + \global\@increment \@titlenotecount + \ifcase \@titlenotecount \relax \or + \@fnmark = {\ast}\or + \@fnmark = {\dagger}\or + \@fnmark = {\ddagger}\or + \@fnmark = {\S}\or + \@fnmark = {\P}\or + \@fnmark = {\ast\ast}% + \fi + \,$^{\the\@fnmark}$% + \edef \reserved at a {\noexpand\@appendtotext{% + \noexpand\@titlefootnote{\the\@fnmark}}}% + \reserved at a{#1}} + +\def \@appendtotext #1#2{% + \global\@titlenotetext = \expandafter{\the\@titlenotetext #1{#2}}} + +\newcount{\@authori} + +\iffalse +\def \additionalauthors {% + \if \@gtrp{\@authorcount}{3}% + \section{Additional Authors}% + \label{@addauthors}% + \noindent + \@authori = 4 + {\let \\ = ,% + \loop + \textbf{\@name{\@authorname\romannumeral\@authori}}, + \@name{\@authoraffil\romannumeral\@authori}, + email: \@name{\@authoremail\romannumeral\@authori}.% + \@increment \@authori + \if \@notp{\@gtrp{\@authori}{\@authorcount}} \repeat}% + \par + \fi + \global\@setflag \@addauthorsdone = \@true} +\fi + +\let \addauthorsection = \additionalauthors + +\def \@placetitlenotes { + \the\@titlenotetext} + +% Utilities +% --------- + + +\newcommand{\centeroncapheight}[1]{% + {\setbox\@tempboxa = \hbox{#1}% + \@measurecapheight{\@tempdima}% % Calculate ht(CAP) - ht(text) + \advance \@tempdima by -\ht\@tempboxa % ------------------ + \divide \@tempdima by 2 % 2 + \raise \@tempdima \box\@tempboxa}} + +\newbox{\@measbox} + +\def \@measurecapheight #1{% {\dimen} + \setbox\@measbox = \hbox{ABCDEFGHIJKLMNOPQRSTUVWXYZ}% + #1 = \ht\@measbox} + +\long\def \@titlefootnote #1#2{% + \insert\footins{% + \reset at font\footnotesize + \interlinepenalty\interfootnotelinepenalty + \splittopskip\footnotesep + \splitmaxdepth \dp\strutbox \floatingpenalty \@MM + \hsize\columnwidth \@parboxrestore +%%% \protected at edef\@currentlabel{% +%%% \csname p at 
footnote\endcsname\@thefnmark}% + \color at begingroup + \def \@makefnmark {$^{#1}$}% + \@makefntext{% + \rule\z@\footnotesep\ignorespaces#2\@finalstrut\strutbox}% + \color at endgroup}} + +% LaTeX Modifications +% ----- ------------- + +\def \@seccntformat #1{% + \@name{\the#1}% + \@expandaftertwice\@seccntformata \csname the#1\endcsname.\@mark + \quad} + +\def \@seccntformata #1.#2\@mark{% + \if \@emptyargp{#2}.\fi} + +% Revision History +% -------- ------- + + +% Date Person Ver. Change +% ---- ------ ---- ------ + +% 2004.09.12 PCA 0.1--4 Preliminary development. + +% 2004.11.18 PCA 0.5 Start beta testing. + +% 2004.11.19 PCA 0.6 Obsolete \author and replace with +% \authorinfo. +% Add 'nocopyrightspace' option. +% Compress article opener spacing. +% Add 'mathtime' option. +% Increase text height by 6 points. + +% 2004.11.28 PCA 0.7 Add 'cm/computermodern' options. +% Change default to Times text. + +% 2004.12.14 PCA 0.8 Remove use of mathptm.sty; it cannot +% coexist with latexsym or amssymb. + +% 2005.01.20 PCA 0.9 Rename class file to sigplanconf.cls. + +% 2005.03.05 PCA 0.91 Change default copyright data. + +% 2005.03.06 PCA 0.92 Add at-signs to some macro names. + +% 2005.03.07 PCA 0.93 The 'onecolumn' option defaults to '11pt', +% and it uses the full type width. + +% 2005.03.15 PCA 0.94 Add at-signs to more macro names. +% Allow margin paragraphs during review. + +% 2005.03.22 PCA 0.95 Implement \euro. +% Remove proof and newdef environments. + +% 2005.05.06 PCA 1.0 Eliminate 'onecolumn' option. +% Change footer to small italic and eliminate +% left portion if no \preprintfooter. +% Eliminate copyright notice if preprint. +% Clean up and shrink copyright box. + +% 2005.05.30 PCA 1.1 Add alternate permission statements. + +% 2005.06.29 PCA 1.1 Publish final first edition of guide. + +% 2005.07.14 PCA 1.2 Add \subparagraph. +% Use block paragraphs in lists, and adjust +% spacing between items and paragraphs. 
+ +% 2006.06.22 PCA 1.3 Add 'reprint' option and associated +% commands. + +% 2006.08.24 PCA 1.4 Fix bug in \maketitle case command. + +% 2007.03.13 PCA 1.5 The title banner only displays with the +% 'preprint' option. + +% 2007.06.06 PCA 1.6 Use \bibfont in \thebibliography. +% Add 'natbib' option to load and configure +% the natbib package. + +% 2007.11.20 PCA 1.7 Balance line lengths in centered article +% title (thanks to Norman Ramsey). + +% 2009.01.26 PCA 1.8 Change natbib \bibpunct values. + +% 2009.03.24 PCA 1.9 Change natbib to use the 'numbers' option. +% Change templates to use 'natbib' option. + +% 2009.09.01 PCA 2.0 Add \reprintprice command (suggested by +% Stephen Chong). + +% 2009.09.08 PCA 2.1 Make 'natbib' the default; add 'nonatbib'. +% SB Add 'authoryear' and 'numbers' (default) to +% control citation style when using natbib. +% Add \bibpunct to change punctuation for +% 'authoryear' style. + +% 2009.09.21 PCA 2.2 Add \softraggedright to the thebibliography +% environment. Also add to template so it will +% happen with natbib. + +% 2009.09.30 PCA 2.3 Remove \softraggedright from thebibliography. +% Just include in the template. + +% 2010.05.24 PCA 2.4 Obfuscate class author's email address. + +% 2011.11.08 PCA 2.5 Add copyright notice to this file. +% Remove 'sort' option from natbib when using +% 'authoryear' style. +% Add the \authorversion command. + +% 2013.02.22 PCA 2.6 Change natbib fences to parentheses when +% using 'authoryear' style. + +% 2013.05.17 PCA 2.7 Change standard and author copyright text. + +% 2013.07.02 TU 2.8 More changes to permission/copyright notes. +% Replaced ambiguous \authorpermission with +% \exclusivelicense and \permissiontopublish + + From noreply at buildbot.pypy.org Mon Apr 28 18:04:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 18:04:55 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Find out how to reuse the logic. 
Message-ID: <20140428160455.7EE761D24A5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1189:6b462cb2ba16 Date: 2014-04-28 18:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/6b462cb2ba16/ Log: Find out how to reuse the logic. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -187,7 +187,7 @@ dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; - marker_contention_abort_self(abort_category, other_segment_num, obj); + marker_contention(abort_category, false, other_segment_num, obj); abort_with_mutex(); } @@ -195,10 +195,7 @@ /* We have to signal the other thread to abort, and wait until it does. */ contmgr.other_pseg->pub.nursery_end = abort_category; - if (kind == WRITE_WRITE_CONTENTION) { - //marker_fetch_obj_write(contmgr.other_pseg->pub.segment_num, - // obj, contmgr.other_pseg->...); - } + marker_contention(abort_category, true, other_segment_num, obj); int sp = contmgr.other_pseg->safe_point; switch (sp) { diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -134,9 +134,8 @@ marker[1] = 0; } -static void marker_contention_abort_self(int category, - uint8_t other_segment_num, - object_t *obj) +static void marker_contention(int category, bool abort_other, + uint8_t other_segment_num, object_t *obj) { uintptr_t self_marker[2]; uintptr_t other_marker[2]; @@ -148,25 +147,30 @@ char *my_segment_base = STM_SEGMENT->segment_base; char *other_segment_base = get_segment_base(other_segment_num); - /* I'm aborting. Collect the location for myself. It's usually - the current location, except in a write-read abort, in which - case it's the older location of the write. */ + acquire_marker_lock(other_segment_base); + + /* Collect the location for myself. It's usually the current + location, except in a write-read abort, in which case it's the + older location of the write. 
*/ if (category == STM_TIME_RUN_ABORTED_WRITE_READ) marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); else marker_fetch(my_pseg->pub.running_thread, self_marker); - marker_expand(self_marker, my_segment_base, my_pseg->marker_self); + /* Expand this location into either my_pseg->marker_self or + other_pseg->marker_other, depending on who aborts. */ + marker_expand(self_marker, my_segment_base, + abort_other ? other_pseg->marker_other + : my_pseg->marker_self); /* For some categories, we can also collect the relevant information for the other segment. */ - acquire_marker_lock(other_segment_base); - switch (category) { case STM_TIME_RUN_ABORTED_WRITE_WRITE: marker_fetch_obj_write(other_segment_num, obj, other_marker); break; case STM_TIME_RUN_ABORTED_INEVITABLE: + assert(abort_other == false); other_marker[0] = other_pseg->marker_inev[0]; other_marker[1] = other_pseg->marker_inev[1]; break; @@ -176,7 +180,16 @@ break; } - marker_expand(other_marker, other_segment_base, my_pseg->marker_other); + marker_expand(other_marker, other_segment_base, + abort_other ? 
other_pseg->marker_self + : my_pseg->marker_other); + + if (abort_other && other_pseg->marker_self[0] == 0) { + if (category == STM_TIME_RUN_ABORTED_WRITE_READ) + strcpy(other_pseg->marker_self, ""); + else + strcpy(other_pseg->marker_self, ""); + } release_marker_lock(other_segment_base); } diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -8,6 +8,5 @@ struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time); -static void marker_contention_abort_self(int category, - uint8_t other_segment_num, - object_t *obj); +static void marker_contention(int category, bool abort_other, + uint8_t other_segment_num, object_t *obj); From noreply at buildbot.pypy.org Mon Apr 28 18:09:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 18:09:28 +0200 (CEST) Subject: [pypy-commit] stmgc marker: A passing test Message-ID: <20140428160928.266D71D2932@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1190:3e7dc1e81647 Date: 2014-04-28 18:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/3e7dc1e81647/ Log: A passing test diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -299,3 +299,36 @@ assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE assert ffi.string(tl.longest_marker_self) == '19' assert ffi.string(tl.longest_marker_other) == '21' + + def test_double_remote_markers_cb_write_read(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + p = stm_allocate_old(16) + # + self.start_transaction() + assert stm_get_char(p) == '\x00' # read + tl0 = self.get_stm_thread_local() + # + self.switch(1) + self.start_transaction() + self.become_inevitable() + self.push_root(ffi.cast("object_t 
*", 21)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'B') # write, will abort #0 + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 23)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + self.commit_transaction() + # + py.test.raises(Conflict, self.switch, 0) + # + tl = self.get_stm_thread_local() + assert tl is tl0 + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ + assert ffi.string(tl.longest_marker_self)=='' + assert ffi.string(tl.longest_marker_other) == '21' From noreply at buildbot.pypy.org Mon Apr 28 18:36:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 18:36:55 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Report markers for forced pauses. Seems to work but hard to test for now... Message-ID: <20140428163655.7D6281D2933@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1191:4bde66e3b621 Date: 2014-04-28 18:36 +0200 http://bitbucket.org/pypy/stmgc/changeset/4bde66e3b621/ Log: Report markers for forced pauses. Seems to work but hard to test for now... 
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -51,7 +51,7 @@ char *outputbuf, size_t outputbufsize) { assert(following_object == NULL); - snprintf(outputbuf, outputbufsize, "<%lu>", odd_number); + snprintf(outputbuf, outputbufsize, "<%p %lu>", base, odd_number); } @@ -98,6 +98,18 @@ STM_START_TRANSACTION(&stm_thread_local, here); + if (stm_thread_local.longest_marker_state != 0) { + fprintf(stderr, "[%p] marker %d for %.6f seconds:\n", + &stm_thread_local, + stm_thread_local.longest_marker_state, + stm_thread_local.longest_marker_time); + fprintf(stderr, "\tself:\t\"%s\"\n\tother:\t\"%s\"\n", + stm_thread_local.longest_marker_self, + stm_thread_local.longest_marker_other); + stm_thread_local.longest_marker_state = 0; + stm_thread_local.longest_marker_time = 0.0; + } + nodeptr_t prev = initial; stm_read((objptr_t)prev); diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -162,6 +162,7 @@ itself already paused here. */ contmgr.other_pseg->signal_when_done = true; + marker_contention(kind, false, other_segment_num, obj); change_timing_state(wait_category); @@ -178,7 +179,13 @@ if (must_abort()) abort_with_mutex(); - change_timing_state(STM_TIME_RUN_CURRENT); + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + double elapsed = + change_timing_state_tl(pseg->pub.running_thread, + STM_TIME_RUN_CURRENT); + marker_copy(pseg->pub.running_thread, pseg, + wait_category, elapsed); } else if (!contmgr.abort_other) { @@ -187,7 +194,7 @@ dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; - marker_contention(abort_category, false, other_segment_num, obj); + marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); } @@ -195,7 +202,7 @@ /* We have to signal the other thread to abort, and wait until it does. 
*/ contmgr.other_pseg->pub.nursery_end = abort_category; - marker_contention(abort_category, true, other_segment_num, obj); + marker_contention(kind, true, other_segment_num, obj); int sp = contmgr.other_pseg->safe_point; switch (sp) { diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -134,7 +134,7 @@ marker[1] = 0; } -static void marker_contention(int category, bool abort_other, +static void marker_contention(int kind, bool abort_other, uint8_t other_segment_num, object_t *obj) { uintptr_t self_marker[2]; @@ -152,7 +152,7 @@ /* Collect the location for myself. It's usually the current location, except in a write-read abort, in which case it's the older location of the write. */ - if (category == STM_TIME_RUN_ABORTED_WRITE_READ) + if (kind == WRITE_READ_CONTENTION) marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); else marker_fetch(my_pseg->pub.running_thread, self_marker); @@ -165,11 +165,11 @@ /* For some categories, we can also collect the relevant information for the other segment. 
*/ - switch (category) { - case STM_TIME_RUN_ABORTED_WRITE_WRITE: + switch (kind) { + case WRITE_WRITE_CONTENTION: marker_fetch_obj_write(other_segment_num, obj, other_marker); break; - case STM_TIME_RUN_ABORTED_INEVITABLE: + case INEVITABLE_CONTENTION: assert(abort_other == false); other_marker[0] = other_pseg->marker_inev[0]; other_marker[1] = other_pseg->marker_inev[1]; @@ -185,7 +185,7 @@ : my_pseg->marker_other); if (abort_other && other_pseg->marker_self[0] == 0) { - if (category == STM_TIME_RUN_ABORTED_WRITE_READ) + if (kind == WRITE_READ_CONTENTION) strcpy(other_pseg->marker_self, ""); else strcpy(other_pseg->marker_self, ""); diff --git a/c7/stm/marker.h b/c7/stm/marker.h --- a/c7/stm/marker.h +++ b/c7/stm/marker.h @@ -8,5 +8,5 @@ struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time); -static void marker_contention(int category, bool abort_other, +static void marker_contention(int kind, bool abort_other, uint8_t other_segment_num, object_t *obj); diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -25,10 +25,11 @@ return oldstate; } -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate) +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate) { TIMING_CHANGE(tl, newstate); + return elasped; } static void timing_end_transaction(enum stm_time_e attribute_to) diff --git a/c7/stm/timing.h b/c7/stm/timing.h --- a/c7/stm/timing.h +++ b/c7/stm/timing.h @@ -8,7 +8,7 @@ } static enum stm_time_e change_timing_state(enum stm_time_e newstate); -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate); +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate); static void timing_end_transaction(enum stm_time_e attribute_to); From noreply at buildbot.pypy.org Mon Apr 28 18:59:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 28 Apr 2014 18:59:36 +0200 (CEST) 
Subject: [pypy-commit] pypy default: this workaround only necessary when using AI_NUMERICSERV Message-ID: <20140428165936.804F71D2938@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71044:877e67830b30 Date: 2014-04-28 12:53 -0400 http://bitbucket.org/pypy/pypy/changeset/877e67830b30/ Log: this workaround only necessary when using AI_NUMERICSERV diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -1146,9 +1146,9 @@ address_to_fill=None): # port_or_service is a string, not an int (but try str(port_number)). assert port_or_service is None or isinstance(port_or_service, str) - if _c._MACOSX: - if port_or_service is None or port_or_service == '0': - port_or_service = '00' + if _c._MACOSX and flags & AI_NUMERICSERV and \ + (port_or_service is None or port_or_service == '0'): + port_or_service = '00' hints = lltype.malloc(_c.addrinfo, flavor='raw', zero=True) rffi.setintfield(hints, 'c_ai_family', family) rffi.setintfield(hints, 'c_ai_socktype', socktype) From noreply at buildbot.pypy.org Mon Apr 28 19:09:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 19:09:28 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/4bde66e3b621 (branch "marker") Message-ID: <20140428170928.AE26D1C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71045:73611e136820 Date: 2014-04-28 18:45 +0200 http://bitbucket.org/pypy/pypy/changeset/73611e136820/ Log: import stmgc/4bde66e3b621 (branch "marker") diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -889897f2f5ef +4bde66e3b621 diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ 
b/rpython/translator/stm/src_stm/stm/contention.c @@ -100,7 +100,8 @@ static void contention_management(uint8_t other_segment_num, - enum contention_kind_e kind) + enum contention_kind_e kind, + object_t *obj) { assert(_has_mutex()); assert(other_segment_num != STM_SEGMENT->segment_num); @@ -162,10 +163,12 @@ itself already paused here. */ contmgr.other_pseg->signal_when_done = true; + marker_contention(kind, false, other_segment_num, obj); change_timing_state(wait_category); - /* XXX should also tell other_pseg "please commit soon" */ + /* tell the other to commit ASAP */ + signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("pausing...\n")); cond_signal(C_AT_SAFE_POINT); @@ -177,12 +180,22 @@ if (must_abort()) abort_with_mutex(); - change_timing_state(STM_TIME_RUN_CURRENT); + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + double elapsed = + change_timing_state_tl(pseg->pub.running_thread, + STM_TIME_RUN_CURRENT); + marker_copy(pseg->pub.running_thread, pseg, + wait_category, elapsed); } else if (!contmgr.abort_other) { + /* tell the other to commit ASAP, since it causes aborts */ + signal_other_to_commit_soon(contmgr.other_pseg); + dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; + marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); } @@ -190,6 +203,7 @@ /* We have to signal the other thread to abort, and wait until it does. 
*/ contmgr.other_pseg->pub.nursery_end = abort_category; + marker_contention(kind, true, other_segment_num, obj); int sp = contmgr.other_pseg->safe_point; switch (sp) { @@ -257,10 +271,18 @@ abort_data_structures_from_segment_num(other_segment_num); } dprintf(("killed other thread\n")); + + /* we should commit soon, we caused an abort */ + //signal_other_to_commit_soon(get_priv_segment(STM_SEGMENT->segment_num)); + if (!STM_PSEGMENT->signalled_to_commit_soon) { + STM_PSEGMENT->signalled_to_commit_soon = true; + stmcb_commit_soon(); + } } } -static void write_write_contention_management(uintptr_t lock_idx) +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj) { s_mutex_lock(); @@ -271,7 +293,7 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); - contention_management(other_segment_num, WRITE_WRITE_CONTENTION); + contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. 
*/ @@ -280,12 +302,13 @@ s_mutex_unlock(); } -static void write_read_contention_management(uint8_t other_segment_num) +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj) { - contention_management(other_segment_num, WRITE_READ_CONTENTION); + contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); } static void inevitable_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, INEVITABLE_CONTENTION); + contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL); } diff --git a/rpython/translator/stm/src_stm/stm/contention.h b/rpython/translator/stm/src_stm/stm/contention.h --- a/rpython/translator/stm/src_stm/stm/contention.h +++ b/rpython/translator/stm/src_stm/stm/contention.h @@ -1,11 +1,14 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ -static void write_write_contention_management(uintptr_t lock_idx); -static void write_read_contention_management(uint8_t other_segment_num); +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj); +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj); static void inevitable_contention_management(uint8_t other_segment_num); static inline bool is_abort(uintptr_t nursery_end) { - return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE); + return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE + && nursery_end != NSE_SIGCOMMITSOON); } static inline bool is_aborting_now(uint8_t other_segment_num) { diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -15,13 +15,10 @@ #define EVENTUALLY(condition) \ { \ if (!(condition)) { \ - int _i; \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_acquire(lock_pages_privatizing[_i]); \ + acquire_privatization_lock(); \ if (!(condition)) \ 
stm_fatalerror("fails: " #condition); \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_release(lock_pages_privatizing[_i]); \ + release_privatization_lock(); \ } \ } #endif @@ -77,9 +74,15 @@ assert(lock_idx < sizeof(write_locks)); retry: if (write_locks[lock_idx] == 0) { + /* A lock to prevent reading garbage from + lookup_other_thread_recorded_marker() */ + acquire_marker_lock(STM_SEGMENT->segment_base); + if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], - 0, lock_num))) + 0, lock_num))) { + release_marker_lock(STM_SEGMENT->segment_base); goto retry; + } dprintf_test(("write_slowpath %p -> mod_old\n", obj)); @@ -87,6 +90,15 @@ Add it to the list 'modified_old_objects'. */ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); + /* Add the current marker, recording where we wrote to this object */ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->modified_old_objects_markers = + list_append2(STM_PSEGMENT->modified_old_objects_markers, + marker[0], marker[1]); + + release_marker_lock(STM_SEGMENT->segment_base); + /* We need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only one page in total. */ @@ -128,7 +140,7 @@ else { /* call the contention manager, and then retry (unless we were aborted). */ - write_write_contention_management(lock_idx); + write_write_contention_management(lock_idx, obj); goto retry; } @@ -196,7 +208,13 @@ assert(STM_PSEGMENT->transaction_state == TS_NONE); change_timing_state(STM_TIME_RUN_CURRENT); STM_PSEGMENT->start_time = tl->_timing_cur_start; + STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; +#ifndef NDEBUG + STM_PSEGMENT->marker_inev[1] = 99999999999999999L; +#endif + if (jmpbuf == NULL) + marker_fetch_inev(); STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? 
TS_REGULAR : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; @@ -224,12 +242,17 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->modified_old_objects_markers)); assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); +#ifndef NDEBUG + /* this should not be used when objects_pointing_to_nursery == NULL */ + STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L; +#endif check_nursery_at_transaction_start(); } @@ -264,7 +287,7 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ - write_read_contention_management(i); + write_read_contention_management(i, item); /* If we reach this point, we didn't abort, but maybe we had to wait for the other thread to commit. If we @@ -338,9 +361,12 @@ /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other segments' own private pages. + + Must be called with the privatization lock acquired. */ assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + assert(STM_PSEGMENT->privatization_lock == 1); uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; @@ -382,26 +408,9 @@ memcpy(dst, src, copy_size); } else { - EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ + assert(memcmp(dst, src, copy_size) == 0); /* same page */ } - /* Do a full memory barrier. We must make sure that other - CPUs see the changes we did to the shared page ("S", - above) before we check the other segments below with - is_private_page(). 
Otherwise, we risk the following: - this CPU writes "S" but the writes are not visible yet; - then it checks is_private_page() and gets false, and does - nothing more; just afterwards another CPU sets its own - private_page bit and copies the page; but it risks doing - so before seeing the "S" writes. - - XXX what is the cost of this? If it's high, then we - should reorganize the code so that we buffer the second - parts and do them by bunch of N, after just one call to - __sync_synchronize()... - */ - __sync_synchronize(); - for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; @@ -418,7 +427,7 @@ memcpy(dst, src, copy_size); } else { - EVENTUALLY(!memcmp(dst, src, copy_size)); /* same page */ + assert(!memcmp(dst, src, copy_size)); /* same page */ } } @@ -432,12 +441,15 @@ if (STM_PSEGMENT->large_overflow_objects == NULL) return; + acquire_privatization_lock(); LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, synchronize_object_now(item)); + release_privatization_lock(); } static void push_modified_to_other_segments(void) { + acquire_privatization_lock(); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, @@ -457,8 +469,10 @@ private pages as needed */ synchronize_object_now(item); })); + release_privatization_lock(); list_clear(STM_PSEGMENT->modified_old_objects); + list_clear(STM_PSEGMENT->modified_old_objects_markers); } static void _finish_transaction(int attribute_to) @@ -597,6 +611,7 @@ })); list_clear(pseg->modified_old_objects); + list_clear(pseg->modified_old_objects_markers); } static void abort_data_structures_from_segment_num(int segment_num) @@ -621,8 +636,9 @@ (int)pseg->transaction_state); } - /* look up and preserve the marker information as a string */ - marker_fetch_expand(pseg); + /* if we don't have marker information already, look up and preserve + the marker information from the shadowstack as a string */ + marker_default_for_abort(pseg); /* throw away the content of the nursery */ long 
bytes_in_nursery = throw_away_nursery(pseg); @@ -706,6 +722,7 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); + marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -79,9 +79,17 @@ /* List of old objects (older than the current transaction) that the current transaction attempts to modify. This is used to track the STM status: they are old objects that where written to and - that need to be copied to other segments upon commit. */ + that need to be copied to other segments upon commit. Note that + every object takes three list items: the object, and two words for + the location marker. */ struct list_s *modified_old_objects; + /* For each entry in 'modified_old_objects', we have two entries + in the following list, which give the marker at the time we added + the entry to modified_old_objects. */ + struct list_s *modified_old_objects_markers; + uintptr_t modified_old_objects_markers_num_old; + /* List of out-of-nursery objects that may contain pointers to nursery objects. This is used to track the GC status: they are all objects outside the nursery on which an stm_write() occurred @@ -149,12 +157,31 @@ /* For sleeping contention management */ bool signal_when_done; + /* This lock is acquired when that segment calls synchronize_object_now. + On the rare event of a page_privatize(), the latter will acquire + all the locks in all segments. Otherwise, for the common case, + it's cheap. (The set of all 'privatization_lock' in all segments + works like one single read-write lock, with page_privatize() acquiring + the write lock; but this variant is more efficient for the case of + many reads / rare writes.) 
*/ + uint8_t privatization_lock; + + /* This lock is acquired when we mutate 'modified_old_objects' but + we don't have the global mutex. It is also acquired during minor + collection. It protects against a different thread that tries to + get this segment's marker corresponding to some object, or to + expand the marker into a full description. */ + uint8_t marker_lock; + /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. */ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; struct stm_shadowentry_s *shadowstack_at_abort; + /* Already signalled to commit soon: */ + bool signalled_to_commit_soon; + /* For debugging */ #ifndef NDEBUG pthread_t running_pthread; @@ -162,6 +189,8 @@ /* Temporarily stores the marker information */ char marker_self[_STM_MARKER_LEN]; + char marker_other[_STM_MARKER_LEN]; + uintptr_t marker_inev[2]; /* marker where this thread became inevitable */ }; enum /* safe_point */ { @@ -231,3 +260,31 @@ static void copy_object_to_shared(object_t *obj, int source_segment_num); static void synchronize_object_now(object_t *obj); + +static inline void acquire_privatization_lock(void) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); + spinlock_acquire(*lock); +} + +static inline void release_privatization_lock(void) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); + spinlock_release(*lock); +} + +static inline void acquire_marker_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->marker_lock); + spinlock_acquire(*lock); +} + +static inline void release_marker_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->marker_lock); + spinlock_release(*lock); +} diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c 
b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -93,17 +93,20 @@ /* uncommon case: need to initialize some more pages */ spinlock_acquire(lock_growth_large); - if (addr + size > uninitialized_page_start) { + char *start = uninitialized_page_start; + if (addr + size > start) { uintptr_t npages; - npages = (addr + size - uninitialized_page_start) / 4096UL; + npages = (addr + size - start) / 4096UL; npages += GCPAGE_NUM_PAGES; - if (uninitialized_page_stop - uninitialized_page_start < - npages * 4096UL) { + if (uninitialized_page_stop - start < npages * 4096UL) { stm_fatalerror("out of memory!"); /* XXX */ } - setup_N_pages(uninitialized_page_start, npages); - __sync_synchronize(); - uninitialized_page_start += npages * 4096UL; + setup_N_pages(start, npages); + if (!__sync_bool_compare_and_swap(&uninitialized_page_start, + start, + start + npages * 4096UL)) { + stm_fatalerror("uninitialized_page_start changed?"); + } } spinlock_release(lock_growth_large); return addr; @@ -419,6 +422,23 @@ } } +static void mark_visit_from_markers(void) +{ + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + char *base = get_segment_base(j); + struct list_s *lst = get_priv_segment(j)->modified_old_objects_markers; + uintptr_t i; + for (i = list_count(lst); i > 0; i -= 2) { + mark_visit_object((object_t *)list_item(lst, i - 1), base); + } + if (get_priv_segment(j)->transaction_state == TS_INEVITABLE) { + uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; + mark_visit_object((object_t *)marker_inev_obj, base); + } + } +} + static void clean_up_segment_lists(void) { long i; @@ -521,6 +541,7 @@ /* marking */ LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); + mark_visit_from_markers(); mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); diff --git a/rpython/translator/stm/src_stm/stm/largemalloc.c b/rpython/translator/stm/src_stm/stm/largemalloc.c --- 
a/rpython/translator/stm/src_stm/stm/largemalloc.c +++ b/rpython/translator/stm/src_stm/stm/largemalloc.c @@ -354,6 +354,9 @@ mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); +#ifndef NDEBUG + memset((char *)&mscan->d, 0xda, request_size); +#endif lm_unlock(); diff --git a/rpython/translator/stm/src_stm/stm/list.h b/rpython/translator/stm/src_stm/stm/list.h --- a/rpython/translator/stm/src_stm/stm/list.h +++ b/rpython/translator/stm/src_stm/stm/list.h @@ -34,6 +34,18 @@ #define LIST_APPEND(lst, e) ((lst) = list_append((lst), (uintptr_t)(e))) +static inline struct list_s *list_append2(struct list_s *lst, + uintptr_t item0, uintptr_t item1) +{ + uintptr_t index = lst->count; + lst->count += 2; + if (UNLIKELY(index >= lst->last_allocated)) + lst = _list_grow(lst, index + 1); + lst->items[index + 0] = item0; + lst->items[index + 1] = item1; + return lst; +} + static inline void list_clear(struct list_s *lst) { @@ -67,6 +79,11 @@ lst->items[index] = newitem; } +static inline uintptr_t *list_ptr_to_item(struct list_s *lst, uintptr_t index) +{ + return &lst->items[index]; +} + #define LIST_FOREACH_R(lst, TYPE, CODE) \ do { \ struct list_s *_lst = (lst); \ diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -12,38 +12,73 @@ const char *marker); -static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) +{ + /* fetch the current marker from the tl's shadow stack, + and return it in 'marker[2]'. */ + struct stm_shadowentry_s *current = tl->shadowstack - 1; + struct stm_shadowentry_s *base = tl->shadowstack_base; + + /* The shadowstack_base contains STM_STACK_MARKER_OLD, which is + a convenient stopper for the loop below but which shouldn't + be returned. 
*/ + assert(base->ss == (object_t *)STM_STACK_MARKER_OLD); + + while (!(((uintptr_t)current->ss) & 1)) { + current--; + assert(current >= base); + } + if (current != base) { + /* found the odd marker */ + marker[0] = (uintptr_t)current[0].ss; + marker[1] = (uintptr_t)current[1].ss; + } + else { + /* no marker found */ + marker[0] = 0; + marker[1] = 0; + } +} + +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker) +{ + /* Expand the marker given by 'marker[2]' into a full string. This + works assuming that the marker was produced inside the segment + given by 'segment_base'. If that's from a different thread, you + must first acquire the corresponding 'marker_lock'. */ + assert(_has_mutex()); + outmarker[0] = 0; + if (marker[0] == 0) + return; /* no marker entry found */ + if (stmcb_expand_marker != NULL) { + stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], + outmarker, _STM_MARKER_LEN); + } +} + +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg) { if (pseg->marker_self[0] != 0) return; /* already collected an entry */ - if (stmcb_expand_marker != NULL) { - stm_thread_local_t *tl = pseg->pub.running_thread; - struct stm_shadowentry_s *current = tl->shadowstack - 1; - struct stm_shadowentry_s *base = tl->shadowstack_base; - /* stop walking just before shadowstack_base, which contains - STM_STACK_MARKER_OLD which shouldn't be expanded */ - while (--current > base) { - uintptr_t x = (uintptr_t)current->ss; - if (x & 1) { - /* the stack entry is an odd number */ - stmcb_expand_marker(pseg->pub.segment_base, x, current[1].ss, - pseg->marker_self, _STM_MARKER_LEN); - - if (pseg->marker_self[0] != 0) - break; - } - } - } + uintptr_t marker[2]; + marker_fetch(pseg->pub.running_thread, marker); + marker_expand(marker, pseg->pub.segment_base, pseg->marker_self); + pseg->marker_other[0] = 0; } char *_stm_expand_marker(void) { - struct stm_priv_segment_info_s *pseg = - 
get_priv_segment(STM_SEGMENT->segment_num); - pseg->marker_self[0] = 0; - marker_fetch_expand(pseg); - return pseg->marker_self; + /* for tests only! */ + static char _result[_STM_MARKER_LEN]; + uintptr_t marker[2]; + _result[0] = 0; + s_mutex_lock(); + marker_fetch(STM_SEGMENT->running_thread, marker); + marker_expand(marker, STM_SEGMENT->segment_base, _result); + s_mutex_unlock(); + return _result; } static void marker_copy(stm_thread_local_t *tl, @@ -65,6 +100,105 @@ tl->longest_marker_state = attribute_to; tl->longest_marker_time = time; memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); + memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); } pseg->marker_self[0] = 0; + pseg->marker_other[0] = 0; } + +static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, + uintptr_t marker[2]) +{ + assert(_has_mutex()); + + /* here, we acquired the other thread's marker_lock, which means that: + + (1) it has finished filling 'modified_old_objects' after it sets + up the write_locks[] value that we're conflicting with + + (2) it is not mutating 'modified_old_objects' right now (we have + the global mutex_lock at this point too). 
+ */ + long i; + struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num); + struct list_s *mlst = pseg->modified_old_objects; + struct list_s *mlstm = pseg->modified_old_objects_markers; + for (i = list_count(mlst); --i >= 0; ) { + if (list_item(mlst, i) == (uintptr_t)obj) { + assert(list_count(mlstm) == 2 * list_count(mlst)); + marker[0] = list_item(mlstm, i * 2 + 0); + marker[1] = list_item(mlstm, i * 2 + 1); + return; + } + } + marker[0] = 0; + marker[1] = 0; +} + +static void marker_contention(int kind, bool abort_other, + uint8_t other_segment_num, object_t *obj) +{ + uintptr_t self_marker[2]; + uintptr_t other_marker[2]; + struct stm_priv_segment_info_s *my_pseg, *other_pseg; + + my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + other_pseg = get_priv_segment(other_segment_num); + + char *my_segment_base = STM_SEGMENT->segment_base; + char *other_segment_base = get_segment_base(other_segment_num); + + acquire_marker_lock(other_segment_base); + + /* Collect the location for myself. It's usually the current + location, except in a write-read abort, in which case it's the + older location of the write. */ + if (kind == WRITE_READ_CONTENTION) + marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); + else + marker_fetch(my_pseg->pub.running_thread, self_marker); + + /* Expand this location into either my_pseg->marker_self or + other_pseg->marker_other, depending on who aborts. */ + marker_expand(self_marker, my_segment_base, + abort_other ? other_pseg->marker_other + : my_pseg->marker_self); + + /* For some categories, we can also collect the relevant information + for the other segment. 
*/ + switch (kind) { + case WRITE_WRITE_CONTENTION: + marker_fetch_obj_write(other_segment_num, obj, other_marker); + break; + case INEVITABLE_CONTENTION: + assert(abort_other == false); + other_marker[0] = other_pseg->marker_inev[0]; + other_marker[1] = other_pseg->marker_inev[1]; + break; + default: + other_marker[0] = 0; + other_marker[1] = 0; + break; + } + + marker_expand(other_marker, other_segment_base, + abort_other ? other_pseg->marker_self + : my_pseg->marker_other); + + if (abort_other && other_pseg->marker_self[0] == 0) { + if (kind == WRITE_READ_CONTENTION) + strcpy(other_pseg->marker_self, ""); + else + strcpy(other_pseg->marker_self, ""); + } + + release_marker_lock(other_segment_base); +} + +static void marker_fetch_inev(void) +{ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->marker_inev[0] = marker[0]; + STM_PSEGMENT->marker_inev[1] = marker[1]; +} diff --git a/rpython/translator/stm/src_stm/stm/marker.h b/rpython/translator/stm/src_stm/stm/marker.h --- a/rpython/translator/stm/src_stm/stm/marker.h +++ b/rpython/translator/stm/src_stm/stm/marker.h @@ -1,6 +1,13 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ -static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg); +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); +static void marker_fetch_inev(void); +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker); +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); static void marker_copy(stm_thread_local_t *tl, struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time); + +static void marker_contention(int kind, bool abort_other, + uint8_t other_segment_num, object_t *obj); diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -216,7 
+216,9 @@ content); or add the object to 'large_overflow_objects'. */ if (STM_PSEGMENT->minor_collect_will_commit_now) { + acquire_privatization_lock(); synchronize_object_now(obj); + release_privatization_lock(); } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); @@ -233,6 +235,18 @@ _collect_now(item)); } +static void collect_roots_from_markers(uintptr_t num_old) +{ + /* visit the marker objects */ + struct list_s *mlst = STM_PSEGMENT->modified_old_objects_markers; + STM_PSEGMENT->modified_old_objects_markers_num_old = list_count(mlst); + uintptr_t i, total = list_count(mlst); + assert((total & 1) == 0); + for (i = num_old + 1; i < total; i += 2) { + minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); + } +} + static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { /* reset the nursery by zeroing it */ @@ -282,6 +296,8 @@ dprintf(("minor_collection commit=%d\n", (int)commit)); + acquire_marker_lock(STM_SEGMENT->segment_base); + STM_PSEGMENT->minor_collect_will_commit_now = commit; if (!commit) { /* 'STM_PSEGMENT->overflow_number' is used now by this collection, @@ -297,6 +313,7 @@ /* All the objects we move out of the nursery become "overflow" objects. We use the list 'objects_pointing_to_nursery' to hold the ones we didn't trace so far. 
*/ + uintptr_t num_old; if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); @@ -306,7 +323,12 @@ into objects_pointing_to_nursery, but instead we use the following shortcut */ collect_modified_old_objects(); + num_old = 0; } + else + num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; + + collect_roots_from_markers(num_old); collect_roots_in_nursery(); @@ -319,6 +341,8 @@ assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); + + release_marker_lock(STM_SEGMENT->segment_base); } static void minor_collection(bool commit) diff --git a/rpython/translator/stm/src_stm/stm/nursery.h b/rpython/translator/stm/src_stm/stm/nursery.h --- a/rpython/translator/stm/src_stm/stm/nursery.h +++ b/rpython/translator/stm/src_stm/stm/nursery.h @@ -2,6 +2,7 @@ /* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ #define NSE_SIGPAUSE STM_TIME_WAIT_OTHER +#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON static uint32_t highest_overflow_number; diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -109,18 +109,20 @@ { /* check this thread's 'pages_privatized' bit */ uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); - struct page_shared_s *ps = &pages_privatized[pagenum - PAGE_FLAG_START]; + volatile struct page_shared_s *ps = (volatile struct page_shared_s *) + &pages_privatized[pagenum - PAGE_FLAG_START]; if (ps->by_segment & bitmask) { /* the page is already privatized; nothing to do */ return; } -#ifndef NDEBUG - spinlock_acquire(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + spinlock_acquire(get_priv_segment(i)->privatization_lock); + } /* add this thread's 'pages_privatized' bit */ - __sync_fetch_and_add(&ps->by_segment, 
bitmask); + ps->by_segment |= bitmask; /* "unmaps" the page to make the address space location correspond again to its underlying file offset (XXX later we should again @@ -134,9 +136,9 @@ /* copy the content from the shared (segment 0) source */ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); -#ifndef NDEBUG - spinlock_release(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + for (i = NB_SEGMENTS; i >= 1; i--) { + spinlock_release(get_priv_segment(i)->privatization_lock); + } } static void _page_do_reshare(long segnum, uintptr_t pagenum) diff --git a/rpython/translator/stm/src_stm/stm/pages.h b/rpython/translator/stm/src_stm/stm/pages.h --- a/rpython/translator/stm/src_stm/stm/pages.h +++ b/rpython/translator/stm/src_stm/stm/pages.h @@ -35,20 +35,6 @@ }; static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; -/* Rules for concurrent access to this array, possibly with is_private_page(): - - - we clear bits only during major collection, when all threads are - synchronized anyway - - - we set only the bit corresponding to our segment number, using - an atomic addition; and we do it _before_ we actually make the - page private. - - - concurrently, other threads checking the bits might (rarely) - get the answer 'true' to is_private_page() even though it is not - actually private yet. This inconsistency is in the direction - that we want for synchronize_object_now(). 
-*/ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); @@ -73,7 +59,3 @@ if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) page_reshare(pagenum); } - -#ifndef NDEBUG -static char lock_pages_privatizing[NB_SEGMENTS + 1] = { 0 }; -#endif diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -79,6 +79,7 @@ pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); + pr->modified_old_objects_markers = list_create(); pr->young_weakrefs = list_create(); pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); @@ -116,6 +117,7 @@ assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); + list_free(pr->modified_old_objects_markers); list_free(pr->young_weakrefs); list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -3,6 +3,10 @@ #include #include +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + /* Each segment can be in one of three possible states, described by the segment variable 'safe_point': @@ -261,6 +265,18 @@ static bool _safe_points_requested = false; #endif +static void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg) +{ + assert(_has_mutex()); + /* never overwrite abort signals or safepoint requests + (too messy to deal with) */ + if (!other_pseg->signalled_to_commit_soon + && !is_abort(other_pseg->pub.nursery_end) + && !pause_signalled) { + other_pseg->pub.nursery_end = NSE_SIGCOMMITSOON; + } +} + static void 
signal_everybody_to_pause_running(void) { assert(_safe_points_requested == false); @@ -324,7 +340,21 @@ if (STM_SEGMENT->nursery_end == NURSERY_END) break; /* no safe point requested */ + if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) { + if (previous_state == -1) { + previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); + } + + STM_PSEGMENT->signalled_to_commit_soon = true; + stmcb_commit_soon(); + if (!pause_signalled) { + STM_SEGMENT->nursery_end = NURSERY_END; + break; + } + STM_SEGMENT->nursery_end = NSE_SIGPAUSE; + } assert(STM_SEGMENT->nursery_end == NSE_SIGPAUSE); + assert(pause_signalled); /* If we are requested to enter a safe-point, we cannot proceed now. Wait until the safe-point request is removed for us. */ diff --git a/rpython/translator/stm/src_stm/stm/timing.c b/rpython/translator/stm/src_stm/stm/timing.c --- a/rpython/translator/stm/src_stm/stm/timing.c +++ b/rpython/translator/stm/src_stm/stm/timing.c @@ -26,10 +26,11 @@ return oldstate; } -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate) +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate) { TIMING_CHANGE(tl, newstate); + return elasped; } static void timing_end_transaction(enum stm_time_e attribute_to) @@ -59,6 +60,7 @@ "wait write read", "wait inevitable", "wait other", + "sync commit soon", "bookkeeping", "minor gc", "major gc", diff --git a/rpython/translator/stm/src_stm/stm/timing.h b/rpython/translator/stm/src_stm/stm/timing.h --- a/rpython/translator/stm/src_stm/stm/timing.h +++ b/rpython/translator/stm/src_stm/stm/timing.h @@ -9,7 +9,7 @@ } static enum stm_time_e change_timing_state(enum stm_time_e newstate); -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate); +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate); static void timing_end_transaction(enum stm_time_e attribute_to); diff --git 
a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -67,6 +67,7 @@ STM_TIME_WAIT_WRITE_READ, STM_TIME_WAIT_INEVITABLE, STM_TIME_WAIT_OTHER, + STM_TIME_SYNC_COMMIT_SOON, STM_TIME_BOOKKEEPING, STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, @@ -217,9 +218,13 @@ The "size rounded up" must be a multiple of 8 and at least 16. "Tracing" an object means enumerating all GC references in it, by invoking the callback passed as argument. + stmcb_commit_soon() is called when it is advised to commit + the transaction as soon as possible in order to avoid conflicts + or improve performance in general. */ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +extern void stmcb_commit_soon(void); /* Allocate an object of the given size, which must be a multiple From noreply at buildbot.pypy.org Mon Apr 28 19:09:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 19:09:29 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Add a dummy stmcb_commit_soon(). Fix me! Message-ID: <20140428170929.D7A891C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71046:3e0cdc451004 Date: 2014-04-28 19:02 +0200 http://bitbucket.org/pypy/pypy/changeset/3e0cdc451004/ Log: Add a dummy stmcb_commit_soon(). Fix me! 
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -231,3 +231,5 @@ _pypy_stm_inev_state(); stm_become_globally_unique_transaction(&stm_thread_local, "for the JIT"); } + +void stmcb_commit_soon(void) { /*XXX FIXME*/ } From noreply at buildbot.pypy.org Mon Apr 28 19:09:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 19:09:30 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Test and implementation of a way to grab the longest_abort_info from RPython Message-ID: <20140428170930.EF8CB1C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71047:5971a915100c Date: 2014-04-28 19:05 +0200 http://bitbucket.org/pypy/pypy/changeset/5971a915100c/ Log: Test and implementation of a way to grab the longest_abort_info from RPython diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -132,6 +132,16 @@ def pop_marker(): llop.stm_pop_marker(lltype.Void) +def longest_abort_info(): + state = llop.stm_longest_marker_state(lltype.Signed) + time = llop.stm_longest_marker_time(lltype.Float) + cself = llop.stm_longest_marker_self(rffi.CCHARP) + cother = llop.stm_longest_marker_other(rffi.CCHARP) + return (state, time, rffi.charp2str(cself), rffi.charp2str(cother)) + +def reset_longest_abort_info(): + llop.stm_reset_longest_marker_state(lltype.Void) + # ____________________________________________________________ def make_perform_transaction(func, CONTAINERP): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -457,22 +457,11 @@ 'stm_expand_marker': LLOp(), 'stm_setup_expand_marker_for_pypy': LLOp(), -## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, 
canmallocgc=True), -## 'stm_become_inevitable': LLOp(canmallocgc=True), -## 'stm_stop_all_other_threads': LLOp(canmallocgc=True), -## 'stm_partial_commit_and_resume_other_threads': LLOp(canmallocgc=True), -## 'stm_minor_collect': LLOp(canmallocgc=True), -## 'stm_major_collect': LLOp(canmallocgc=True), -## 'stm_get_tid': LLOp(canfold=True), -## 'stm_ptr_eq': LLOp(canfold=True), - -## 'stm_weakref_allocate': LLOp(sideeffects=False, canmallocgc=True), - -## 'stm_get_adr_of_private_rev_num':LLOp(), -## 'stm_get_adr_of_read_barrier_cache':LLOp(), -## 'stm_get_adr_of_nursery_current': LLOp(), -## 'stm_get_adr_of_nursery_nextlimit': LLOp(), -## 'stm_get_adr_of_active': LLOp(), + 'stm_longest_marker_state': LLOp(), + 'stm_longest_marker_time': LLOp(), + 'stm_longest_marker_self': LLOp(), + 'stm_longest_marker_other': LLOp(), + 'stm_reset_longest_marker_state': LLOp(), # __________ address operations __________ diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -239,3 +239,25 @@ assert len(offsets) == 4 return 'pypy_stm_setup_expand_marker(%s, %s, %s, %s);' % ( offsets[0], offsets[1], offsets[2], offsets[3]) + +def stm_longest_marker_state(funcgen, op): + result = funcgen.expr(op.result) + return '%s = (Signed)stm_thread_local.longest_marker_state;' % (result,) + +def stm_longest_marker_time(funcgen, op): + result = funcgen.expr(op.result) + return '%s = stm_thread_local.longest_marker_time;' % (result,) + +def stm_longest_marker_self(funcgen, op): + result = funcgen.expr(op.result) + return '%s = stm_thread_local.longest_marker_self;' % (result,) + +def stm_longest_marker_other(funcgen, op): + result = funcgen.expr(op.result) + return '%s = stm_thread_local.longest_marker_other;' % (result,) + +def stm_reset_longest_marker_state(funcgen, op): + return ('stm_thread_local.longest_marker_state = 0;\n' + 'stm_thread_local.longest_marker_time = 0.0;\n' + 
'stm_thread_local.longest_marker_self[0] = 0;\n' + 'stm_thread_local.longest_marker_other[0] = 0;') diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -237,7 +237,6 @@ assert 'ok\n' in data def test_abort_info(self): - py.test.skip("goes away") class Parent(object): pass class Foobar(Parent): @@ -249,19 +248,12 @@ globf.xy = 100 + retry_counter def check(_, retry_counter): - rstm.abort_info_push(globf, ('[', 'xy', ']', 'yx')) setxy(globf, retry_counter) if retry_counter < 3: rstm.abort_and_retry() - # - last = rstm.charp_inspect_abort_info() - if last: - print rffi.charp2str(last) - else: - print 'got abort_info=NULL!' - print int(bool(rstm.charp_inspect_abort_info())) - # - rstm.abort_info_pop(2) + print rstm.longest_abort_info() + rstm.reset_longest_abort_info() + print rstm.longest_abort_info() return 0 PS = lltype.Ptr(lltype.GcStruct('S', ('got_exception', OBJECTPTR))) @@ -275,7 +267,10 @@ return 0 t, cbuilder = self.compile(main) data = cbuilder.cmdexec('a b') - assert 'li102ee10:hi there 3e\n0\n' in data + # + # 6 == STM_TIME_RUN_ABORTED_OTHER + import re; r = re.compile(r'\(6, 0.00\d+, , \)\n\(0, 0.00+, , \)\n$') + assert r.match(data) def test_weakref(self): import weakref From noreply at buildbot.pypy.org Mon Apr 28 19:09:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 19:09:32 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Give access to this info from app-level (untested so far) Message-ID: <20140428170932.15E7E1C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71048:9a04fbeb89a3 Date: 2014-04-28 19:08 +0200 http://bitbucket.org/pypy/pypy/changeset/9a04fbeb89a3/ Log: Give access to this info from app-level (untested so far) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- 
a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -39,8 +39,8 @@ '_atomic_enter': 'interp_atomic.atomic_enter', '_exclusive_atomic_enter': 'interp_atomic.exclusive_atomic_enter', '_atomic_exit': 'interp_atomic.atomic_exit', - 'last_abort_info': 'interp_atomic.last_abort_info', - 'discard_last_abort_info': 'interp_atomic.discard_last_abort_info', + 'longest_abort_info': 'interp_atomic.longest_abort_info', + 'reset_longest_abort_info':'interp_atomic.reset_longest_abort_info', 'getsegmentlimit': 'interp_atomic.getsegmentlimit', } def activate(self, space): diff --git a/pypy/module/__pypy__/interp_atomic.py b/pypy/module/__pypy__/interp_atomic.py --- a/pypy/module/__pypy__/interp_atomic.py +++ b/pypy/module/__pypy__/interp_atomic.py @@ -59,8 +59,16 @@ else: return space.wrap(1) -def last_abort_info(space): - return space.w_None +def longest_abort_info(space): + if space.config.translation.stm: + from rpython.rlib.rstm import longest_abort_info + a, b, c, d = longest_abort_info() + return space.newtuple([space.wrap(a), space.wrap(b), + space.wrap(c), space.wrap(d)]) + else: + return space.w_None -def discard_last_abort_info(space): - pass +def reset_longest_abort_info(space): + if space.config.translation.stm: + from rpython.rlib.rstm import reset_longest_abort_info + reset_longest_abort_info() From noreply at buildbot.pypy.org Mon Apr 28 19:25:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 19:25:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Test and fix: the "counter" argument we pass to the callback Message-ID: <20140428172507.1FF111C01F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71049:f6dab9f69e7c Date: 2014-04-28 19:23 +0200 http://bitbucket.org/pypy/pypy/changeset/f6dab9f69e7c/ Log: Test and fix: the "counter" argument we pass to the callback from pypy_stm_perform_transaction() should start at 0. 
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -140,6 +140,7 @@ STM_PUSH_ROOT(stm_thread_local, arg); while (1) { + long counter; if (pypy_stm_ready_atomic == 1) { /* Not in an atomic transaction; but it might be an inevitable @@ -156,11 +157,13 @@ declared below than this point only. */ while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } + counter = v_counter; pypy_stm_start_transaction(&jmpbuf, &v_counter); } else { /* In an atomic transaction */ assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); + counter = v_counter; } /* invoke the callback in the new transaction */ @@ -168,7 +171,7 @@ assert(v_old_shadowstack == stm_thread_local.shadowstack - 1); STM_PUSH_ROOT(stm_thread_local, arg); - long result = v_callback(arg, v_counter); + long result = v_callback(arg, counter); if (result <= 0) break; v_counter = 0; diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -130,6 +130,23 @@ data, dataerr = cbuilder.cmdexec('4 5000', err=True) assert 'check ok!' 
in data + def test_retry_counter_starts_at_zero(self): + # + def check(foobar, retry_counter): + print '<', retry_counter, '>' + return 0 + # + S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) + PS = lltype.Ptr(S) + perform_transaction = rstm.make_perform_transaction(check, PS) + def entry_point(argv): + perform_transaction(lltype.malloc(S)) + return 0 + # + t, cbuilder = self.compile(entry_point, backendopt=True) + data = cbuilder.cmdexec('a b c d') + assert '< 0 >\n' in data + def test_bug1(self): # def check(foobar, retry_counter): From noreply at buildbot.pypy.org Mon Apr 28 20:01:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Apr 2014 20:01:45 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Jit Fix Message-ID: <20140428180145.059EF1D2933@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71050:92c1768af884 Date: 2014-04-28 19:59 +0200 http://bitbucket.org/pypy/pypy/changeset/92c1768af884/ Log: Jit Fix diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -132,6 +132,7 @@ def pop_marker(): llop.stm_pop_marker(lltype.Void) + at dont_look_inside def longest_abort_info(): state = llop.stm_longest_marker_state(lltype.Signed) time = llop.stm_longest_marker_time(lltype.Float) @@ -139,6 +140,7 @@ cother = llop.stm_longest_marker_other(rffi.CCHARP) return (state, time, rffi.charp2str(cself), rffi.charp2str(cother)) + at dont_look_inside def reset_longest_abort_info(): llop.stm_reset_longest_marker_state(lltype.Void) From noreply at buildbot.pypy.org Mon Apr 28 20:32:57 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 28 Apr 2014 20:32:57 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: clarify that cffi is distributed with pypy Message-ID: <20140428183257.F409D1C01F7@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-0.8 Changeset: r1508:0884285cc707 Date: 2014-04-28 18:32 +0000 
http://bitbucket.org/cffi/cffi/changeset/0884285cc707/ Log: clarify that cffi is distributed with pypy diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -45,7 +45,7 @@ Installation and Status ======================================================= -Quick installation: +Quick installation (for cpython, cffi is distributed with PyPy): * ``pip install cffi`` @@ -60,10 +60,10 @@ left. It supports CPython 2.6; 2.7; 3.x (tested with 3.2 and 3.3); -and PyPy 2.0 beta2 or later. +and is distrubuted with PyPy 2.0 beta2 or later. Its speed is comparable to ctypes on CPython (a bit faster but a higher -warm-up time). It is already faster on PyPy (1.5x-2x), but not yet +warm-up time). It is already faster than ctypes on PyPy (1.5x-2x), but not yet *much* faster; stay tuned. Requirements: From noreply at buildbot.pypy.org Mon Apr 28 21:49:43 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 28 Apr 2014 21:49:43 +0200 (CEST) Subject: [pypy-commit] pypy default: rework, call the release "Easier Than Ever" for want of a better name Message-ID: <20140428194943.A67AD1C01CB@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71051:8bc78794a75b Date: 2014-04-28 22:49 +0300 http://bitbucket.org/pypy/pypy/changeset/8bc78794a75b/ Log: rework, call the release "Easier Than Ever" for want of a better name diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -1,11 +1,17 @@ ======================================= -PyPy 2.3 - XXXX TODO +PyPy 2.3 - Easier Than Ever ======================================= We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. -This release also contains several bugfixes and performance improvements. 
+This release also contains several bugfixes and performance improvements, +many generated by real users finding corner cases our `TDD`_ methods missed. +`CFFI`_ has made it easier than ever to use existing C code with both cpython +and PyPy, easing the transition for packages like `cryptography`_, `Pillow`_ +(Python Imaging Library [Fork]), a basic port of `pygame-cffi`_, and others. + +PyPy can now be embedded in a hosting application, for instance inside `uWSGI`_ You can download the PyPy 2.3 release here: @@ -17,72 +23,112 @@ Please consider donating more so we can finish those projects! The three projects are: -* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. +* `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. -* STM (software transactional memory): a preview will be released very soon, - as soon as we fix a few bugs +* `STM`_ (software transactional memory): a preview will be released very soon, + once we fix a few bugs -* NumPy: the work done is included in the PyPy 2.2 release. More details below. +* `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. -.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org +_`Py3k`: http://pypy.org/py3donate.html +_`STM`: http://pypy.org/tmdonate2.html +_ `Numpy`: http://pypy.org/numpydonate.html +_`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html +_`CFFI`: http://cffi.readthedocs.org +_`cryptography`: https://cryptography.io +_`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 +_`pygame-cffi`: https://github.com/CTPUG/pygame_cffi +_`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html What is PyPy? ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +CPython 2.7. 
It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison; +note that the latest cpython is not faster than cpython 2.7.2) due to its integrated tracing JIT compiler. -This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows -32, or ARM (ARMv6 or ARMv7, with VFPv3). +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. -Work on the native Windows 64 is still stalling, we would welcome a volunteer -to handle that. +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. -.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +_`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +_`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== -* Our Garbage Collector is now "incremental". It should avoid almost - all pauses due to a major collection taking place. Previously, it - would pause the program (rarely) to walk all live objects, which - could take arbitrarily long if your process is using a whole lot of - RAM. Now the same work is done in steps. This should make PyPy - more responsive, e.g. in games. There are still other pauses, from - the GC and the JIT, but they should be on the order of 5 - milliseconds each. +Bugfixes +-------- -* The JIT counters for hot code were never reset, which meant that a - process running for long enough would eventually JIT-compile more - and more rarely executed code. Not only is it useless to compile - such code, but as more compiled code means more memory used, this - gives the impression of a memory leak. This has been tentatively - fixed by decreasing the counters from time to time. +Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. 
Note that we consider +performance slowdowns as bugs. -* NumPy has been split: now PyPy only contains the core module, called - ``_numpypy``. The ``numpy`` module itself has been moved to - ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. - You need to install NumPy separately with a virtualenv: +* The ARM port no longer crashes on unaligned memory access to floats and doubles, + and singlefloats are supported in the JIT. + +* Generators are faster since they now skip unecessary cleanup + +* A first time contributor simplified JIT traces by adding integer bound + propagation in indexing and logical operations. + +* Optimize consecutive dictionary lookups of the same key in a chain + +* Our extensive pre-translation test suite now runs nightly on more platforms + +* Fix issues with reimporting builtin modules + +* Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port + +`HippyVM`_: http://www.hippyvm.com + +New Platforms and Features +-------------------------- + +* Support for OpenBSD + +* Code cleanup: we continue to prune out old and unused code, and to refactor + large parts of the codebase. We have sepearated rpython from the PyPy python + interpreter, and rpython is seeing use in other dynamic language projects. + +* Support for precompiled headers in the build process for MSVC + +* Support for objects with __int__ and __float__ methods + +* Tweak support of errno in cpyext (the PyPy implemenation of the capi) + + +Numpy +----- +Numpy support has been split into a builtin ``_numpy`` module and a +fork of the numpy code base adapted to pypy at + ``https://bitbucket.org/pypy/numpy``. +You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; or directly: ``git clone https://bitbucket.org/pypy/numpy.git``; ``cd numpy``; ``pypy setup.py install``. 
-* non-inlined calls have less overhead +* NumPy support has been improved, many failures in indexing, dtypes, + and scalars were corrected. We are slowly approaching our goal of passing + the numpy test suite. We still do not support object or unicode ndarrays. -* Things that use ``sys.set_trace`` are now JITted (like coverage) +* speed of iteration in dot() is now within 1.5x of the numpy c + implementation (without BLAS acceleration). Since the same array + iterator is used throughout the ``_numpy`` module, speed increases should + be apparent in all Numpy functionality. -* JSON decoding is now very fast (JSON encoding was already very fast) +* Most of the core functionality of nditer has been implemented. -* various buffer copying methods experience speedups (like list-of-ints to - ``int[]`` buffer from cffi) +* A cffi-based ``numpy.random`` module is available as a branch in the numpy + repository, it will be merged soon after this release. -* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, - including ``os.startfile()`` on Windows and a handful of rare ones - on Posix. +* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in numpy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. 
-* numpy has a rudimentary C API that cooperates with ``cpyext`` +Cheers +The PyPy Team -Cheers, -Armin Rigo and Maciej Fijalkowski From noreply at buildbot.pypy.org Mon Apr 28 22:03:17 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 28 Apr 2014 22:03:17 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into release branch Message-ID: <20140428200317.172801D24A5@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r71052:80843924f836 Date: 2014-04-28 23:00 +0300 http://bitbucket.org/pypy/pypy/changeset/80843924f836/ Log: merge default into release branch diff too long, truncating to 2000 out of 15405 lines diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py --- a/dotviewer/graphserver.py +++ b/dotviewer/graphserver.py @@ -160,15 +160,14 @@ " | instructions in dotviewer/sshgraphserver.py\n") try: import pygame - except ImportError: + if isinstance(e, pygame.error): + print >> f, help + except Exception, e: f.seek(0) f.truncate() - print >> f, "ImportError" + print >> f, "%s: %s" % (e.__class__.__name__, e) print >> f, " | Pygame is not installed; either install it, or" print >> f, help - else: - if isinstance(e, pygame.error): - print >> f, help io.sendmsg(msgstruct.MSG_ERROR, f.getvalue()) else: listen_server(sys.argv[1]) diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -51,7 +51,7 @@ for root, dirs, files in 
os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) - shutil.rmtree(self.temp_dir, True) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_array.py b/lib-python/2.7/test/test_array.py --- a/lib-python/2.7/test/test_array.py +++ b/lib-python/2.7/test/test_array.py @@ -298,6 +298,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a + b + with self.assertRaises(TypeError): a + 'bad' @@ -320,6 +321,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a += b + with self.assertRaises(TypeError): a += 'bad' diff --git a/lib-python/2.7/test/test_builtin.py b/lib-python/2.7/test/test_builtin.py --- a/lib-python/2.7/test/test_builtin.py +++ b/lib-python/2.7/test/test_builtin.py @@ -250,14 +250,12 @@ self.assertRaises(TypeError, compile) self.assertRaises(ValueError, compile, 'print 42\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print 42\n', '', 'single', 0xff) - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') if have_unicode: compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad') diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -479,11 +479,10 @@ def _create_file(self): if self.use_buffering: - f = open(self.filename, "w+", buffering=1024*16) + self.f = open(self.filename, 
"w+", buffering=1024*16) else: - f = open(self.filename, "w+") - self.f = f - self.all_files.append(f) + self.f = open(self.filename, "w+") + self.all_files.append(self.f) oldf = self.all_files.pop(0) if oldf is not None: oldf.close() diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,7 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) - f.close() + f.close() def test_head(self): response = self.request( diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -115,8 +115,8 @@ 
self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object - self.assertRaises((ValueError, TypeError), setitem, 0, b"") - self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") + self.assertRaises(ValueError, setitem, 0, b"") + self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") @@ -166,18 +166,11 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - if test_support.check_impl_detail(cpython=True): - # what is supported and what is not supported by memoryview is - # very inconsisten on CPython. In PyPy, memoryview supports - # the buffer interface, and thus the following comparison - # succeeds. See also the comment in - # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer - # - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef") + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -17,7 +18,8 @@ output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) + with fp: + imp.load_module('_ctypes_test', fp, filename, description) except ImportError: print('could not find 
_ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,6 +1,7 @@ import os, sys, imp import tempfile, binascii + def get_hashed_dir(cfile): with open(cfile,'r') as fid: content = fid.read() @@ -15,7 +16,7 @@ output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) - return output_dir + return output_dir def _get_c_extension_suffix(): diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,7 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) + with fp: + imp.load_module('_testcapi', fp, filename, description) except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -1,4 +1,3 @@ - """ This file provides some support for things like standard_c_lib and errno access, as portable as possible """ @@ -22,7 +21,7 @@ standard_c_lib._errno.argtypes = None def _where_is_errno(): return standard_c_lib._errno() - + elif sys.platform in ('linux2', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None @@ -42,5 +41,3 @@ def set_errno(value): errno_p = _where_is_errno() errno_p.contents.value = value - - diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -560,6 +560,12 @@ Fixing these bootstrap problems is on the TODO list. The global namespace is ``cppyy.gbl``. 
+* **NULL**: Is represented as ``cppyy.gbl.nullptr``. + In C++11, the keyword ``nullptr`` is used to represent ``NULL``. + For clarity of intent, it is recommended to use this instead of ``None`` + (or the integer ``0``, which can serve in some cases), as ``None`` is better + understood as ``void`` in C++. + * **operator conversions**: If defined in the C++ class and a python equivalent exists (i.e. all builtin integer and floating point types, as well as ``bool``), it will map onto that python conversion. @@ -577,7 +583,7 @@ Special care needs to be taken for global operator overloads in C++: first, make sure that they are actually reflected, especially for the global overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterator over a vector). + the case of gcc (note that they are not needed to iterate over a vector). Second, make sure that reflection info is loaded in the proper order. I.e. that these global overloads are available before use. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -66,58 +66,26 @@ Reflex ====== -This method is still experimental. It adds the `cppyy`_ module. -The method works by using the `Reflex package`_ to provide reflection -information of the C++ code, which is then used to automatically generate -bindings at runtime. -From a python standpoint, there is no difference between generating bindings -at runtime, or having them "statically" generated and available in scripts -or compiled into extension modules: python classes and functions are always -runtime structures, created when a script or module loads. +The builtin `cppyy`_ module uses reflection information, provided by +`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +automatically generate bindings at runtime. 
+In Python, classes and functions are always runtime structures, so when they +are generated matters not for performance. However, if the backend itself is capable of dynamic behavior, it is a much -better functional match to python, allowing tighter integration and more -natural language mappings. -Full details are `available here`_. +better functional match, allowing tighter integration and more natural +language mappings. + +The `cppyy`_ module is written in RPython, thus PyPy's JIT is able to remove +most cross-language call overhead. + +`Full details`_ are `available here`_. .. _`cppyy`: cppyy.html -.. _`reflex-support`: cppyy.html -.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex +.. _`installed separately`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 +.. _`Reflex`: http://root.cern.ch/drupal/content/reflex +.. _`Full details`: cppyy.html .. _`available here`: cppyy.html -Pros ----- - -The cppyy module is written in RPython, which makes it possible to keep the -code execution visible to the JIT all the way to the actual point of call into -C++, thus allowing for a very fast interface. -Reflex is currently in use in large software environments in High Energy -Physics (HEP), across many different projects and packages, and its use can be -virtually completely automated in a production environment. -One of its uses in HEP is in providing language bindings for CPython. -Thus, it is possible to use Reflex to have bound code work on both CPython and -on PyPy. -In the medium-term, Reflex will be replaced by `cling`_, which is based on -`llvm`_. -This will affect the backend only; the python-side interface is expected to -remain the same, except that cling adds a lot of dynamic behavior to C++, -enabling further language integration. - -.. _`cling`: http://root.cern.ch/drupal/content/cling -.. _`llvm`: http://llvm.org/ - -Cons ----- - -C++ is a large language, and cppyy is not yet feature-complete. 
-Still, the experience gained in developing the equivalent bindings for CPython -means that adding missing features is a simple matter of engineering, not a -question of research. -The module is written so that currently missing features should do no harm if -you don't use them, if you do need a particular feature, it may be necessary -to work around it in python or with a C++ helper function. -Although Reflex works on various platforms, the bindings with PyPy have only -been tested on Linux. - RPython Mixed Modules ===================== diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -76,7 +76,7 @@ .. code-block:: console $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py + $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py $ ./pypy-2.1/bin/pypy distribute_setup.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -1,11 +1,17 @@ ======================================= -PyPy 2.3 - XXXX TODO +PyPy 2.3 - Easier Than Ever ======================================= We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. -This release also contains several bugfixes and performance improvements. +This release also contains several bugfixes and performance improvements, +many generated by real users finding corner cases our `TDD`_ methods missed. +`CFFI`_ has made it easier than ever to use existing C code with both cpython +and PyPy, easing the transition for packages like `cryptography`_, `Pillow`_ +(Python Imaging Library [Fork]), a basic port of `pygame-cffi`_, and others. 
+ +PyPy can now be embedded in a hosting application, for instance inside `uWSGI`_ You can download the PyPy 2.3 release here: @@ -17,72 +23,112 @@ Please consider donating more so we can finish those projects! The three projects are: -* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. +* `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. -* STM (software transactional memory): a preview will be released very soon, - as soon as we fix a few bugs +* `STM`_ (software transactional memory): a preview will be released very soon, + once we fix a few bugs -* NumPy: the work done is included in the PyPy 2.2 release. More details below. +* `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. -.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org +_`Py3k`: http://pypy.org/py3donate.html +_`STM`: http://pypy.org/tmdonate2.html +_ `Numpy`: http://pypy.org/numpydonate.html +_`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html +_`CFFI`: http://cffi.readthedocs.org +_`cryptography`: https://cryptography.io +_`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 +_`pygame-cffi`: https://github.com/CTPUG/pygame_cffi +_`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html What is PyPy? ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison; +note that the latest cpython is not faster than cpython 2.7.2) due to its integrated tracing JIT compiler. -This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows -32, or ARM (ARMv6 or ARMv7, with VFPv3). +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. 
-Work on the native Windows 64 is still stalling, we would welcome a volunteer -to handle that. +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. -.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +_`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +_`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== -* Our Garbage Collector is now "incremental". It should avoid almost - all pauses due to a major collection taking place. Previously, it - would pause the program (rarely) to walk all live objects, which - could take arbitrarily long if your process is using a whole lot of - RAM. Now the same work is done in steps. This should make PyPy - more responsive, e.g. in games. There are still other pauses, from - the GC and the JIT, but they should be on the order of 5 - milliseconds each. +Bugfixes +-------- -* The JIT counters for hot code were never reset, which meant that a - process running for long enough would eventually JIT-compile more - and more rarely executed code. Not only is it useless to compile - such code, but as more compiled code means more memory used, this - gives the impression of a memory leak. This has been tentatively - fixed by decreasing the counters from time to time. +Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider +performance slowdowns as bugs. -* NumPy has been split: now PyPy only contains the core module, called - ``_numpypy``. The ``numpy`` module itself has been moved to - ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. - You need to install NumPy separately with a virtualenv: +* The ARM port no longer crashes on unaligned memory access to floats and doubles, + and singlefloats are supported in the JIT. 
+ +* Generators are faster since they now skip unecessary cleanup + +* A first time contributor simplified JIT traces by adding integer bound + propagation in indexing and logical operations. + +* Optimize consecutive dictionary lookups of the same key in a chain + +* Our extensive pre-translation test suite now runs nightly on more platforms + +* Fix issues with reimporting builtin modules + +* Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port + +`HippyVM`_: http://www.hippyvm.com + +New Platforms and Features +-------------------------- + +* Support for OpenBSD + +* Code cleanup: we continue to prune out old and unused code, and to refactor + large parts of the codebase. We have sepearated rpython from the PyPy python + interpreter, and rpython is seeing use in other dynamic language projects. + +* Support for precompiled headers in the build process for MSVC + +* Support for objects with __int__ and __float__ methods + +* Tweak support of errno in cpyext (the PyPy implemenation of the capi) + + +Numpy +----- +Numpy support has been split into a builtin ``_numpy`` module and a +fork of the numpy code base adapted to pypy at + ``https://bitbucket.org/pypy/numpy``. +You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; or directly: ``git clone https://bitbucket.org/pypy/numpy.git``; ``cd numpy``; ``pypy setup.py install``. -* non-inlined calls have less overhead +* NumPy support has been improved, many failures in indexing, dtypes, + and scalars were corrected. We are slowly approaching our goal of passing + the numpy test suite. We still do not support object or unicode ndarrays. -* Things that use ``sys.set_trace`` are now JITted (like coverage) +* speed of iteration in dot() is now within 1.5x of the numpy c + implementation (without BLAS acceleration). 
Since the same array + iterator is used throughout the ``_numpy`` module, speed increases should + be apparent in all Numpy functionality. -* JSON decoding is now very fast (JSON encoding was already very fast) +* Most of the core functionality of nditer has been implemented. -* various buffer copying methods experience speedups (like list-of-ints to - ``int[]`` buffer from cffi) +* A cffi-based ``numpy.random`` module is available as a branch in the numpy + repository, it will be merged soon after this release. -* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, - including ``os.startfile()`` on Windows and a handful of rare ones - on Posix. +* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in numpy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. -* numpy has a rudimentary C API that cooperates with ``cpyext`` +Cheers +The PyPy Team -Cheers, -Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -42,6 +42,10 @@ use this sandboxed PyPy from a regular Python interpreter (CPython, or an unsandboxed PyPy). Contributions welcome. +.. warning:: + + Tested with PyPy2. May not work out of the box with PyPy3. + Overview -------- diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -0,0 +1,154 @@ +======================= +What's new in PyPy 2.2+ +======================= + +.. this is a revision shortly after release-2.2.x +.. startrev: 4cd1bc8b3111 + +.. branch: release-2.2.x + +.. branch: numpy-newbyteorder +Clean up numpy types, add newbyteorder functionality + +.. branch: windows-packaging +Package tk/tcl runtime with win32 + +.. 
branch: armhf-singlefloat +JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. Since gcc shows minimal +speedup, it was not implemented. + +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! + +.. 
branch: NonConstant +Simplify implementation of NonConstant. + +.. branch: array-propagate-len +Kill some guards and operations in JIT traces by adding integer bounds +propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). + +.. branch: optimize-int-and +Optimize away INT_AND with constant mask of 1s that fully cover the bitrange +of other operand. + +.. branch: bounds-int-add-or +Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the +operands are positive to kill some guards + +.. branch: remove-intlong-smm +kills int/long/smalllong/bool multimethods + +.. branch: numpy-refactor +Cleanup micronumpy module + +.. branch: int_w-refactor +In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. + +.. branch: test-58c3d8552833 +Fix for getarrayitem_gc_pure optimization + +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. + +.. branch: popen-pclose +Provide an exit status for popen'ed RFiles via pclose + +.. branch: stdlib-2.7.6 +Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. + +.. branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain + +.. 
branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. branch: win32-fixes4 +fix more tests for win32 + +.. branch: latest-improve-doc +Fix broken links in documentation + +.. branch: ast-issue1673 +fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when +there is missing field + +.. branch: issue1514 +Fix issues with reimporting builtin modules + +.. branch: numpypy-nditer +Implement the core of nditer, without many of the fancy flags (external_loop, buffered) + +.. branch: numpy-speed +Separate iterator from its state so jit can optimize better + +.. branch: numpy-searchsorted +Implement searchsorted without sorter kwarg + +.. branch: openbsd-lib-prefix +add 'lib' prefix to link libraries on OpenBSD + +.. branch: small-unroll-improvements +Improve optimization of small allocation-heavy loops in the JIT diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,151 +1,14 @@ ======================= -What's new in PyPy 2.2+ +What's new in PyPy 2.3+ ======================= -.. this is a revision shortly after release-2.2.x -.. startrev: 4cd1bc8b3111 +.. this is a revision shortly after release-2.3.x +.. startrev: ba569fe1efdb -.. branch: release-2.2.x +.. branch: small-unroll-improvements +Improve optimiziation of small allocation-heavy loops in the JIT -.. branch: numpy-newbyteorder -Clean up numpy types, add newbyteorder functionality - -.. branch: windows-packaging -Package tk/tcl runtime with win32 - -.. branch: armhf-singlefloat -JIT support for singlefloats on ARM using the hardfloat ABI - -.. branch: voidtype_strformat -Better support for record numpy arrays - -.. 
branch: osx-eci-frameworks-makefile -OSX: Ensure frameworks end up in Makefile when specified in External compilation info - -.. branch: less-stringly-ops -Use subclasses of SpaceOperation instead of SpaceOperator objects. -Random cleanups in flowspace and annotator. - -.. branch: ndarray-buffer -adds support for the buffer= argument to the ndarray ctor - -.. branch: better_ftime_detect2 -On OpenBSD do not pull in libcompat.a as it is about to be removed. -And more generally, if you have gettimeofday(2) you will not need ftime(3). - -.. branch: timeb_h -Remove dependency upon on OpenBSD. This will be disappearing -along with libcompat.a. - -.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 -Fix 3 broken links on PyPy published papers in docs. - -.. branch: jit-ordereddict - -.. branch: refactor-str-types -Remove multimethods on str/unicode/bytearray and make the implementations share code. - -.. branch: remove-del-from-generatoriterator -Speed up generators that don't yield inside try or wait blocks by skipping -unnecessary cleanup. - -.. branch: annotator -Remove FlowObjSpace. -Improve cohesion between rpython.flowspace and rpython.annotator. - -.. branch: detect-immutable-fields -mapdicts keep track of whether or not an attribute is every assigned to -multiple times. If it's only assigned once then an elidable lookup is used when -possible. - -.. branch: precompiled-headers -Create a Makefile using precompiled headers for MSVC platforms. -The downside is a messy nmake-compatible Makefile. Since gcc shows minimal -speedup, it was not implemented. - -.. branch: camelot -With a properly configured 256-color terminal (TERM=...-256color), the -Mandelbrot set shown during translation now uses a range of 50 colours. -Essential! - -.. branch: NonConstant -Simplify implementation of NonConstant. - -.. 
branch: array-propagate-len -Kill some guards and operations in JIT traces by adding integer bounds -propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). - -.. branch: optimize-int-and -Optimize away INT_AND with constant mask of 1s that fully cover the bitrange -of other operand. - -.. branch: bounds-int-add-or -Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the -operands are positive to kill some guards - -.. branch: remove-intlong-smm -kills int/long/smalllong/bool multimethods - -.. branch: numpy-refactor -Cleanup micronumpy module - -.. branch: int_w-refactor -In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. - -.. branch: test-58c3d8552833 -Fix for getarrayitem_gc_pure optimization - -.. branch: simple-range-strategy -Implements SimpleRangeListStrategy for case range(n) where n is a positive number. -Makes some traces nicer by getting rid of multiplication for calculating loop counter -and propagates that n > 0 further to get rid of guards. - -.. branch: popen-pclose -Provide an exit status for popen'ed RFiles via pclose - -.. branch: stdlib-2.7.6 -Update stdlib to v2.7.6 - -.. branch: virtual-raw-store-load -Support for virtualizing raw_store/raw_load operations +.. branch: reflex-support .. branch: refactor-buffer-api -Separate the interp-level buffer API from the buffer type exposed to -app-level. The `Buffer` class is now used by `W_MemoryView` and -`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was -an alias to `Buffer`, which was wrappable itself. - -.. branch: improve-consecutive-dict-lookups -Improve the situation when dict lookups of the same key are performed in a chain - -.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 -.. 
branch: test_SetFromErrnoWithFilename_NULL -.. branch: test_SetFromErrnoWithFilename__tweaks - -.. branch: refactor_PyErr_SetFromErrnoWithFilename -Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext - -.. branch: win32-fixes4 -fix more tests for win32 - -.. branch: latest-improve-doc -Fix broken links in documentation - -.. branch: ast-issue1673 -fix ast classes __dict__ are always empty problem and fix the ast deepcopy issue when -there is missing field - -.. branch: issue1514 -Fix issues with reimporting builtin modules - -.. branch: numpypy-nditer -Implement the core of nditer, without many of the fancy flags (external_loop, buffered) - -.. branch: numpy-speed -Separate iterator from its state so jit can optimize better - -.. branch: numpy-searchsorted -Implement searchsorted without sorter kwarg - -.. branch: openbsd-lib-prefix -add 'lib' prefix to link libraries on OpenBSD +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -5,7 +5,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id) + compute_unique_id, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX @@ -194,13 +194,37 @@ def immutable_unique_id(self, space): return None - def buffer_w(self, space): + def buffer_w(self, space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_buffer): - return w_result.buffer_w(space) - self._typed_unwrap_error(space, "buffer") + return w_result.buffer_w(space, flags) + raise TypeError + + def readbuf_w(self, space): 
+ w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.readbuf_w(space) + raise TypeError + + def writebuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.writebuf_w(space) + raise TypeError + + def charbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.charbuf_w(space) + raise TypeError def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -1340,25 +1364,111 @@ self.wrap('cannot convert negative integer ' 'to unsigned int')) - def buffer_w(self, w_obj): - return w_obj.buffer_w(self) + BUF_SIMPLE = 0x0000 + BUF_WRITABLE = 0x0001 + BUF_FORMAT = 0x0004 + BUF_ND = 0x0008 + BUF_STRIDES = 0x0010 | BUF_ND + BUF_INDIRECT = 0x0100 | BUF_STRIDES - def rwbuffer_w(self, w_obj): - # returns a RWBuffer instance - from pypy.interpreter.buffer import RWBuffer - buffer = self.buffer_w(w_obj) - if not isinstance(buffer, RWBuffer): - raise OperationError(self.w_TypeError, - self.wrap('read-write buffer expected')) - return buffer + BUF_CONTIG_RO = BUF_ND + BUF_CONTIG = BUF_ND | BUF_WRITABLE - def bufferstr_new_w(self, w_obj): - # Implement the "new buffer interface" (new in Python 2.7) - # returning an unwrapped string. 
It doesn't accept unicode - # strings - buffer = self.buffer_w(w_obj) - return buffer.as_str() + BUF_FULL_RO = BUF_INDIRECT | BUF_FORMAT + BUF_FULL = BUF_INDIRECT | BUF_FORMAT | BUF_WRITABLE + def check_buf_flags(self, flags, readonly): + if readonly and flags & self.BUF_WRITABLE == self.BUF_WRITABLE: + raise oefmt(self.w_BufferError, "Object is not writable.") + + def buffer_w(self, w_obj, flags): + # New buffer interface, returns a buffer based on flags (PyObject_GetBuffer) + try: + return w_obj.buffer_w(self, flags) + except TypeError: + raise oefmt(self.w_TypeError, + "'%T' does not have the buffer interface", w_obj) + + def readbuf_w(self, w_obj): + # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) + try: + return w_obj.readbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a readable buffer object") + + def writebuf_w(self, w_obj): + # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) + try: + return w_obj.writebuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a writeable buffer object") + + def charbuf_w(self, w_obj): + # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) + try: + return w_obj.charbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a character buffer object") + + def _getarg_error(self, expected, w_obj): + if self.is_none(w_obj): + name = "None" + else: + name = self.type(w_obj).get_module_type_name() + raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + + @specialize.arg(1) + def getarg_w(self, code, w_obj): + if code == 'z*': + if self.is_none(w_obj): + return None + code = 's*' + if code == 's*': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.readbuf_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).readbuf_w(self) + try: + return w_obj.buffer_w(self, 0) + except TypeError: + pass + try: + return w_obj.readbuf_w(self) + except TypeError: 
+ self._getarg_error("string or buffer", w_obj) + elif code == 's#': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.str_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).str_w(self) + try: + return w_obj.readbuf_w(self).as_str() + except TypeError: + self._getarg_error("string or read-only buffer", w_obj) + elif code == 'w*': + try: + try: + return w_obj.buffer_w(self, self.BUF_WRITABLE) + except OperationError: + self._getarg_error("read-write buffer", w_obj) + except TypeError: + pass + try: + return w_obj.writebuf_w(self) + except TypeError: + self._getarg_error("read-write buffer", w_obj) + elif code == 't#': + try: + return w_obj.charbuf_w(self) + except TypeError: + self._getarg_error("string or read-only character buffer", w_obj) + else: + assert False + + # XXX rename/replace with code more like CPython getargs for buffers def bufferstr_w(self, w_obj): # Directly returns an interp-level str. Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): @@ -1373,8 +1483,18 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - buffer = self.buffer_w(w_obj) - return buffer.as_str() + try: + buf = w_obj.buffer_w(self, 0) + except TypeError: + pass + else: + return buf.as_str() + try: + buf = w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + else: + return buf.as_str() def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): @@ -1721,6 +1841,7 @@ 'AssertionError', 'AttributeError', 'BaseException', + 'BufferError', 'DeprecationWarning', 'EOFError', 'EnvironmentError', diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py deleted file mode 100644 --- a/pypy/interpreter/buffer.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Buffer protocol support. 
-""" -from rpython.rlib.objectmodel import import_from_mixin - - -class Buffer(object): - """Abstract base class for buffers.""" - __slots__ = [] - - def getlength(self): - raise NotImplementedError - - def as_str(self): - "Returns an interp-level string with the whole content of the buffer." - # May be overridden. - return self.getslice(0, self.getlength(), 1, self.getlength()) - - def getitem(self, index): - "Returns the index'th character in the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def getslice(self, start, stop, step, size): - # May be overridden. No bounds checks. - return ''.join([self.getitem(i) for i in range(start, stop, step)]) - - def get_raw_address(self): - raise ValueError("no raw buffer") - - def is_writable(self): - return False - - -class RWBuffer(Buffer): - """Abstract base class for read-write buffers.""" - __slots__ = [] - - def is_writable(self): - return True - - def setitem(self, index, char): - "Write a character into the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def setslice(self, start, string): - # May be overridden. No bounds checks. 
- for i in range(len(string)): - self.setitem(start + i, string[i]) - - -class StringBuffer(Buffer): - __slots__ = ['value'] - - def __init__(self, value): - self.value = value - - def getlength(self): - return len(self.value) - - def as_str(self): - return self.value - - def getitem(self, index): - return self.value[index] - - def getslice(self, start, stop, step, size): - if size == 0: - return "" - if step == 1: - assert 0 <= start <= stop - return self.value[start:stop] - return "".join([self.value[start + i*step] for i in xrange(size)]) -# ____________________________________________________________ - - -class SubBufferMixin(object): - _attrs_ = ['buffer', 'offset', 'size'] - - def __init__(self, buffer, offset, size): - self.buffer = buffer - self.offset = offset - self.size = size - - def getlength(self): - at_most = self.buffer.getlength() - self.offset - if 0 <= self.size <= at_most: - return self.size - elif at_most >= 0: - return at_most - else: - return 0 - - def getitem(self, index): - return self.buffer.getitem(self.offset + index) - - def getslice(self, start, stop, step, size): - if start == stop: - return '' # otherwise, adding self.offset might make them - # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, - step, size) - - -class SubBuffer(Buffer): - import_from_mixin(SubBufferMixin) - - -class RWSubBuffer(RWBuffer): - import_from_mixin(SubBufferMixin) - - def setitem(self, index, char): - self.buffer.setitem(self.offset + index, char) - - def setslice(self, start, string): - if len(string) == 0: - return # otherwise, adding self.offset might make 'start' - # out of bounds - self.buffer.setslice(self.offset + start, string) diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -299,9 +299,13 @@ """ self._application_traceback = traceback - at specialize.memo() + +class ClearedOpErr: + def __init__(self, space): + self.operr = 
OperationError(space.w_None, space.w_None) + def get_cleared_operation_error(space): - return OperationError(space.w_None, space.w_None) + return space.fromcache(ClearedOpErr).operr # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -205,11 +205,14 @@ def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!! """Implements sys.exc_info(). Return an OperationError instance or None.""" - frame = self.gettopframe_nohidden() + frame = self.gettopframe() while frame: if frame.last_exception is not None: - return frame.last_exception - frame = self.getnextframe_nohidden(frame) + if (not frame.hide() or + frame.last_exception is + get_cleared_operation_error(self.space)): + return frame.last_exception + frame = frame.f_backref() return None def set_sys_exc_info(self, operror): diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -15,10 +15,11 @@ space.setitem(w_modules, w_main, mainmodule) return mainmodule + def compilecode(space, source, filename, cmd='exec'): w = space.wrap - w_code = space.builtin.call('compile', - w(source), w(filename), w(cmd), w(0), w(0)) + w_code = space.builtin.call( + 'compile', w(source), w(filename), w(cmd), w(0), w(0)) pycode = space.interp_w(eval.Code, w_code) return pycode @@ -28,7 +29,7 @@ cmd = 'eval' else: cmd = 'exec' - + try: if space is None: from pypy.objspace.std import StdObjSpace @@ -55,18 +56,22 @@ operationerr.record_interpreter_traceback() raise + def run_string(source, filename=None, space=None): _run_eval_string(source, filename, space, False) + def eval_string(source, filename=None, space=None): return _run_eval_string(source, filename, space, True) + def run_file(filename, 
space=None): - if __name__=='__main__': + if __name__ == '__main__': print "Running %r with %r" % (filename, space) istring = open(filename).read() run_string(istring, filename, space) + def run_module(module_name, args, space=None): """Implements PEP 338 'Executing modules as scripts', overwriting sys.argv[1:] using `args` and executing the module `module_name`. @@ -89,7 +94,6 @@ return space.call_function(w_run_module, w(module_name), space.w_None, w('__main__'), space.w_True) -# ____________________________________________________________ def run_toplevel(space, f, verbose=False): """Calls f() and handle all OperationErrors. diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -881,8 +881,8 @@ def LOAD_NAME(self, nameindex, next_instr): if self.w_locals is not self.w_globals: - w_varname = self.getname_w(nameindex) - w_value = self.space.finditem(self.w_locals, w_varname) + varname = self.getname_u(nameindex) + w_value = self.space.finditem_str(self.w_locals, varname) if w_value is not None: self.pushvalue(w_value) return diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -22,22 +22,6 @@ compile; if absent or zero these statements do influence the compilation, in addition to any features explicitly specified. """ - - ast_node = None - w_ast_type = space.gettypeobject(ast.AST.typedef) - str_ = None - if space.isinstance_w(w_source, w_ast_type): - ast_node = space.interp_w(ast.mod, w_source) - ast_node.sync_app_attrs(space) - elif space.isinstance_w(w_source, space.w_unicode): - w_utf_8_source = space.call_method(w_source, "encode", - space.wrap("utf-8")) - str_ = space.str_w(w_utf_8_source) - # This flag tells the parser to reject any coding cookies it sees. 
- flags |= consts.PyCF_SOURCE_IS_UTF8 - else: - str_ = space.str_w(w_source) - ec = space.getexecutioncontext() if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): @@ -53,14 +37,30 @@ space.wrap("compile() arg 3 must be 'exec' " "or 'eval' or 'single'")) - if ast_node is None: - if flags & consts.PyCF_ONLY_AST: - mod = ec.compiler.compile_to_ast(str_, filename, mode, flags) - return space.wrap(mod) - else: - code = ec.compiler.compile(str_, filename, mode, flags) + w_ast_type = space.gettypeobject(ast.AST.typedef) + if space.isinstance_w(w_source, w_ast_type): + ast_node = space.interp_w(ast.mod, w_source) + ast_node.sync_app_attrs(space) + code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + return space.wrap(code) + + if space.isinstance_w(w_source, space.w_unicode): + w_utf_8_source = space.call_method(w_source, "encode", + space.wrap("utf-8")) + str_ = space.str_w(w_utf_8_source) + # This flag tells the parser to reject any coding cookies it sees. 
+ flags |= consts.PyCF_SOURCE_IS_UTF8 else: - code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + str_ = space.readbuf_w(w_source).as_str() + + if '\x00' in str_: + raise OperationError(space.w_TypeError, space.wrap( + "compile() expected string without null bytes")) + + if flags & consts.PyCF_ONLY_AST: + code = ec.compiler.compile_to_ast(str_, filename, mode, flags) + else: + code = ec.compiler.compile(str_, filename, mode, flags) return space.wrap(code) diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -351,17 +351,17 @@ self.promote_step = promote_step def descr_new(space, w_subtype, w_start, w_stop=None, w_step=None): - start = _toint(space, w_start) + start = space.int_w(w_start) if space.is_none(w_step): # no step argument provided step = 1 promote_step = True else: - step = _toint(space, w_step) + step = space.int_w(w_step) promote_step = False if space.is_none(w_stop): # only 1 argument provided start, stop = 0, start else: - stop = _toint(space, w_stop) + stop = space.int_w(w_stop) howmany = get_len_of_range(space, start, stop, step) obj = space.allocate_instance(W_XRange, w_subtype) W_XRange.__init__(obj, space, start, howmany, step, promote_step) @@ -425,11 +425,6 @@ minint = -sys.maxint - 1 return minint if last < minint - step else last + step -def _toint(space, w_obj): - # this also supports float arguments. CPython still does, too. - # needs a bit more thinking in general... 
- return space.int_w(space.int(w_obj)) - W_XRange.typedef = TypeDef("xrange", __new__ = interp2app(W_XRange.descr_new.im_func), __repr__ = interp2app(W_XRange.descr_repr), @@ -441,6 +436,7 @@ ) W_XRange.typedef.acceptable_as_base_class = False + class W_XRangeIterator(W_Root): def __init__(self, space, current, remaining, step): self.space = space @@ -488,7 +484,10 @@ ) W_XRangeIterator.typedef.acceptable_as_base_class = False + class W_XRangeStepOneIterator(W_XRangeIterator): + _immutable_fields_ = ['stop'] + def __init__(self, space, start, stop): self.space = space self.current = start diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -311,14 +311,14 @@ def test_xrange_len(self): x = xrange(33) assert len(x) == 33 - x = xrange(33.2) - assert len(x) == 33 + exc = raises(TypeError, xrange, 33.2) + assert "integer" in str(exc.value) x = xrange(33,0,-1) assert len(x) == 33 x = xrange(33,0) assert len(x) == 0 - x = xrange(33,0.2) - assert len(x) == 0 + exc = raises(TypeError, xrange, 33, 0.2) + assert "integer" in str(exc.value) x = xrange(0,33) assert len(x) == 33 x = xrange(0,33,-1) @@ -490,6 +490,14 @@ def test_compile(self): co = compile('1+2', '?', 'eval') assert eval(co) == 3 + co = compile(buffer('1+2'), '?', 'eval') + assert eval(co) == 3 + exc = raises(TypeError, compile, chr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, unichr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, memoryview('1+2'), '?', 'eval') + assert str(exc.value) == "expected a readable buffer object" compile("from __future__ import with_statement", "", "exec") raises(SyntaxError, compile, '-', '?', 'eval') raises(ValueError, compile, '"\\xt"', '?', 'eval') diff --git 
a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -1,5 +1,4 @@ class AppTestMap: - def test_trivial_map_one_seq(self): assert map(lambda x: x+2, [1, 2, 3, 4]) == [3, 4, 5, 6] @@ -77,6 +76,7 @@ assert result == [(2, 7), (1, 6), (None, 5), (None, 4), (None, 3), (None, 2), (None, 1)] + class AppTestZip: def test_one_list(self): assert zip([1,2,3]) == [(1,), (2,), (3,)] @@ -93,6 +93,7 @@ yield None assert zip(Foo()) == [] + class AppTestReduce: def test_None(self): raises(TypeError, reduce, lambda x, y: x+y, [1,2,3], None) @@ -105,6 +106,7 @@ assert reduce(lambda x, y: x-y, [10, 2, 8]) == 0 assert reduce(lambda x, y: x-y, [2, 8], 10) == 0 + class AppTestFilter: def test_None(self): assert filter(None, ['a', 'b', 1, 0, None]) == ['a', 'b', 1] @@ -125,6 +127,7 @@ return i * 10 assert filter(lambda x: x != 20, T("abcd")) == (0, 10, 30) + class AppTestXRange: def test_xrange(self): x = xrange(2, 9, 3) @@ -155,7 +158,8 @@ assert list(xrange(0, 10, A())) == [0, 5] def test_xrange_float(self): - assert list(xrange(0.1, 2.0, 1.1)) == [0, 1] + exc = raises(TypeError, xrange, 0.1, 2.0, 1.1) + assert "integer" in str(exc.value) def test_xrange_long(self): import sys @@ -218,6 +222,7 @@ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] raises(TypeError, reversed, reversed("hello")) + class AppTestApply: def test_apply(self): def f(*args, **kw): @@ -228,6 +233,7 @@ assert apply(f, args) == (args, {}) assert apply(f, args, kw) == (args, kw) + class AppTestAllAny: """ These are copied directly and replicated from the Python 2.5 source code. 
@@ -277,6 +283,7 @@ S = [10, 20, 30] assert any([x > 42 for x in S]) == False + class AppTestMinMax: def test_min(self): assert min(1, 2) == 1 diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -2,13 +2,16 @@ # A convenient read-write buffer. Located here for want of a better place. # -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from pypy.interpreter.gateway import unwrap_spec -class ByteBuffer(RWBuffer): +class ByteBuffer(Buffer): + _immutable_ = True + def __init__(self, len): self.data = ['\x00'] * len + self.readonly = False def getlength(self): return len(self.data) diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -13,3 +13,18 @@ assert b[-1] == '*' assert b[-2] == '-' assert b[-3] == '+' + exc = raises(TypeError, "b[3] = 'abc'") + assert str(exc.value) == "right operand must be a single byte" + exc = raises(TypeError, "b[3:5] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + exc = raises(TypeError, "b[3:7:2] = 'abc'") + assert str(exc.value) == "right operand length must match slice length" + + b = bytebuffer(10) + b[1:3] = 'xy' + assert str(b) == "\x00xy" + "\x00" * 7 + b[4:8:2] = 'zw' + assert str(b) == "\x00xy\x00z\x00w" + "\x00" * 3 + r = str(buffer(u'#')) + b[6:6+len(r)] = u'#' + assert str(b[:6+len(r)]) == "\x00xy\x00z\x00" + r diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,21 +1,22 @@ -from pypy.interpreter.buffer import RWBuffer -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.gateway import 
unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.bufferobject import W_Buffer +from rpython.rlib.buffer import Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw -class LLBuffer(RWBuffer): +class LLBuffer(Buffer): _immutable_ = True def __init__(self, raw_cdata, size): self.raw_cdata = raw_cdata self.size = size + self.readonly = False def getlength(self): return self.size @@ -32,7 +33,7 @@ def getslice(self, start, stop, step, size): if step == 1: return rffi.charpsize2str(rffi.ptradd(self.raw_cdata, start), size) - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) @@ -46,6 +47,14 @@ W_Buffer.__init__(self, buffer) self.keepalive = keepalive + def descr_setitem(self, space, w_index, w_obj): + try: + W_Buffer.descr_setitem(self, space, w_index, w_obj) + except OperationError as e: + if e.match(space, space.w_TypeError): + e.w_type = space.w_ValueError + raise + MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -72,8 +72,8 @@ 'utf_32_le_decode' : 'interp_codecs.utf_32_le_decode', 'utf_32_le_encode' : 'interp_codecs.utf_32_le_encode', 'utf_32_ex_decode' : 'interp_codecs.utf_32_ex_decode', - 'charbuffer_encode': 'interp_codecs.buffer_encode', - 'readbuffer_encode': 'interp_codecs.buffer_encode', + 'charbuffer_encode': 'interp_codecs.charbuffer_encode', + 'readbuffer_encode': 'interp_codecs.readbuffer_encode', 'charmap_decode' : 'interp_codecs.charmap_decode', 
'charmap_encode' : 'interp_codecs.charmap_encode', 'escape_encode' : 'interp_codecs.escape_encode', diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -321,8 +321,14 @@ w_res = space.call_function(w_encoder, w_obj, space.wrap(errors)) return space.getitem(w_res, space.wrap(0)) - at unwrap_spec(s='bufferstr', errors='str_or_None') -def buffer_encode(space, s, errors='strict'): + at unwrap_spec(errors='str_or_None') +def readbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('s#', w_data) + return space.newtuple([space.wrap(s), space.wrap(len(s))]) + + at unwrap_spec(errors='str_or_None') +def charbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('t#', w_data) return space.newtuple([space.wrap(s), space.wrap(len(s))]) @unwrap_spec(errors=str) @@ -673,7 +679,7 @@ if space.isinstance_w(w_string, space.w_unicode): return space.newtuple([w_string, space.len(w_string)]) - string = space.str_w(w_string) + string = space.readbuf_w(w_string).as_str() if len(string) == 0: return space.newtuple([space.wrap(u''), space.wrap(0)]) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -276,7 +276,7 @@ assert enc == "a\x00\x00\x00" def test_unicode_internal_decode(self): - import sys + import sys, _codecs, array if sys.maxunicode == 65535: # UCS2 build if sys.byteorder == "big": bytes = "\x00a" @@ -291,6 +291,9 @@ bytes2 = "\x98\x00\x01\x00" assert bytes2.decode("unicode_internal") == u"\U00010098" assert bytes.decode("unicode_internal") == u"a" + assert _codecs.unicode_internal_decode(array.array('c', bytes))[0] == u"a" + exc = raises(TypeError, _codecs.unicode_internal_decode, memoryview(bytes)) + assert str(exc.value) == "expected a readable buffer object" def 
test_raw_unicode_escape(self): assert unicode("\u0663", "raw-unicode-escape") == u"\u0663" @@ -420,9 +423,13 @@ for (i, line) in enumerate(reader): assert line == s[i] - def test_array(self): + def test_buffer_encode(self): import _codecs, array - _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + assert _codecs.readbuffer_encode(array.array('c', 'spam')) == ('spam', 4) + exc = raises(TypeError, _codecs.charbuffer_encode, array.array('c', 'spam')) + assert str(exc.value) == "must be string or read-only character buffer, not array.array" + assert _codecs.readbuffer_encode(u"test") == ('test', 4) + assert _codecs.charbuffer_encode(u"test") == ('test', 4) def test_utf8sig(self): import codecs diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -1,6 +1,6 @@ from rpython.rlib.rstacklet import StackletThread from rpython.rlib import jit -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef @@ -39,6 +39,7 @@ bottomframe.locals_stack_w[1] = w_callable bottomframe.locals_stack_w[2] = w_args bottomframe.locals_stack_w[3] = w_kwds + bottomframe.last_exception = get_cleared_operation_error(space) self.bottomframe = bottomframe # global_state.origin = self diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -684,3 +684,17 @@ execfile(self.translated, d) d['set_fast_mode']() d['test_various_depths']() + + def test_exc_info_doesnt_follow_continuations(self): + import sys + from _continuation 
import continulet + # + def f1(c1): + return sys.exc_info() + # + c1 = continulet(f1) + try: + 1 // 0 + except ZeroDivisionError: + got = c1.switch() + assert got == (None, None, None) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -267,9 +267,14 @@ def direct_write(self, w_data): space = self.space - if not self.binary and space.isinstance_w(w_data, space.w_unicode): - w_data = space.call_method(w_data, "encode", space.wrap(self.encoding), space.wrap(self.errors)) - data = space.bufferstr_w(w_data) + if self.binary: + data = space.getarg_w('s*', w_data).as_str() + else: + if space.isinstance_w(w_data, space.w_unicode): + w_data = space.call_method(w_data, "encode", + space.wrap(self.encoding), + space.wrap(self.errors)) + data = space.charbuf_w(w_data) self.do_direct_write(data) def do_direct_write(self, data): @@ -455,21 +460,24 @@ space = self.space self.check_closed() - w_iterator = space.iter(w_lines) - while True: - try: - w_line = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done + lines = space.fixedview(w_lines) + for i, w_line in enumerate(lines): + if not space.isinstance_w(w_line, space.w_str): + try: + line = w_line.charbuf_w(space) + except TypeError: + raise OperationError(space.w_TypeError, space.wrap( + "writelines() argument must be a sequence of strings")) + else: + lines[i] = space.wrap(line) + for w_line in lines: self.file_write(w_line) def file_readinto(self, w_rwbuffer): """readinto() -> Undocumented. 
Don't use this; it may go away.""" # XXX not the most efficient solution as it doesn't avoid the copying space = self.space - rwbuffer = space.rwbuffer_w(w_rwbuffer) + rwbuffer = space.writebuf_w(w_rwbuffer) w_data = self.file_read(rwbuffer.getlength()) data = space.str_w(w_data) rwbuffer.setslice(0, data) diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -386,6 +386,32 @@ assert len(somelines) > 200 assert somelines == lines[:len(somelines)] + def test_writelines(self): + import array + fn = self.temptestfile + with file(fn, 'w') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'r').readlines() == ['abcdef'] + + with file(fn, 'wb') as f: + f.writelines(['abc']) + f.writelines([u'def']) + exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + exc = raises(TypeError, f.writelines, [memoryview('jkl')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == ['abcdef'] + + with file(fn, 'wb') as f: + exc = raises(TypeError, f.writelines, ['abc', memoryview('def')]) + assert str(exc.value) == "writelines() argument must be a sequence of strings" + assert open(fn, 'rb').readlines() == [] + def test_nasty_writelines(self): # The stream lock should be released between writes fn = self.temptestfile diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ 
b/pypy/module/_io/interp_bufferedio.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -80,7 +80,7 @@ self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() w_data = space.call_method(self, "read", space.wrap(length)) @@ -101,11 +101,14 @@ readinto = interp2app(W_BufferedIOBase.readinto_w), ) -class RawBuffer(RWBuffer): +class RawBuffer(Buffer): + _immutable_ = True + def __init__(self, buf, start, length): self.buf = buf self.start = start self.length = length + self.readonly = False def getlength(self): return self.length @@ -698,7 +701,7 @@ def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() size = len(data) with self.lock: diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -41,7 +41,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) size = rwbuffer.getlength() output = self.read(size) @@ -50,10 +50,7 @@ def write_w(self, space, w_data): self._check_closed(space) - if space.isinstance_w(w_data, space.w_unicode): - raise OperationError(space.w_TypeError, space.wrap( - "bytes string of buffer expected")) - buf = space.bufferstr_w(w_data) + buf = space.buffer_w(w_data, space.BUF_CONTIG_RO).as_str() length = 
len(buf) if length <= 0: return space.wrap(0) diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -333,7 +333,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() try: n = os.write(self.fd, data) @@ -366,7 +366,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) self._check_readable(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() try: buf = os.read(self.fd, length) diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,6 +139,14 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == 'a\nb\ncxxxxx' @@ -235,7 +243,8 @@ import _io raw = _io.FileIO(self.tmpfile, 'w') f = _io.BufferedWriter(raw) - f.write("abcd") + f.write("ab") + f.write(u"cd") f.close() assert self.readfile() == "abcd" diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -38,6 +38,8 @@ From noreply at buildbot.pypy.org Mon Apr 28 22:29:43 2014 From: noreply at 
buildbot.pypy.org (mattip) Date: Mon, 28 Apr 2014 22:29:43 +0200 (CEST) Subject: [pypy-commit] pypy default: fix for windows, script still does not create proper commands Message-ID: <20140428202943.495481D2935@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71053:430c42d4c761 Date: 2014-04-28 23:28 +0300 http://bitbucket.org/pypy/pypy/changeset/430c42d4c761/ Log: fix for windows, script still does not create proper commands diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, pwd, urllib +import os, sys, urllib from twisted.internet import reactor, defer from twisted.python import log @@ -34,6 +34,13 @@ 'build-pypy-c-jit-linux-armel', ] +def get_user(): + if sys.platform == 'win32': + return os.environ['USERNAME'] + else: + import pwd + return pwd.getpwuid(os.getuid())[0] + def main(): #XXX: handle release tags #XXX: handle validity checks @@ -49,7 +56,7 @@ print 'Forcing', builder, '...' url = "http://buildbot.pypy.org/builders/" + builder + "/force" args = [ - ('username', pwd.getpwuid(os.getuid())[0]), + ('username', get_user()), ('revision', ''), ('submit', 'Force Build'), ('branch', branch), From noreply at buildbot.pypy.org Mon Apr 28 22:29:44 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 28 Apr 2014 22:29:44 +0200 (CEST) Subject: [pypy-commit] pypy default: whoops Message-ID: <20140428202944.76B1D1D2935@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71054:5a6c8e00ee88 Date: 2014-04-28 23:29 +0300 http://bitbucket.org/pypy/pypy/changeset/5a6c8e00ee88/ Log: whoops diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -30,15 +30,15 @@ * `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. 
-_`Py3k`: http://pypy.org/py3donate.html -_`STM`: http://pypy.org/tmdonate2.html -_ `Numpy`: http://pypy.org/numpydonate.html -_`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html -_`CFFI`: http://cffi.readthedocs.org -_`cryptography`: https://cryptography.io -_`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 -_`pygame-cffi`: https://github.com/CTPUG/pygame_cffi -_`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _ `Numpy`: http://pypy.org/numpydonate.html +.. _`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html +.. _`CFFI`: http://cffi.readthedocs.org +.. _`cryptography`: https://cryptography.io +.. _`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 +.. _`pygame-cffi`: https://github.com/CTPUG/pygame_cffi +.. _`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html What is PyPy? ============= @@ -56,8 +56,8 @@ bit python is still stalling, we would welcome a volunteer to `handle that`_. -_`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org -_`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== @@ -84,7 +84,7 @@ * Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port -`HippyVM`_: http://www.hippyvm.com +.. 
_`HippyVM`: http://www.hippyvm.com New Platforms and Features -------------------------- From noreply at buildbot.pypy.org Mon Apr 28 22:53:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 28 Apr 2014 22:53:58 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_compiler on osx buildbot Message-ID: <20140428205358.20AC41C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71055:d86c4a65f830 Date: 2014-04-28 13:52 -0700 http://bitbucket.org/pypy/pypy/changeset/d86c4a65f830/ Log: fix test_compiler on osx buildbot diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,8 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc is not None and platform.cc.startswith(('gcc', 'clang')): +elif platform.cc is not None and \ + os.path.basename(platform.cc).startswith(('gcc', 'clang')): from rpython.rtyper.tool import rffi_platform COMPILER_INFO = 'GCC ' + rffi_platform.getdefinedstring('__VERSION__', '') else: From noreply at buildbot.pypy.org Tue Apr 29 02:37:14 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 29 Apr 2014 02:37:14 +0200 (CEST) Subject: [pypy-commit] pypy default: kill useless ClassMethods metaclass Message-ID: <20140429003714.6F5571D2932@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71056:c8e3b8cbc843 Date: 2014-04-29 01:36 +0100 http://bitbucket.org/pypy/pypy/changeset/c8e3b8cbc843/ Log: kill useless ClassMethods metaclass diff --git a/rpython/rtyper/module/ll_os_path.py b/rpython/rtyper/module/ll_os_path.py --- a/rpython/rtyper/module/ll_os_path.py +++ b/rpython/rtyper/module/ll_os_path.py @@ -6,26 +6,24 @@ import stat import os -from rpython.tool.staticmethods import ClassMethods # Does a path exist? # This is false for dangling symbolic links. 
-class BaseOsPath: - __metaclass__ = ClassMethods - +class BaseOsPath(object): + @classmethod def ll_os_path_exists(cls, path): """Test whether a path exists""" try: - st = os.stat(cls.from_rstr_nonnull(path)) + os.stat(cls.from_rstr_nonnull(path)) except OSError: return False return True + @classmethod def ll_os_path_isdir(cls, path): try: st = os.stat(cls.from_rstr_nonnull(path)) except OSError: return False return stat.S_ISDIR(st[0]) - diff --git a/rpython/tool/staticmethods.py b/rpython/tool/staticmethods.py --- a/rpython/tool/staticmethods.py +++ b/rpython/tool/staticmethods.py @@ -10,11 +10,5 @@ class StaticMethods(AbstractMethods): """ Metaclass that turns plain methods into staticmethods. - """ + """ decorator = staticmethod - -class ClassMethods(AbstractMethods): - """ - Metaclass that turns plain methods into classmethods. - """ - decorator = classmethod From noreply at buildbot.pypy.org Tue Apr 29 04:59:19 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 29 Apr 2014 04:59:19 +0200 (CEST) Subject: [pypy-commit] pypy default: issue1746 -- added __closure__ to functions Message-ID: <20140429025919.CB5EC1D2936@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71057:9913d8bb1a20 Date: 2014-04-28 19:58 -0700 http://bitbucket.org/pypy/pypy/changeset/9913d8bb1a20/ Log: issue1746 -- added __closure__ to functions diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -809,6 +809,7 @@ __dict__ = getset_func_dict, __defaults__ = getset_func_defaults, __globals__ = interp_attrproperty_w('w_func_globals', cls=Function), + __closure__ = GetSetProperty(Function.fget_func_closure), __module__ = getset___module__, __weakref__ = make_weakref_descr(Function), ) From noreply at buildbot.pypy.org Tue Apr 29 04:59:21 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 29 Apr 2014 04:59:21 +0200 (CEST) Subject: [pypy-commit] pypy default: 
merged upstream Message-ID: <20140429025921.5C5A51D2936@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71058:1f55d73ad8a0 Date: 2014-04-28 19:58 -0700 http://bitbucket.org/pypy/pypy/changeset/1f55d73ad8a0/ Log: merged upstream diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -76,7 +76,7 @@ .. code-block:: console $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py + $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py $ ./pypy-2.1/bin/pypy distribute_setup.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -1,11 +1,17 @@ ======================================= -PyPy 2.3 - XXXX TODO +PyPy 2.3 - Easier Than Ever ======================================= We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. -This release also contains several bugfixes and performance improvements. +This release also contains several bugfixes and performance improvements, +many generated by real users finding corner cases our `TDD`_ methods missed. +`CFFI`_ has made it easier than ever to use existing C code with both cpython +and PyPy, easing the transition for packages like `cryptography`_, `Pillow`_ +(Python Imaging Library [Fork]), a basic port of `pygame-cffi`_, and others. + +PyPy can now be embedded in a hosting application, for instance inside `uWSGI`_ You can download the PyPy 2.3 release here: @@ -17,72 +23,112 @@ Please consider donating more so we can finish those projects! The three projects are: -* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. 
+* `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. -* STM (software transactional memory): a preview will be released very soon, - as soon as we fix a few bugs +* `STM`_ (software transactional memory): a preview will be released very soon, + once we fix a few bugs -* NumPy: the work done is included in the PyPy 2.2 release. More details below. +* `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. -.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _ `Numpy`: http://pypy.org/numpydonate.html +.. _`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html +.. _`CFFI`: http://cffi.readthedocs.org +.. _`cryptography`: https://cryptography.io +.. _`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 +.. _`pygame-cffi`: https://github.com/CTPUG/pygame_cffi +.. _`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html What is PyPy? ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison; +note that the latest cpython is not faster than cpython 2.7.2) due to its integrated tracing JIT compiler. -This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows -32, or ARM (ARMv6 or ARMv7, with VFPv3). +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. -Work on the native Windows 64 is still stalling, we would welcome a volunteer -to handle that. +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. .. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +.. 
_`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== -* Our Garbage Collector is now "incremental". It should avoid almost - all pauses due to a major collection taking place. Previously, it - would pause the program (rarely) to walk all live objects, which - could take arbitrarily long if your process is using a whole lot of - RAM. Now the same work is done in steps. This should make PyPy - more responsive, e.g. in games. There are still other pauses, from - the GC and the JIT, but they should be on the order of 5 - milliseconds each. +Bugfixes +-------- -* The JIT counters for hot code were never reset, which meant that a - process running for long enough would eventually JIT-compile more - and more rarely executed code. Not only is it useless to compile - such code, but as more compiled code means more memory used, this - gives the impression of a memory leak. This has been tentatively - fixed by decreasing the counters from time to time. +Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider +performance slowdowns as bugs. -* NumPy has been split: now PyPy only contains the core module, called - ``_numpypy``. The ``numpy`` module itself has been moved to - ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. - You need to install NumPy separately with a virtualenv: +* The ARM port no longer crashes on unaligned memory access to floats and doubles, + and singlefloats are supported in the JIT. + +* Generators are faster since they now skip unecessary cleanup + +* A first time contributor simplified JIT traces by adding integer bound + propagation in indexing and logical operations. 
+ +* Optimize consecutive dictionary lookups of the same key in a chain + +* Our extensive pre-translation test suite now runs nightly on more platforms + +* Fix issues with reimporting builtin modules + +* Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port + +.. _`HippyVM`: http://www.hippyvm.com + +New Platforms and Features +-------------------------- + +* Support for OpenBSD + +* Code cleanup: we continue to prune out old and unused code, and to refactor + large parts of the codebase. We have sepearated rpython from the PyPy python + interpreter, and rpython is seeing use in other dynamic language projects. + +* Support for precompiled headers in the build process for MSVC + +* Support for objects with __int__ and __float__ methods + +* Tweak support of errno in cpyext (the PyPy implemenation of the capi) + + +Numpy +----- +Numpy support has been split into a builtin ``_numpy`` module and a +fork of the numpy code base adapted to pypy at + ``https://bitbucket.org/pypy/numpy``. +You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; or directly: ``git clone https://bitbucket.org/pypy/numpy.git``; ``cd numpy``; ``pypy setup.py install``. -* non-inlined calls have less overhead +* NumPy support has been improved, many failures in indexing, dtypes, + and scalars were corrected. We are slowly approaching our goal of passing + the numpy test suite. We still do not support object or unicode ndarrays. -* Things that use ``sys.set_trace`` are now JITted (like coverage) +* speed of iteration in dot() is now within 1.5x of the numpy c + implementation (without BLAS acceleration). Since the same array + iterator is used throughout the ``_numpy`` module, speed increases should + be apparent in all Numpy functionality. -* JSON decoding is now very fast (JSON encoding was already very fast) +* Most of the core functionality of nditer has been implemented. 
-* various buffer copying methods experience speedups (like list-of-ints to - ``int[]`` buffer from cffi) +* A cffi-based ``numpy.random`` module is available as a branch in the numpy + repository, it will be merged soon after this release. -* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, - including ``os.startfile()`` on Windows and a handful of rare ones - on Posix. +* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in numpy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. -* numpy has a rudimentary C API that cooperates with ``cpyext`` +Cheers +The PyPy Team -Cheers, -Armin Rigo and Maciej Fijalkowski diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,8 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc is not None and platform.cc.startswith(('gcc', 'clang')): +elif platform.cc is not None and \ + os.path.basename(platform.cc).startswith(('gcc', 'clang')): from rpython.rtyper.tool import rffi_platform COMPILER_INFO = 'GCC ' + rffi_platform.getdefinedstring('__VERSION__', '') else: diff --git a/pypy/module/zipimport/test/test_undocumented.py b/pypy/module/zipimport/test/test_undocumented.py --- a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -135,8 +135,7 @@ importer = zipimport.zipimporter(os.path.join(zip_path, '_pkg')) assert zip_path in zipimport._zip_directory_cache file_set = set(zipimport._zip_directory_cache[zip_path].iterkeys()) - compare_set = set(path.replace(os.path.sep, '/') + '.py' - for path in self.created_paths) + compare_set = set(path + '.py' for path in self.created_paths) assert file_set == compare_set finally: 
self.cleanup_zipfile(self.created_paths) diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, pwd, urllib +import os, sys, urllib from twisted.internet import reactor, defer from twisted.python import log @@ -34,6 +34,13 @@ 'build-pypy-c-jit-linux-armel', ] +def get_user(): + if sys.platform == 'win32': + return os.environ['USERNAME'] + else: + import pwd + return pwd.getpwuid(os.getuid())[0] + def main(): #XXX: handle release tags #XXX: handle validity checks @@ -49,7 +56,7 @@ print 'Forcing', builder, '...' url = "http://buildbot.pypy.org/builders/" + builder + "/force" args = [ - ('username', pwd.getpwuid(os.getuid())[0]), + ('username', get_user()), ('revision', ''), ('submit', 'Force Build'), ('branch', branch), diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -107,7 +107,7 @@ self.matcher = matcher self.lineno = 0 self.columnno = 0 - + def find_next_token(self): while 1: self.state = 0 @@ -126,8 +126,8 @@ i = ~i stop = self.last_matched_index + 1 assert stop >= 0 - if start == stop: - source_pos = SourcePos(i - 1, self.lineno, self.columnno) + if start == stop: + source_pos = self.token_position_class(i - 1, self.lineno, self.columnno) raise deterministic.LexerError(self.text, self.state, source_pos) source = self.text[start:stop] @@ -147,7 +147,7 @@ else: raise StopIteration return result - source_pos = SourcePos(i - 1, self.lineno, self.columnno) + source_pos = self.token_position_class(i - 1, self.lineno, self.columnno) raise deterministic.LexerError(self.text, self.state, source_pos) def adjust_position(self, token): @@ -158,7 +158,7 @@ self.columnno += len(token) else: self.columnno = token.rfind("\n") - + # def inner_loop(self, i): # while i < len(self.text): # char = self.text[i] @@ 
-186,10 +186,15 @@ class LexingDFARunner(AbstractLexingDFARunner): def __init__(self, matcher, automaton, text, ignore, eof=False, token_class=None): - if token_class is None: + + if not token_class: self.token_class = Token + self.token_position_class = SourcePos + else: self.token_class = token_class + self.token_position_class = token_class.source_position_class + AbstractLexingDFARunner.__init__(self, matcher, automaton, text, eof) self.ignore = ignore @@ -198,8 +203,10 @@ def make_token(self, index, state, text, eof=False): assert (eof and state == -1) or 0 <= state < len(self.automaton.names) - source_pos = SourcePos(index, self.lineno, self.columnno) + + source_pos = self.token_position_class(index, self.lineno, self.columnno) if eof: return self.token_class("EOF", "EOF", source_pos) + return self.token_class(self.automaton.names[self.last_matched_state], text, source_pos) diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -1146,9 +1146,9 @@ address_to_fill=None): # port_or_service is a string, not an int (but try str(port_number)). 
assert port_or_service is None or isinstance(port_or_service, str) - if _c._MACOSX: - if port_or_service is None or port_or_service == '0': - port_or_service = '00' + if _c._MACOSX and flags & AI_NUMERICSERV and \ + (port_or_service is None or port_or_service == '0'): + port_or_service = '00' hints = lltype.malloc(_c.addrinfo, flavor='raw', zero=True) rffi.setintfield(hints, 'c_ai_family', family) rffi.setintfield(hints, 'c_ai_socktype', socktype) diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -1104,6 +1104,21 @@ finally: signal(SIGALRM, SIG_DFL) + def test_append_mode(self): + tfn = str(udir.join('streamio-append-mode')) + fo = streamio.open_file_as_stream # shorthand + x = fo(tfn, 'w') + x.write('abc123') + x.close() + + x = fo(tfn, 'a') + x.seek(0, 0) + x.write('456') + x.close() + x = fo(tfn, 'r') + assert x.read() == 'abc123456' + x.close() + # Speed test diff --git a/rpython/rtyper/module/ll_os_path.py b/rpython/rtyper/module/ll_os_path.py --- a/rpython/rtyper/module/ll_os_path.py +++ b/rpython/rtyper/module/ll_os_path.py @@ -6,26 +6,24 @@ import stat import os -from rpython.tool.staticmethods import ClassMethods # Does a path exist? # This is false for dangling symbolic links. 
-class BaseOsPath: - __metaclass__ = ClassMethods - +class BaseOsPath(object): + @classmethod def ll_os_path_exists(cls, path): """Test whether a path exists""" try: - st = os.stat(cls.from_rstr_nonnull(path)) + os.stat(cls.from_rstr_nonnull(path)) except OSError: return False return True + @classmethod def ll_os_path_isdir(cls, path): try: st = os.stat(cls.from_rstr_nonnull(path)) except OSError: return False return stat.S_ISDIR(st[0]) - diff --git a/rpython/tool/identity_dict.py b/rpython/tool/identity_dict.py --- a/rpython/tool/identity_dict.py +++ b/rpython/tool/identity_dict.py @@ -3,15 +3,15 @@ except ImportError: idict = None -from UserDict import DictMixin +from collections import MutableMapping -class IdentityDictPurePython(object, DictMixin): +class IdentityDictPurePython(MutableMapping): __slots__ = "_dict _keys".split() def __init__(self): self._dict = {} - self._keys = {} # id(obj) -> obj + self._keys = {} # id(obj) -> obj def __getitem__(self, arg): return self._dict[id(arg)] @@ -24,8 +24,11 @@ del self._keys[id(arg)] del self._dict[id(arg)] - def keys(self): - return self._keys.values() + def __iter__(self): + return self._keys.itervalues() + + def __len__(self): + return len(self._keys) def __contains__(self, arg): return id(arg) in self._dict @@ -37,8 +40,7 @@ return d -class IdentityDictPyPy(object, DictMixin): - __slots__ = ["_dict"] +class IdentityDictPyPy(MutableMapping): def __init__(self): self._dict = idict() @@ -52,8 +54,11 @@ def __delitem__(self, arg): del self._dict[arg] - def keys(self): - return self._dict.keys() + def __iter__(self): + return iter(self._dict.keys()) + + def __len__(self): + return len(self._dict) def __contains__(self, arg): return arg in self._dict @@ -64,8 +69,10 @@ assert len(d) == len(self) return d + def __nonzero__(self): + return bool(self._dict) + if idict is None: identity_dict = IdentityDictPurePython else: identity_dict = IdentityDictPyPy - diff --git a/rpython/tool/staticmethods.py 
b/rpython/tool/staticmethods.py --- a/rpython/tool/staticmethods.py +++ b/rpython/tool/staticmethods.py @@ -10,11 +10,5 @@ class StaticMethods(AbstractMethods): """ Metaclass that turns plain methods into staticmethods. - """ + """ decorator = staticmethod - -class ClassMethods(AbstractMethods): - """ - Metaclass that turns plain methods into classmethods. - """ - decorator = classmethod From noreply at buildbot.pypy.org Tue Apr 29 05:28:59 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 29 Apr 2014 05:28:59 +0200 (CEST) Subject: [pypy-commit] pypy default: add test for 9913d8bb1a20 Message-ID: <20140429032859.C539F1D2936@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71059:0524dae88c75 Date: 2014-04-28 23:27 -0400 http://bitbucket.org/pypy/pypy/changeset/0524dae88c75/ Log: add test for 9913d8bb1a20 diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -387,3 +387,9 @@ # because it's a regular method, and .__objclass__ # differs from .im_class in case the method is # defined in some parent class of l's actual class + + def test_func_closure(self): + x = 2 + def f(): + return x + assert f.__closure__[0].cell_contents is x From noreply at buildbot.pypy.org Tue Apr 29 09:27:13 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 29 Apr 2014 09:27:13 +0200 (CEST) Subject: [pypy-commit] stmgc default: Some other place where commit_soon may be a good idea. (it helps for raytrace, Message-ID: <20140429072713.1EFF81C02D8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1192:64e31b3d391b Date: 2014-04-29 09:28 +0200 http://bitbucket.org/pypy/stmgc/changeset/64e31b3d391b/ Log: Some other place where commit_soon may be a good idea. 
(it helps for raytrace, but it's mostly a workaround for a broken stm_should_break_transaction in pypy) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -287,6 +287,12 @@ STM_PSEGMENT->minor_collect_will_commit_now = commit; if (!commit) { + /* We should commit soon, probably. This is kind of a + workaround for the broken stm_should_break_transaction of + pypy that doesn't want to commit any more after a minor + collection. It may, however, always be a good idea... */ + stmcb_commit_soon(); + /* 'STM_PSEGMENT->overflow_number' is used now by this collection, in the sense that it's copied to the overflow objects */ STM_PSEGMENT->overflow_number_has_been_used = true; From noreply at buildbot.pypy.org Tue Apr 29 09:27:14 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 29 Apr 2014 09:27:14 +0200 (CEST) Subject: [pypy-commit] stmgc default: some floating uncommitted changes to htm-c7 Message-ID: <20140429072714.265681C02D8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1193:ff3d33869baa Date: 2014-04-29 09:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/ff3d33869baa/ Log: some floating uncommitted changes to htm-c7 diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -29,6 +29,7 @@ static void acquire_gil(stm_thread_local_t *tl) { if (pthread_mutex_lock(&_stm_gil) == 0) { _stm_tloc = tl; + STM_SEGMENT->running_thread = tl; _htm_info.use_gil = 1; return; } @@ -121,11 +122,14 @@ } _stm_tloc = tl; + STM_SEGMENT->running_thread = tl; } + void stm_commit_transaction(void) { stm_collect(0); _stm_tloc = NULL; + STM_SEGMENT->running_thread = NULL; if (_htm_info.use_gil) { OPT_ASSERT(!xtest()); if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); @@ -338,7 +342,7 @@ /************************************************************/ -#define NB_NURSERY_PAGES 1024 // 4MB +#define NB_NURSERY_PAGES 1 // 1 page #define NURSERY_SIZE (NB_NURSERY_PAGES * 4096UL) __thread 
char *_stm_nursery_base = NULL; diff --git a/htm-c7/stmgc.h b/htm-c7/stmgc.h --- a/htm-c7/stmgc.h +++ b/htm-c7/stmgc.h @@ -44,6 +44,7 @@ stm_jmpbuf_t *jmpbuf_ptr; /* compat only -- always NULL */ char *nursery_current; /* updated... */ int segment_num; /* compat only -- always NULL */ + stm_thread_local_t *running_thread; }; //extern struct stm_segment_info_s _stm_segment; extern __thread struct stm_segment_info_s *_stm_segment; @@ -158,4 +159,6 @@ // XXX ignored } +inline static void stm_flush_timing(stm_thread_local_t *tl, int verbose) {} + #endif From noreply at buildbot.pypy.org Tue Apr 29 14:04:05 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 29 Apr 2014 14:04:05 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add some text to the introduction Message-ID: <20140429120405.2AB581C05CE@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5199:e86083d6e22e Date: 2014-04-29 14:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/e86083d6e22e/ Log: add some text to the introduction diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -37,7 +37,7 @@ %% \titlebanner{banner above paper title} % These are ignored unless %% \preprintfooter{short description of paper} % 'preprint' option specified. -\title{Title Text} +\title{The Way Forward in Parallelizing Dynamic Languages} \subtitle{Position Paper, ICOOOLPS'14} \authorinfo{Remi Meier} @@ -64,19 +64,57 @@ transactional memory, dynamic languages, parallelism, global interpreter lock \section{Introduction} +In a world where computers get more and more cores and single-thread +performance increases less and less every year, many dynamic languages +have a problem. While there is certainly a lot of popularity around +languages like Python and Ruby, their ability to make use of multiple +cores is somewhat limited. 
For ease of implementation they chose to +use a single, global interpreter lock (GIL) to synchronize the +execution of code in multiple threads. While this is a +straight-forward way to eliminate synchronization issues in the +interpreter, it prevents parallel execution. Code executed in multiple +threads will be serialized over this GIL so only one thread executes +at a time. -\subsection*{Issue} -efficiently supporting multi-CPU usage on dynamic languages that were designed with GIL semantics in -mind +There exist several solutions and work-arounds to remove or avoid the +GIL in order to benefit from multiple cores. We are going to discuss +several of them and try to find the best way forward. The first +approach uses fine-grained locking to replace the single GIL. Then +there are shared-nothing models that use for example multiple +processes with multiple interpreters and explicit message +passing. Finally, one can also directly replace the GIL with +transactional memory (TM), either software-based (STM) or +hardware-based (HTM). -(supporting (large) atomic blocks for synchronization) +The approach that wins in the end should perform similarly for +single-threaded execution as compared to the GIL and be able to +execute code in parallel on multiple cores. Furthermore, we will also +take into account the compatibility to existing code that already uses +threads for concurrency, as well as the changes that are required to +the interpreter itself. -\subsection*{Our Position} -Current solutions for replacing the GIL include STM, HTM, and -fine-grained locking. STM is usually too slow, HTM very limited, and -locking suffers from complexity that makes it hard to implement -correctly. We argue that the best way forward is still STM and that -its performance problem can be solved. +These requirements are not easy to meet. We argue that STM is the +overall winner. While it has a big performance problem currently, it +gets more points in all the other categories. 
We think that it is the +only solution that also enables a better synchronization mechanism to +the application in the form of atomic blocks. + +%% \subsection{Issue} +%% The issue that we want to discuss is how to efficiently support +%% multi-core parallel execution of code in dynamic languages that were +%% designed with GIL semantics in mind. + +%% Furthermore, a solution to this problem should also bring better +%% synchronization mechanism with it... + +%% (supporting (large) atomic blocks for synchronization) + +%% \subsection{Our Position} +%% Current solutions for replacing the GIL include STM, HTM, and +%% fine-grained locking. STM is usually too slow, HTM very limited, and +%% locking suffers from complexity that makes it hard to implement +%% correctly. We argue that the best way forward is still STM and that +%% its performance problem can be solved. %% Current solutions like STM, HTM, and fine-grained locking are slow, hard %% to implement correctly, and don't fit the specific problems of dynamic @@ -84,6 +122,10 @@ %% fix that. \section{Discussion} +(why do we have the GIL? it makes the interpreter thread-safe) +(now there is code relying on the semantics (atomicity of bytecode instructions)) +(synchronization with locks for application-level synchronization is still needed. 
So GIL is useless for that) + \paragraph{dynamic language VM problems} - high allocation rate (short lived objects)\\ From noreply at buildbot.pypy.org Tue Apr 29 15:26:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 29 Apr 2014 15:26:53 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more text Message-ID: <20140429132653.A85491D23E9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5200:3c88be22028d Date: 2014-04-29 15:26 +0200 http://bitbucket.org/pypy/extradoc/changeset/3c88be22028d/ Log: more text diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -122,9 +122,51 @@ %% fix that. \section{Discussion} -(why do we have the GIL? it makes the interpreter thread-safe) -(now there is code relying on the semantics (atomicity of bytecode instructions)) -(synchronization with locks for application-level synchronization is still needed. So GIL is useless for that) +\subsection{Why is there a GIL?} +The GIL is a very simple synchronization mechanism for supporting +multi-threading in the interpreter. The basic guarantee is that the +GIL may only be released in-between bytecode instructions. The +interpreter can thus rely on complete isolation and atomicity of these +instructions. As a consequence, applications can rely on certain +operations to be atomic. While this is probably not a good idea, +it is used in practice. A solution replacing the GIL should therefore +uphold these guarantees, while preferably also be as easily +implementable as a GIL for the interpreter. + +The GIL also allows for easy integration with external C libraries that +do not need to be thread-safe. For the duration of the calls, we +simply do not release the GIL. External libraries that are explicitly +thread-safe can voluntarily release the GIL themselves in order to +still provide some parallelism. 
This is done for example for +potentially long I/O operations. Consequently, I/O-bound, +multi-threaded applications can actually parallelize to some +degree. Again, a potential solution should be able to integrate with +external libraries with similar ease. We will however focus our +argumentation more on running code in the interpreted language in +parallel, not the external C calls. + +Since the GIL is mostly an implementation detail of the interpreter, +it is not exposed to the application running on top of it. To +synchronize memory accesses in applications using threads, the +state-of-the-art still means explicit locking everywhere. It is well +known that using locks for synchronization is not easy. They are +non-composable, have overhead, may deadlock, limit scalability, and +overall add a lot of complexity. For a better parallel programming +model for dynamic languages, we propose another, well-known +synchronization mechanism called \emph{atomic blocks}. + +Atomic blocks are composable, deadlock-free, higher-level and expose +useful atomicity and isolation guarantees to the application for a +series of instructions. Interpreters using using a GIL can simply +guarantee that the GIL is not released during the execution of the +atomic block. Of course, this still means that no two atomic blocks +can execute in parallel or even concurrently. Potential solutions +that provide a good way to implement atomic blocks are therefore +preferable. 
+ + + +\subsection{Potential Solutions} \paragraph{dynamic language VM problems} From noreply at buildbot.pypy.org Tue Apr 29 16:30:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 29 Apr 2014 16:30:06 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: xxx: should mention that the interpreter is typically very large and Message-ID: <20140429143006.ED4A11C02D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5201:ca5adaba9b43 Date: 2014-04-29 16:28 +0200 http://bitbucket.org/pypy/extradoc/changeset/ca5adaba9b43/ Log: xxx: should mention that the interpreter is typically very large and maintained by open-source communities diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -132,6 +132,8 @@ it is used in practice. A solution replacing the GIL should therefore uphold these guarantees, while preferably also be as easily implementable as a GIL for the interpreter. +[xxx mention that the interpreter is typically very large and maintained +by open-source communities] The GIL also allows for easy integration with external C libraries that do not need to be thread-safe. For the duration of the calls, we From noreply at buildbot.pypy.org Tue Apr 29 16:57:02 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 29 Apr 2014 16:57:02 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more text Message-ID: <20140429145702.938F01C244E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5202:fc555e9f5502 Date: 2014-04-29 16:53 +0200 http://bitbucket.org/pypy/extradoc/changeset/fc555e9f5502/ Log: more text diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -73,8 +73,8 @@ execution of code in multiple threads. 
While this is a straight-forward way to eliminate synchronization issues in the interpreter, it prevents parallel execution. Code executed in multiple -threads will be serialized over this GIL so only one thread executes -at a time. +threads will be serialized over this GIL so that only one thread can +execute at a time. There exist several solutions and work-arounds to remove or avoid the GIL in order to benefit from multiple cores. We are going to discuss @@ -96,7 +96,7 @@ These requirements are not easy to meet. We argue that STM is the overall winner. While it has a big performance problem currently, it gets more points in all the other categories. We think that it is the -only solution that also enables a better synchronization mechanism to +only solution that also provides a better synchronization mechanism to the application in the form of atomic blocks. %% \subsection{Issue} @@ -122,6 +122,13 @@ %% fix that. \section{Discussion} + +\paragraph{dynamic language VM problems} + +- high allocation rate (short lived objects)\\ +- (don't know anything about the program that runs until it actually runs: arbitrary atomic block size) + + \subsection{Why is there a GIL?} The GIL is a very simple synchronization mechanism for supporting multi-threading in the interpreter. The basic guarantee is that the @@ -168,18 +175,16 @@ \subsection{Potential Solutions} -\paragraph{dynamic language VM problems} +The list of criterias for evaluating potential solutions is as follows: +\begin{itemize} +\item Performance +\item Changes required to existing applications +\item Better synchronization options for applications (e.g. 
atomic blocks) +\item Ease of implementation (interpreter-level) +\item Integration with external libraries +\end{itemize} -- high allocation rate (short lived objects)\\ -- (don't know anything about the program that runs until it actually runs: arbitrary atomic block size) - -\paragraph{GIL} - -- nice semantics\\ -- easy support of atomic blocks\\ -- no parallelism - -\paragraph{fine-grained locking} +\subsubsection*{fine-grained locking} - support of atomic blocks?\\ - hard to get right (deadlocks, performance, lock-granularity)\\ @@ -188,12 +193,13 @@ - (there are some semantic differences, right? not given perfect lock-placement, but well) ( http://www.jython.org/jythonbook/en/1.0/Concurrency.html ) -\paragraph{multiprocessing / no-sharing models} +\subsubsection*{Shared-nothing / multiple processes} - often needs major restructuring of programs (explicit data exchange)\\ - sometimes communication overhead is too large\\ - shared memory is a problem, copies of memory are too expensive +\subsubsection*{Transactional Memory} \paragraph{HTM} - false-sharing on cache-line level\\ @@ -207,7 +213,7 @@ (FastLane: low overhead, not much gain)\\ - unlimited transaction length (easy atomic blocks) -\section{Potential Approach} +\section{The Way Forward} possible solution:\\ - use virtual memory paging to somehow lower the STM overhead\\ - tight integration with GC and jit? 
From noreply at buildbot.pypy.org Tue Apr 29 16:57:03 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 29 Apr 2014 16:57:03 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Merge Message-ID: <20140429145703.9CC0A1C244E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5203:79b1aa4130ee Date: 2014-04-29 16:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/79b1aa4130ee/ Log: Merge diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -139,6 +139,8 @@ it is used in practice. A solution replacing the GIL should therefore uphold these guarantees, while preferably also be as easily implementable as a GIL for the interpreter. +[xxx mention that the interpreter is typically very large and maintained +by open-source communities] The GIL also allows for easy integration with external C libraries that do not need to be thread-safe. For the duration of the calls, we From noreply at buildbot.pypy.org Tue Apr 29 18:01:24 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 29 Apr 2014 18:01:24 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: some changes Message-ID: <20140429160124.962D91C02D8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5204:27a34df61cb1 Date: 2014-04-29 18:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/27a34df61cb1/ Log: some changes diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -177,16 +177,31 @@ \subsection{Potential Solutions} -The list of criterias for evaluating potential solutions is as follows: -\begin{itemize} -\item Performance -\item Changes required to existing applications -\item Better synchronization options for applications (e.g. 
atomic blocks) -\item Ease of implementation (interpreter-level) -\item Integration with external libraries -\end{itemize} +For the discussion we define a set of criteria to evaluate the +multiple potential solutions for removing or avoiding the GIL and its +limitations: -\subsubsection*{fine-grained locking} +\begin{description} +\item[Performance:] How well does the approach perform compared to the + GIL on single and multiple threads? +\item[Existing applications:] How big are the changes required to + integrate with and parallelize existing applications? +\item[Better synchronization:] Does the approach enable better + synchronization mechanisms for applications (e.g. atomic blocks)? +\item[Implementation:] How difficult is it to implement the approach + in the interpreter? +\item[External libraries:] Does the approach allow for easy + integration of external libraries? +\end{description} + + +\subsubsection{Fine-Grained Locking} + +The first obvious candidate to replace the GIL is to use multiple +locks instead of a single global lock. By refining the granularity +of the locking approach, we gain the ability to run code that does +not access the same objects in parallel. + - support of atomic blocks?\\ - hard to get right (deadlocks, performance, lock-granularity)\\ @@ -195,13 +210,13 @@ - (there are some semantic differences, right? not given perfect lock-placement, but well) ( http://www.jython.org/jythonbook/en/1.0/Concurrency.html ) -\subsubsection*{Shared-nothing / multiple processes} +\subsubsection{Shared-Nothing / multiple processes} - often needs major restructuring of programs (explicit data exchange)\\ - sometimes communication overhead is too large\\ - shared memory is a problem, copies of memory are too expensive -\subsubsection*{Transactional Memory} +\subsubsection{Transactional Memory} \paragraph{HTM} - false-sharing on cache-line level\\ @@ -221,10 +236,10 @@ - tight integration with GC and jit? 
-\appendix -\section{Appendix Title} +%% \appendix +%% \section{Appendix Title} -This is the text of the appendix, if you need one. +%% This is the text of the appendix, if you need one. \acks From noreply at buildbot.pypy.org Tue Apr 29 21:34:12 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 29 Apr 2014 21:34:12 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: fix version name Message-ID: <20140429193412.646AC1D2CD5@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r71060:ce3ce6a5c5eb Date: 2014-04-29 22:26 +0300 http://bitbucket.org/pypy/pypy/changeset/ce3ce6a5c5eb/ Log: fix version name diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.6" /* PyPy version as a string */ -#define PYPY_VERSION "2.3.0-final0" +#define PYPY_VERSION "2.3.0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ From noreply at buildbot.pypy.org Tue Apr 29 21:34:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 29 Apr 2014 21:34:13 +0200 (CEST) Subject: [pypy-commit] pypy default: update whatsnew Message-ID: <20140429193413.9142D1D2CD5@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71061:b0eeffcdd58f Date: 2014-04-29 22:32 +0300 http://bitbucket.org/pypy/pypy/changeset/b0eeffcdd58f/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -152,3 +152,12 @@ .. branch: small-unroll-improvements Improve optimization of small allocation-heavy loops in the JIT + +.. branch: reflex-support + +.. branch: asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 + +.. branch: lexer_token_position_class + +.. 
branch: refactor-buffer-api +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,12 +3,6 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ba569fe1efdb +.. startrev: 0524dae88c75 -.. branch: small-unroll-improvements -Improve optimiziation of small allocation-heavy loops in the JIT -.. branch: reflex-support - -.. branch: refactor-buffer-api -Properly implement old/new buffer API for objects and start work on replacing bufferstr usage From noreply at buildbot.pypy.org Tue Apr 29 21:34:14 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 29 Apr 2014 21:34:14 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into release branch Message-ID: <20140429193414.DAE471D2CD5@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r71062:f76f43f8cc16 Date: 2014-04-29 22:33 +0300 http://bitbucket.org/pypy/pypy/changeset/f76f43f8cc16/ Log: merge default into release branch diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -30,15 +30,15 @@ * `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. -_`Py3k`: http://pypy.org/py3donate.html -_`STM`: http://pypy.org/tmdonate2.html -_ `Numpy`: http://pypy.org/numpydonate.html -_`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html -_`CFFI`: http://cffi.readthedocs.org -_`cryptography`: https://cryptography.io -_`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 -_`pygame-cffi`: https://github.com/CTPUG/pygame_cffi -_`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _ `Numpy`: http://pypy.org/numpydonate.html +.. 
_`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html +.. _`CFFI`: http://cffi.readthedocs.org +.. _`cryptography`: https://cryptography.io +.. _`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 +.. _`pygame-cffi`: https://github.com/CTPUG/pygame_cffi +.. _`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html What is PyPy? ============= @@ -56,8 +56,8 @@ bit python is still stalling, we would welcome a volunteer to `handle that`_. -_`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org -_`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== @@ -84,7 +84,7 @@ * Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port -`HippyVM`_: http://www.hippyvm.com +.. _`HippyVM`: http://www.hippyvm.com New Platforms and Features -------------------------- diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -152,3 +152,12 @@ .. branch: small-unroll-improvements Improve optimization of small allocation-heavy loops in the JIT + +.. branch: reflex-support + +.. branch: asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 + +.. branch: lexer_token_position_class + +.. branch: refactor-buffer-api +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,12 +3,6 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ba569fe1efdb +.. startrev: 0524dae88c75 -.. branch: small-unroll-improvements -Improve optimiziation of small allocation-heavy loops in the JIT -.. branch: reflex-support - -.. 
branch: refactor-buffer-api -Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -387,3 +387,9 @@ # because it's a regular method, and .__objclass__ # differs from .im_class in case the method is # defined in some parent class of l's actual class + + def test_func_closure(self): + x = 2 + def f(): + return x + assert f.__closure__[0].cell_contents is x diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -809,6 +809,7 @@ __dict__ = getset_func_dict, __defaults__ = getset_func_defaults, __globals__ = interp_attrproperty_w('w_func_globals', cls=Function), + __closure__ = GetSetProperty(Function.fget_func_closure), __module__ = getset___module__, __weakref__ = make_weakref_descr(Function), ) diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,8 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc is not None and platform.cc.startswith(('gcc', 'clang')): +elif platform.cc is not None and \ + os.path.basename(platform.cc).startswith(('gcc', 'clang')): from rpython.rtyper.tool import rffi_platform COMPILER_INFO = 'GCC ' + rffi_platform.getdefinedstring('__VERSION__', '') else: diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, pwd, urllib +import os, sys, urllib from twisted.internet import reactor, defer from twisted.python import log @@ -34,6 +34,13 @@ 'build-pypy-c-jit-linux-armel', ] +def get_user(): + if sys.platform == 'win32': + 
return os.environ['USERNAME'] + else: + import pwd + return pwd.getpwuid(os.getuid())[0] + def main(): #XXX: handle release tags #XXX: handle validity checks @@ -49,7 +56,7 @@ print 'Forcing', builder, '...' url = "http://buildbot.pypy.org/builders/" + builder + "/force" args = [ - ('username', pwd.getpwuid(os.getuid())[0]), + ('username', get_user()), ('revision', ''), ('submit', 'Force Build'), ('branch', branch), diff --git a/rpython/rtyper/module/ll_os_path.py b/rpython/rtyper/module/ll_os_path.py --- a/rpython/rtyper/module/ll_os_path.py +++ b/rpython/rtyper/module/ll_os_path.py @@ -6,26 +6,24 @@ import stat import os -from rpython.tool.staticmethods import ClassMethods # Does a path exist? # This is false for dangling symbolic links. -class BaseOsPath: - __metaclass__ = ClassMethods - +class BaseOsPath(object): + @classmethod def ll_os_path_exists(cls, path): """Test whether a path exists""" try: - st = os.stat(cls.from_rstr_nonnull(path)) + os.stat(cls.from_rstr_nonnull(path)) except OSError: return False return True + @classmethod def ll_os_path_isdir(cls, path): try: st = os.stat(cls.from_rstr_nonnull(path)) except OSError: return False return stat.S_ISDIR(st[0]) - diff --git a/rpython/tool/staticmethods.py b/rpython/tool/staticmethods.py --- a/rpython/tool/staticmethods.py +++ b/rpython/tool/staticmethods.py @@ -10,11 +10,5 @@ class StaticMethods(AbstractMethods): """ Metaclass that turns plain methods into staticmethods. - """ + """ decorator = staticmethod - -class ClassMethods(AbstractMethods): - """ - Metaclass that turns plain methods into classmethods. 
- """ - decorator = classmethod From noreply at buildbot.pypy.org Tue Apr 29 21:39:49 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 29 Apr 2014 21:39:49 +0200 (CEST) Subject: [pypy-commit] pypy default: typo (jodal) Message-ID: <20140429193949.9DDA71C01CB@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71063:6f62316b9343 Date: 2014-04-29 22:39 +0300 http://bitbucket.org/pypy/pypy/changeset/6f62316b9343/ Log: typo (jodal) diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -92,7 +92,7 @@ * Support for OpenBSD * Code cleanup: we continue to prune out old and unused code, and to refactor - large parts of the codebase. We have sepearated rpython from the PyPy python + large parts of the codebase. We have separated rpython from the PyPy python interpreter, and rpython is seeing use in other dynamic language projects. * Support for precompiled headers in the build process for MSVC From noreply at buildbot.pypy.org Tue Apr 29 23:10:09 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 29 Apr 2014 23:10:09 +0200 (CEST) Subject: [pypy-commit] pypy default: fix issue 1530 by removing oracle module Message-ID: <20140429211009.851CF1C01CB@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71064:fadfe9f9e3d4 Date: 2014-04-29 23:58 +0300 http://bitbucket.org/pypy/pypy/changeset/fadfe9f9e3d4/ Log: fix issue 1530 by removing oracle module diff too long, truncating to 2000 out of 6366 lines diff --git a/pypy/doc/config/objspace.usemodules.oracle.txt b/pypy/doc/config/objspace.usemodules.oracle.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.oracle.txt +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'oracle' module. -This module is off by default, requires oracle client installed. 
diff --git a/pypy/module/cpyext/patches/cx_Oracle.patch b/pypy/module/cpyext/patches/cx_Oracle.patch deleted file mode 100644 --- a/pypy/module/cpyext/patches/cx_Oracle.patch +++ /dev/null @@ -1,60 +0,0 @@ -Index: cx_Oracle.c -=================================================================== ---- cx_Oracle.c (r�vision 333) -+++ cx_Oracle.c (copie de travail) -@@ -65,6 +65,13 @@ - #define CXORA_BASE_EXCEPTION PyExc_StandardError - #endif - -+// define missing PyDateTime_DELTA macros -+#ifndef PYPY_VERSION -+PyDateTime_DELTA_GET_DAYS(o) (((PyDateTime_Delta*)o)->days) -+PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta*)o)->seconds) -+PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds) -+#endif -+ - // define simple construct for determining endianness of the platform - // Oracle uses native encoding with OCI_UTF16 but bails when a BOM is written - #define IS_LITTLE_ENDIAN (int)*(unsigned char*) &one -@@ -138,6 +145,7 @@ - *exception = PyErr_NewException(buffer, baseException, NULL); - if (!*exception) - return -1; -+ Py_INCREF(*exception); - return PyModule_AddObject(module, name, *exception); - } - -Index: IntervalVar.c -=================================================================== ---- IntervalVar.c (r�vision 333) -+++ IntervalVar.c (copie de travail) -@@ -121,7 +121,7 @@ - unsigned pos, // array position to set - PyObject *value) // value to set - { -- sb4 hours, minutes, seconds; -+ sb4 days, hours, minutes, seconds, microseconds; - PyDateTime_Delta *delta; - sword status; - -@@ -131,13 +131,16 @@ - } - - delta = (PyDateTime_Delta*) value; -- hours = (sb4) delta->seconds / 3600; -- seconds = delta->seconds - hours * 3600; -+ days = PyDateTime_DELTA_GET_DAYS(delta); -+ seconds = PyDateTime_DELTA_GET_SECONDS(delta); -+ hours = (sb4) seconds / 3600; -+ seconds -= hours * 3600; - minutes = (sb4) seconds / 60; - seconds -= minutes * 60; -+ microseconds = PyDateTime_DELTA_GET_MICROSECONDS(delta); - status = 
OCIIntervalSetDaySecond(var->environment->handle, -- var->environment->errorHandle, delta->days, hours, minutes, -- seconds, delta->microseconds, var->data[pos]); -+ var->environment->errorHandle, days, hours, minutes, -+ seconds, microseconds, var->data[pos]); - if (Environment_CheckForError(var->environment, status, - "IntervalVar_SetValue()") < 0) - return -1; diff --git a/pypy/module/oracle/__init__.py b/pypy/module/oracle/__init__.py deleted file mode 100644 --- a/pypy/module/oracle/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -from pypy.interpreter.mixedmodule import MixedModule - -class Module(MixedModule): - applevel_name = 'cx_Oracle' - - interpleveldefs = { - 'connect': 'interp_connect.W_Connection', - 'Connection': 'interp_connect.W_Connection', - 'NUMBER': 'interp_variable.VT_Float', - 'STRING': 'interp_variable.VT_String', - 'UNICODE': 'interp_variable.VT_NationalCharString', - 'DATETIME': 'interp_variable.VT_DateTime', - 'DATE': 'interp_variable.VT_Date', - 'TIMESTAMP': 'interp_variable.VT_Timestamp', - 'INTERVAL': 'interp_variable.VT_Interval', - 'BINARY': 'interp_variable.VT_Binary', - 'LONG_STRING': 'interp_variable.VT_LongString', - 'LONG_BINARY': 'interp_variable.VT_LongBinary', - 'FIXED_CHAR': 'interp_variable.VT_FixedChar', - 'FIXED_UNICODE': 'interp_variable.VT_FixedNationalChar', - 'CURSOR': 'interp_variable.VT_Cursor', - 'BLOB': 'interp_variable.VT_BLOB', - 'CLOB': 'interp_variable.VT_CLOB', - 'OBJECT': 'interp_variable.VT_Object', - 'Variable': 'interp_variable.W_Variable', - 'SessionPool': 'interp_pool.W_SessionPool', - } - - appleveldefs = { - 'version': 'app_oracle.version', - 'paramstyle': 'app_oracle.paramstyle', - 'makedsn': 'app_oracle.makedsn', - 'TimestampFromTicks': 'app_oracle.TimestampFromTicks', - } - for name in """DataError DatabaseError Error IntegrityError InterfaceError - InternalError NotSupportedError OperationalError - ProgrammingError Warning""".split(): - appleveldefs[name] = "app_oracle.%s" % (name,) - - def 
startup(self, space): - from pypy.module.oracle.interp_error import get - state = get(space) - state.startup(space) - (state.w_DecimalType, - state.w_DateTimeType, state.w_DateType, state.w_TimedeltaType, - ) = space.fixedview(space.appexec([], """(): - import decimal, datetime - return (decimal.Decimal, - datetime.datetime, datetime.date, datetime.timedelta) - """)) - space.setattr(space.wrap(self), - space.wrap("Timestamp"), state.w_DateTimeType) - space.setattr(space.wrap(self), - space.wrap("Date"), state.w_DateType) diff --git a/pypy/module/oracle/app_oracle.py b/pypy/module/oracle/app_oracle.py deleted file mode 100644 --- a/pypy/module/oracle/app_oracle.py +++ /dev/null @@ -1,42 +0,0 @@ -version = '5.0.0' -paramstyle = 'named' - -class Warning(StandardError): - pass - -class Error(StandardError): - pass - -class InterfaceError(Error): - pass - -class DatabaseError(Error): - pass - -class DataError(DatabaseError): - pass - -class OperationalError(DatabaseError): - pass - -class IntegrityError(DatabaseError): - pass - -class InternalError(DatabaseError): - pass - -class ProgrammingError(DatabaseError): - pass - -class NotSupportedError(DatabaseError): - pass - - -def makedsn(host, port, sid): - return ("(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=" - "(PROTOCOL=TCP)(HOST=%s)(PORT=%s)))" - "(CONNECT_DATA=(SID=%s)))" % (host, port, sid)) - -def TimestampFromTicks(*args): - import datetime - return datetime.datetime.fromtimestamp(*args) diff --git a/pypy/module/oracle/config.py b/pypy/module/oracle/config.py deleted file mode 100644 --- a/pypy/module/oracle/config.py +++ /dev/null @@ -1,54 +0,0 @@ -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.oracle import roci - -WITH_UNICODE = False - -MAX_STRING_CHARS = 4000 -MAX_BINARY_BYTES = 4000 - -if WITH_UNICODE: - CHARSETID = roci.OCI_UTF16ID - BYTES_PER_CHAR = 2 - def string_w(space, w_obj): - return space.unicode_w(w_obj) -else: - def string_w(space, w_obj): - return space.str_w(w_obj) - - def 
w_string(space, buf, len=-1): - #assert type(len) is int - if len < 0: - return space.wrap(rffi.charp2str(buf)) - else: - return space.wrap(rffi.charpsize2str(buf, len)) - CHARSETID = 0 - BYTES_PER_CHAR = 1 - - class StringBuffer: - "Fill a char* buffer with data, suitable to pass to Oracle functions" - def __init__(self): - self.ptr = lltype.nullptr(roci.oratext.TO) - self.size = 0 - - def fill(self, space, w_value): - if w_value is None or space.is_w(w_value, space.w_None): - self.clear() - else: - strvalue = space.str_w(w_value) - self.ptr = rffi.str2charp(strvalue) - self.size = len(strvalue) - - def fill_with_unicode(self, space, w_value): - if w_value is None or space.is_w(w_value, space.w_None): - self.clear() - else: - # XXX ucs2 only probably - univalue = space.unicode_w(w_value) - self.ptr = rffi.cast(roci.oratext, rffi.unicode2wcharp(univalue)) - self.size = len(univalue) * 2 - - def clear(self): - if self.ptr: - rffi.free_charp(self.ptr) - self.ptr = lltype.nullptr(roci.oratext.TO) - self.size = 0 diff --git a/pypy/module/oracle/conftest.py b/pypy/module/oracle/conftest.py deleted file mode 100644 --- a/pypy/module/oracle/conftest.py +++ /dev/null @@ -1,9 +0,0 @@ -import os - -def pytest_addoption(parser): - group = parser.getgroup("Oracle module options") - group.addoption('--oracle-home', dest="oracle_home", - help="Home directory of Oracle client installation", - default=os.environ.get("ORACLE_HOME")) - group.addoption('--oracle-connect', dest="oracle_connect", - help="connect string (user/pwd at db) used for tests") diff --git a/pypy/module/oracle/interp_connect.py b/pypy/module/oracle/interp_connect.py deleted file mode 100644 --- a/pypy/module/oracle/interp_connect.py +++ /dev/null @@ -1,551 +0,0 @@ -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.typedef import (TypeDef, interp_attrproperty_w, - GetSetProperty) -from pypy.interpreter.gateway import interp2app -from 
pypy.interpreter.error import OperationError -from rpython.rtyper.lltypesystem import rffi, lltype - -from pypy.module.oracle import roci, interp_error -from pypy.module.oracle.config import string_w, StringBuffer, MAX_STRING_CHARS -from pypy.module.oracle.interp_environ import Environment -from pypy.module.oracle.interp_cursor import W_Cursor -from pypy.module.oracle.interp_pool import W_SessionPool -from pypy.module.oracle.interp_variable import VT_String - - -class W_Connection(W_Root): - def __init__(self): - self.commitMode = roci.OCI_DEFAULT - self.environment = None - self.autocommit = False - - self.sessionHandle = lltype.nullptr(roci.OCISession.TO) - self.serverHandle = lltype.nullptr(roci.OCIServer.TO) - - self.w_inputTypeHandler = None - self.w_outputTypeHandler = None - - self.w_version = None - self.release = False - - - @unwrap_spec(mode=int, handle=int, - threaded=bool, twophase=bool, events=bool, - purity=bool) - def descr_new(space, w_subtype, - w_user=None, - w_password=None, - w_dsn=None, - mode=roci.OCI_DEFAULT, - handle=0, # XXX should be a ptr type - w_pool=None, - threaded=False, - twophase=False, - events=False, - w_cclass=None, - purity=0, - w_newpassword=None): - self = space.allocate_instance(W_Connection, w_subtype) - W_Connection.__init__(self) - - # set up the environment - if w_pool: - pool = space.interp_w(W_SessionPool, w_pool) - self.environment = pool.environment.clone() - else: - pool = None - self.environment = Environment.create(space, threaded, events) - - self.w_username = w_user - self.w_password = w_password - self.w_tnsentry = w_dsn - - # perform some parsing, if necessary - if (self.w_username and not self.w_password and - space.is_true(space.contains(self.w_username, space.wrap('/')))): - (self.w_username, self.w_password) = space.listview( - space.call_method(self.w_username, 'split', - space.wrap('/'), space.wrap(1))) - - if (self.w_password and not self.w_tnsentry and - space.is_true(space.contains(self.w_password, 
space.wrap('@')))): - (self.w_password, self.w_tnsentry) = space.listview( - space.call_method(self.w_password, 'split', - space.wrap('@'), space.wrap(1))) - - if pool or w_cclass is not None: - self.getConnection(space, pool, w_cclass, purity) - else: - self.connect(space, mode, twophase) - return space.wrap(self) - - def __del__(self): - self.enqueue_for_destruction(self.environment.space, - W_Connection.destructor, - '__del__ method of ') - - def destructor(self): - assert isinstance(self, W_Connection) - if self.release: - roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - roci.OCISessionRelease( - self.handle, self.environment.errorHandle, - None, 0, roci.OCI_DEFAULT) - else: - if self.sessionHandle: - roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - roci.OCISessionEnd( - self.handle, self.environment.errorHandle, - self.sessionHandle, roci.OCI_DEFAULT) - if self.serverHandle: - roci.OCIServerDetach( - self.serverHandle, self.environment.errorHandle, - roci.OCI_DEFAULT) - - def connect(self, space, mode, twophase): - stringBuffer = StringBuffer() - - # allocate the server handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIServer).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_SERVER, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_Connect(): allocate server handle") - self.serverHandle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - # attach to the server - stringBuffer.fill(space, self.w_tnsentry) - try: - status = roci.OCIServerAttach( - self.serverHandle, - self.environment.errorHandle, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Connect(): server attach") - finally: - stringBuffer.clear() - - # allocate the service context handle - handleptr = 
lltype.malloc(rffi.CArrayPtr(roci.OCISvcCtx).TO, - 1, flavor='raw') - - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_SVCCTX, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_Connect(): allocate service context handle") - self.handle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - # set attribute for server handle - status = roci.OCIAttrSet( - self.handle, roci.OCI_HTYPE_SVCCTX, - self.serverHandle, 0, - roci.OCI_ATTR_SERVER, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set server handle") - - # set the internal and external names; these are needed for global - # transactions but are limited in terms of the lengths of the strings - if twophase: - status = roci.OCIAttrSet( - self.serverHandle, roci.OCI_HTYPE_SERVER, - "cx_Oracle", 0, - roci.OCI_ATTR_INTERNAL_NAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set internal name") - status = roci.OCIAttrSet( - self.serverHandle, roci.OCI_HTYPE_SERVER, - "cx_Oracle", 0, - roci.OCI_ATTR_EXTERNAL_NAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set external name") - - # allocate the session handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCISession).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_SESSION, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_Connect(): allocate session handle") - self.sessionHandle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - credentialType = roci.OCI_CRED_EXT - - # set user name in session handle - stringBuffer.fill(space, self.w_username) - try: - if stringBuffer.size > 0: - credentialType = roci.OCI_CRED_RDBMS - status = roci.OCIAttrSet( - self.sessionHandle, 
- roci.OCI_HTYPE_SESSION, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_USERNAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set user name") - finally: - stringBuffer.clear() - - # set password in session handle - stringBuffer.fill(space, self.w_password) - try: - if stringBuffer.size > 0: - credentialType = roci.OCI_CRED_RDBMS - status = roci.OCIAttrSet( - self.sessionHandle, - roci.OCI_HTYPE_SESSION, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_PASSWORD, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set password") - finally: - stringBuffer.clear() - - # set the session handle on the service context handle - status = roci.OCIAttrSet( - self.handle, roci.OCI_HTYPE_SVCCTX, - self.sessionHandle, 0, - roci.OCI_ATTR_SESSION, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set session handle") - - # if a new password has been specified, change it which will also - # establish the session - - # begin the session - status = roci.OCISessionBegin( - self.handle, self.environment.errorHandle, - self.sessionHandle, credentialType, mode) - try: - self.environment.checkForError( - status, "Connection_Connect(): begin session") - except: - self.sessionHandle = lltype.nullptr(roci.OCISession.TO) - raise - - def getConnection(self, space, pool, w_cclass, purity): - """Get a connection using the OCISessionGet() interface - rather than using the low level interface for connecting.""" - - proxyCredentials = False - authInfo = lltype.nullptr(roci.OCIAuthInfo.TO) - - if pool: - w_dbname = pool.w_name - mode = roci.OCI_SESSGET_SPOOL - if not pool.homogeneous and pool.w_username and self.w_username: - proxyCredentials = space.is_true(space.ne(pool.w_username, self.w_username)) - mode |= roci.OCI_SESSGET_CREDPROXY - else: - w_dbname = self.w_tnsentry - mode = roci.OCI_SESSGET_STMTCACHE - - stringBuffer = 
StringBuffer() - - # set up authorization handle, if needed - if not pool or w_cclass or proxyCredentials: - # create authorization handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIAuthInfo).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, - roci.OCI_HTYPE_AUTHINFO, - 0, lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_GetConnection(): allocate handle") - - authInfo = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - externalCredentials = True - - # set the user name, if applicable - stringBuffer.fill(space, self.w_username) - try: - if stringBuffer.size > 0: - externalCredentials = False - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_USERNAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetConnection(): set user name") - finally: - stringBuffer.clear() - - # set the password, if applicable - stringBuffer.fill(space, self.w_password) - try: - if stringBuffer.size > 0: - externalCredentials = False - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_PASSWORD, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetConnection(): set password") - finally: - stringBuffer.clear() - - # if no user name or password are set, using external credentials - if not pool and externalCredentials: - mode |= roci.OCI_SESSGET_CREDEXT - - # set the connection class, if applicable - if roci.OCI_ATTR_CONNECTION_CLASS is not None: - stringBuffer.fill(space, w_cclass) - try: - if stringBuffer.size > 0: - externalCredentials = False - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_CONNECTION_CLASS, - self.environment.errorHandle) - self.environment.checkForError( - 
status, - "Connection_GetConnection(): set connection class") - finally: - stringBuffer.clear() - - # set the purity, if applicable - if (roci.OCI_ATTR_PURITY is not None - and purity != roci.OCI_ATTR_PURITY_DEFAULT): - purityptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, - 1, flavor='raw') - purityptr[0] = rffi.cast(roci.ub4, purity) - try: - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - rffi.cast(roci.dvoidp, purityptr), - rffi.sizeof(roci.ub4), - roci.OCI_ATTR_PURITY, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetConnection(): set purity") - finally: - lltype.free(purityptr, flavor='raw') - - # acquire the new session - stringBuffer.fill(space, w_dbname) - foundptr = lltype.malloc(rffi.CArrayPtr(roci.boolean).TO, - 1, flavor='raw') - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCISvcCtx).TO, - 1, flavor='raw') - try: - status = roci.OCISessionGet( - self.environment.handle, - self.environment.errorHandle, - handleptr, - authInfo, - stringBuffer.ptr, stringBuffer.size, - None, 0, - lltype.nullptr(roci.Ptr(roci.oratext).TO), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - foundptr, - mode) - self.environment.checkForError( - status, "Connection_GetConnection(): get connection") - - self.handle = handleptr[0] - finally: - stringBuffer.clear() - lltype.free(foundptr, flavor='raw') - lltype.free(handleptr, flavor='raw') - - # eliminate the authorization handle immediately, if applicable - if authInfo: - roci.OCIHandleFree(authInfo, roci.OCI_HTYPE_AUTHINFO) - - # copy members in the case where a pool is being used - if pool: - if not proxyCredentials: - self.w_username = pool.w_username - self.w_password = pool.w_password - self.w_tnsentry = pool.w_tnsentry - self.sessionPool = pool - - self.release = True - - def _checkConnected(self, space): - if not self.handle: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap("not connected")) - - def close(self, space): - # make sure 
we are actually connnected - self._checkConnected(space) - - # perform a rollback - status = roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Close(): rollback") - - # logoff of the server - if self.sessionHandle: - status = roci.OCISessionEnd( - self.handle, self.environment.errorHandle, - self.sessionHandle, roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Close(): end session") - roci.OCIHandleFree(self.handle, roci.OCI_HTYPE_SVCCTX) - - self.handle = lltype.nullptr(roci.OCISvcCtx.TO) - - def commit(self, space): - # make sure we are actually connected - self._checkConnected(space) - - status = roci.OCITransCommit( - self.handle, self.environment.errorHandle, - self.commitMode) - self.environment.checkForError( - status, "Connection_Commit()") - - self.commitMode = roci.OCI_DEFAULT - - def rollback(self, space): - # make sure we are actually connected - self._checkConnected(space) - - status = roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Rollback()") - - def newCursor(self, space): - return space.wrap(W_Cursor(space, self)) - - def _getCharacterSetName(self, space, attribute): - # get character set id - charsetIdPtr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, - 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.environment.handle, roci.OCI_HTYPE_ENV, - rffi.cast(roci.dvoidp, charsetIdPtr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - attribute, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetCharacterSetName(): get charset id") - charsetId = charsetIdPtr[0] - finally: - lltype.free(charsetIdPtr, flavor='raw') - - # get character set name - charsetname_buf, charsetname = rffi.alloc_buffer(roci.OCI_NLS_MAXBUFSZ) - try: - status = roci.OCINlsCharSetIdToName( - self.environment.handle, - 
charsetname_buf, roci.OCI_NLS_MAXBUFSZ, - charsetId) - self.environment.checkForError( - status, - "Connection_GetCharacterSetName(): get Oracle charset name") - - ianacharset_buf, ianacharset = rffi.alloc_buffer( - roci.OCI_NLS_MAXBUFSZ) - - try: - # get IANA character set name - status = roci.OCINlsNameMap( - self.environment.handle, - ianacharset_buf, roci.OCI_NLS_MAXBUFSZ, - charsetname_buf, roci.OCI_NLS_CS_ORA_TO_IANA) - self.environment.checkForError( - status, - "Connection_GetCharacterSetName(): translate NLS charset") - charset = rffi.charp2str(ianacharset_buf) - finally: - rffi.keep_buffer_alive_until_here(ianacharset_buf, ianacharset) - finally: - rffi.keep_buffer_alive_until_here(charsetname_buf, charsetname) - return space.wrap(charset) - - def get_encoding(self, space): - return self._getCharacterSetName(space, roci.OCI_ATTR_ENV_CHARSET_ID) - def get_nationalencoding(self, space): - return self._getCharacterSetName(space, roci.OCI_ATTR_ENV_CHARSET_ID) - def get_maxbytespercharacter(self, space): - return space.wrap(self.environment.maxBytesPerCharacter) - - def get_version(self, space): - # if version has already been determined, no need to determine again - if self.w_version: - return self.w_version - - # allocate a cursor to retrieve the version - cursor = W_Cursor(space, self) - - # allocate version and compatibility variables - versionVar = VT_String(cursor, cursor.arraySize, MAX_STRING_CHARS) - compatVar = VT_String(cursor, cursor.arraySize, MAX_STRING_CHARS) - - # call stored procedure - cursor._call(space, "dbms_utility.db_version", - None, space.newlist([space.wrap(versionVar), - space.wrap(compatVar)])) - - # retrieve value - self.w_version = versionVar.getValue(space, 0) - return self.w_version - -W_Connection.typedef = TypeDef( - "Connection", - __new__ = interp2app(W_Connection.descr_new.im_func), - username = interp_attrproperty_w('w_username', W_Connection), - password = interp_attrproperty_w('w_password', W_Connection), - tnsentry = 
interp_attrproperty_w('w_tnsentry', W_Connection), - - close = interp2app(W_Connection.close), - commit = interp2app(W_Connection.commit), - rollback = interp2app(W_Connection.rollback), - - cursor = interp2app(W_Connection.newCursor), - - encoding = GetSetProperty(W_Connection.get_encoding), - nationalencoding = GetSetProperty(W_Connection.get_nationalencoding), - maxBytesPerCharacter = GetSetProperty(W_Connection.get_maxbytespercharacter), - version = GetSetProperty(W_Connection.get_version), - ) diff --git a/pypy/module/oracle/interp_cursor.py b/pypy/module/oracle/interp_cursor.py deleted file mode 100644 --- a/pypy/module/oracle/interp_cursor.py +++ /dev/null @@ -1,1094 +0,0 @@ -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.typedef import interp_attrproperty, interp_attrproperty_w -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError -from rpython.rtyper.lltypesystem import rffi, lltype - -from pypy.module.oracle import roci, interp_error -from pypy.module.oracle.config import w_string, string_w, StringBuffer -from pypy.module.oracle import interp_variable -from pypy.module.oracle.interp_error import get - -# XXX are those "assert isinstance(xxx, interp_variable.W_Variable)" necessary? 
-# the bindList should annotate to SomeList(SomeInstance(W_Variable)) - -class W_Cursor(W_Root): - def __init__(self, space, connection): - self.connection = connection - self.environment = connection.environment - - self.w_statement = None - self.statementType = -1 - self.handle = lltype.nullptr(roci.OCIStmt.TO) - self.isOpen = True - self.isOwned = False - - self.setInputSizes = False - self.arraySize = 50 - self.fetchArraySize = 50 - self.bindArraySize = 1 - self.bindList = None - self.bindDict = None - self.numbersAsStrings = False - self.outputSize = -1 - self.outputSizeColumn = -1 - - self.w_inputTypeHandler = None - self.w_outputTypeHandler = None - self.w_rowFactory = None - - def execute(self, space, w_stmt, __args__): - args_w, kw_w = __args__.unpack() - - if space.is_w(w_stmt, space.w_None): - w_stmt = None - - if len(args_w) > 1: - raise OperationError( - space.w_TypeError, - space.wrap("Too many arguments")) - elif len(args_w) == 1: - if len(kw_w) > 0: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap( - "expecting argument or keyword arguments, not both")) - w_vars = args_w[0] - elif len(kw_w) > 0: - w_vars = space.newdict() - for key, w_value in kw_w.iteritems(): - space.setitem(w_vars, space.wrap(key), w_value) - else: - w_vars = None - - # make sure the cursor is open - self._checkOpen(space) - - return self._execute(space, w_stmt, w_vars) - - def prepare(self, space, w_stmt, w_tag=None): - # make sure the cursor is open - self._checkOpen(space) - - # prepare the statement - self._internalPrepare(space, w_stmt, w_tag) - - def _execute(self, space, w_stmt, w_vars): - - # prepare the statement, if applicable - self._internalPrepare(space, w_stmt, None) - - # perform binds - if w_vars is None: - pass - elif space.isinstance_w(w_vars, space.w_dict): - self._setBindVariablesByName(space, w_vars, 1, 0, 0) - else: - self._setBindVariablesByPos(space, w_vars, 1, 0, 0) - self._performBind(space) - - # execute the statement - 
isQuery = self.statementType == roci.OCI_STMT_SELECT - if isQuery: - numIters = 0 - else: - numIters = 1 - self._internalExecute(space, numIters=numIters) - - # perform defines, if necessary - if isQuery and self.fetchVariables is None: - self._performDefine() - - # reset the values of setoutputsize() - self.outputSize = -1 - self.outputSizeColumn = -1 - - # for queries, return the cursor for convenience - if isQuery: - return space.wrap(self) - - # for all other statements, simply return None - return space.w_None - - def executemany(self, space, w_stmt, w_list_of_args): - if space.is_w(w_stmt, space.w_None): - w_stmt = None - if not space.isinstance_w(w_list_of_args, space.w_list): - raise OperationError( - space.w_TypeError, - space.wrap("list expected")) - - # make sure the cursor is open - self._checkOpen(space) - - # prepare the statement - self._internalPrepare(space, w_stmt, None) - - # queries are not supported as the result is undefined - if self.statementType == roci.OCI_STMT_SELECT: - raise OperationError( - get(space).w_NotSupportedError, - space.wrap("queries not supported: results undefined")) - - # perform binds - args_w = space.listview(w_list_of_args) - numrows = len(args_w) - for i in range(numrows): - w_arguments = args_w[i] - deferred = i < numrows - 1 - if space.isinstance_w(w_arguments, space.w_dict): - self._setBindVariablesByName( - space, w_arguments, numrows, i, deferred) - else: - self._setBindVariablesByPos( - space, w_arguments, numrows, i, deferred) - self._performBind(space) - - # execute the statement, but only if the number of rows is greater than - # zero since Oracle raises an error otherwise - if numrows > 0: - self._internalExecute(space, numIters=numrows) - - def close(self, space): - # make sure we are actually open - self._checkOpen(space) - - # close the cursor - self.freeHandle(space, raiseError=True) - - self.isOpen = False - self.handle = lltype.nullptr(roci.OCIStmt.TO) - - @unwrap_spec(name=str) - def callfunc(self, 
space, name, w_returnType, w_parameters=None): - retvar = interp_variable.newVariableByType(space, self, w_returnType, 1) - if space.is_none(w_parameters): - w_parameters = None - - self._call(space, name, retvar, w_parameters) - - # determine the results - return retvar.getValue(space, 0) - - @unwrap_spec(name=str) - def callproc(self, space, name, w_parameters=None): - if space.is_none(w_parameters): - w_parameters = None - - self._call(space, name, None, w_parameters) - - # create the return value - ret_w = [] - if self.bindList: - for v in self.bindList: - assert isinstance(v, interp_variable.W_Variable) - ret_w.append(v.getValue(space, 0)) - return space.newlist(ret_w) - - def _call(self, space, name, retvar, w_args): - # determine the number of arguments passed - if w_args: - numArguments = space.len_w(w_args) - else: - numArguments = 0 - - # make sure we are actually open - self._checkOpen(space) - - # add the return value, if applicable - if retvar: - offset = 1 - w_vars = space.newlist([retvar]) - if w_args: - space.call_method(w_vars, "extend", w_args) - else: - offset = 0 - w_vars = w_args - - # build up the statement - args = ', '.join([':%d' % (i + offset + 1,) - for i in range(numArguments)]) - if retvar: - stmt = "begin :1 := %s(%s); end;" % (name, args) - else: - stmt = "begin %s(%s); end;" % (name, args) - - self._execute(space, space.wrap(stmt), w_vars) - - def _checkOpen(self, space): - if not self.isOpen: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap("not open")) - - def allocateHandle(self): - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIStmt).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_STMT, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Cursor_New()") - self.handle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - self.isOwned = True - - def freeHandle(self, space, 
raiseError=True): - if not self.handle: - return - if self.isOwned: - roci.OCIHandleFree(self.handle, roci.OCI_HTYPE_STMT) - elif self.connection.handle: - tagBuffer = StringBuffer() - tagBuffer.fill(space, self.w_statementTag) - try: - status = roci.OCIStmtRelease( - self.handle, self.environment.errorHandle, - tagBuffer.ptr, tagBuffer.size, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Cursor_FreeHandle()") - finally: - tagBuffer.clear() - - def _internalPrepare(self, space, w_stmt, w_tag): - # make sure we don't get a situation where nothing is to be executed - if w_stmt is None and self.w_statement is None: - raise OperationError( - interp_error.get(space).w_ProgrammingError, - space.wrap("no statement specified " - "and no prior statement prepared")) - - # nothing to do if the statement is identical to the one already stored - # but go ahead and prepare anyway for create, alter and drop statments - if w_stmt is None or w_stmt == self.w_statement: - if self.statementType not in (roci.OCI_STMT_CREATE, - roci.OCI_STMT_DROP, - roci.OCI_STMT_ALTER): - return - w_stmt = self.w_statement - else: - self.w_statement = w_stmt - - # release existing statement, if necessary - self.w_statementTag = w_tag - self.freeHandle(space) - - # prepare statement - self.isOwned = False - handleptr = lltype.malloc(roci.Ptr(roci.OCIStmt).TO, - 1, flavor='raw') - stmtBuffer = StringBuffer() - tagBuffer = StringBuffer() - stmtBuffer.fill(space, w_stmt) - tagBuffer.fill(space, w_tag) - try: - status = roci.OCIStmtPrepare2( - self.connection.handle, handleptr, - self.environment.errorHandle, - stmtBuffer.ptr, stmtBuffer.size, - tagBuffer.ptr, tagBuffer.size, - roci.OCI_NTV_SYNTAX, roci.OCI_DEFAULT) - - self.environment.checkForError( - status, "Connection_InternalPrepare(): prepare") - self.handle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - stmtBuffer.clear() - tagBuffer.clear() - - # clear bind variables, if applicable - if not self.setInputSizes: 
- self.bindList = None - self.bindDict = None - - # clear row factory, if applicable - self.rowFactory = None - - # determine if statement is a query - self._getStatementType() - - def _setErrorOffset(self, space, e): - if e.match(space, get(space).w_DatabaseError): - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, 1, flavor='raw') - try: - roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PARSE_ERROR_OFFSET, - self.environment.errorHandle) - e.offset = attrptr[0] - finally: - lltype.free(attrptr, flavor='raw') - - def _internalExecute(self, space, numIters): - if self.connection.autocommit: - mode = roci.OCI_COMMIT_ON_SUCCESS - else: - mode = roci.OCI_DEFAULT - - status = roci.OCIStmtExecute( - self.connection.handle, - self.handle, - self.environment.errorHandle, - numIters, 0, - lltype.nullptr(roci.OCISnapshot.TO), - lltype.nullptr(roci.OCISnapshot.TO), - mode) - try: - self.environment.checkForError( - status, "Cursor_InternalExecute()") - except OperationError, e: - self._setErrorOffset(space, e) - raise - finally: - self._setRowCount() - - def _getStatementType(self): - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_STMT_TYPE, - self.environment.errorHandle) - - self.environment.checkForError( - status, "Cursor_GetStatementType()") - self.statementType = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - - self.fetchVariables = None - - def getDescription(self, space): - "Return a list of 7-tuples consisting of the description of " - "the define variables" - - # make sure the cursor is open - self._checkOpen(space) - - # fixup bound cursor, if necessary - self._fixupBoundCursor() - - # if not a query, return None - if self.statementType != 
roci.OCI_STMT_SELECT: - return - - # determine number of items in select-list - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub1).TO, 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PARAM_COUNT, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_GetDescription()") - numItems = attrptr[0] - finally: - lltype.free(attrptr, flavor='raw') - - return space.newlist( - [space.newtuple(self._itemDescription(space, i + 1)) - for i in range(numItems)]) - - def _itemDescription(self, space, pos): - "Return a tuple describing the item at the given position" - - # acquire parameter descriptor - paramptr = lltype.malloc(roci.Ptr(roci.OCIParam).TO, - 1, flavor='raw') - try: - status = roci.OCIParamGet( - self.handle, roci.OCI_HTYPE_STMT, - self.environment.errorHandle, - rffi.cast(roci.dvoidpp, paramptr), - pos) - self.environment.checkForError( - status, - "Cursor_GetDescription(): parameter") - param = paramptr[0] - finally: - lltype.free(paramptr, flavor='raw') - - try: - # acquire usable type of item - varType = interp_variable.typeByOracleDescriptor( - param, self.environment) - - # acquire internal size of item - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_DATA_SIZE, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): internal size") - internalSize = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - - # acquire name of item - nameptr = lltype.malloc(rffi.CArrayPtr(roci.oratext).TO, 1, - flavor='raw') - lenptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, 
roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, nameptr), - lenptr, - roci.OCI_ATTR_NAME, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): name") - name = rffi.charpsize2str(nameptr[0], rffi.cast(lltype.Signed, lenptr[0])) - finally: - lltype.free(nameptr, flavor='raw') - lltype.free(lenptr, flavor='raw') - - # lookup precision and scale - if varType is interp_variable.VT_Float: - attrptr = lltype.malloc(rffi.CArrayPtr(roci.sb1).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_SCALE, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): scale") - scale = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PRECISION, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): precision") - precision = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - else: - scale = 0 - precision = 0 - - # lookup whether null is permitted for the attribute - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub1).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_IS_NULL, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): nullable") - nullable = rffi.cast(lltype.Signed, attrptr[0]) != 0 - finally: - lltype.free(attrptr, flavor='raw') - - # set display size based on data type - if varType is interp_variable.VT_String: - 
displaySize = internalSize - elif varType is interp_variable.VT_NationalCharString: - displaySize = internalSize / 2 - elif varType is interp_variable.VT_Binary: - displaySize = internalSize - elif varType is interp_variable.VT_FixedChar: - displaySize = internalSize - elif varType is interp_variable.VT_FixedNationalChar: - displaySize = internalSize / 2 - elif varType is interp_variable.VT_Float: - if precision: - displaySize = precision + 1 - if scale > 0: - displaySize += scale + 1 - else: - displaySize = 127 - elif varType is interp_variable.VT_DateTime: - displaySize = 23 - else: - displaySize = -1 - - # return the tuple - return [space.wrap(name), space.gettypeobject(varType.typedef), - space.wrap(displaySize), space.wrap(internalSize), - space.wrap(precision), space.wrap(scale), - space.wrap(nullable)] - - finally: - roci.OCIDescriptorFree(param, roci.OCI_DTYPE_PARAM) - - def _setBindVariablesByPos(self, space, - w_vars, numElements, arrayPos, defer): - "handle positional binds" - # make sure positional and named binds are not being intermixed - if self.bindDict is not None: - raise OperationError( - get(space).w_ProgrammingError, - space.wrap("positional and named binds cannot be intermixed")) - - if self.bindList is None: - self.bindList = [] - - vars_w = space.fixedview(w_vars) - for i in range(len(vars_w)): - w_value = vars_w[i] - if i < len(self.bindList): - origVar = self.bindList[i] - if space.is_w(origVar, space.w_None): - origVar = None - else: - origVar = None - newVar = self._setBindVariableHelper(space, w_value, origVar, - numElements, arrayPos, defer) - if newVar: - if i < len(self.bindList): - self.bindList[i] = newVar - else: - assert i == len(self.bindList) - self.bindList.append(newVar) - - def _setBindVariablesByName(self, space, - w_vars, numElements, arrayPos, defer): - "handle named binds" - # make sure positional and named binds are not being intermixed - if self.bindList is not None: - raise OperationError( - 
get(space).w_ProgrammingError, - space.wrap("positional and named binds cannot be intermixed")) - - if self.bindDict is None: - self.bindDict = space.newdict() - - items = space.fixedview(space.call_method(w_vars, "iteritems")) - for item in items: - w_key, w_value = space.fixedview(item, 2) - origVar = space.finditem(self.bindDict, w_key) - newVar = self._setBindVariableHelper(space, w_value, origVar, - numElements, arrayPos, defer) - if newVar: - space.setitem(self.bindDict, w_key, newVar) - - def _setBindVariableHelper(self, space, w_value, origVar, - numElements, arrayPos, defer): - - valueIsVariable = space.isinstance_w(w_value, get(space).w_Variable) - newVar = None - - # handle case where variable is already bound - if origVar: - assert isinstance(origVar, interp_variable.W_Variable) - - # if the value is a variable object, rebind it if necessary - if valueIsVariable: - newVar = space.interp_w(interp_variable.W_Variable, w_value) - assert isinstance(newVar, interp_variable.W_Variable) - if newVar == origVar: - newVar = None - - # if the number of elements has changed, create a new variable - # this is only necessary for executemany() since execute() always - # passes a value of 1 for the number of elements - elif numElements > origVar.allocatedElements: - newVar = origVar.clone( - self, numElements, origVar.size) - assert isinstance(newVar, interp_variable.W_Variable) - newVar.setValue(space, arrayPos, w_value) - - # otherwise, attempt to set the value - else: - try: - origVar.setValue(space, arrayPos, w_value) - except OperationError, e: - # executemany() should simply fail after the first element - if arrayPos > 0: - raise - # anything other than IndexError or TypeError should fail - if (not e.match(space, space.w_IndexError) and - not e.match(space, space.w_TypeError)): - raise - # catch the exception and try to create a new variable - origVar = None - - if not origVar: - # if the value is a variable object, bind it directly - if valueIsVariable: - newVar 
= space.interp_w(interp_variable.W_Variable, w_value) - assert isinstance(newVar, interp_variable.W_Variable) - newVar.boundPos = 0 - newVar.boundName = None - - # otherwise, create a new variable, unless the value is None and - # we wish to defer type assignment - elif not space.is_w(w_value, space.w_None) or not defer: - newVar = interp_variable.newVariableByValue(space, self, - w_value, - numElements) - assert isinstance(newVar, interp_variable.W_Variable) - newVar.setValue(space, arrayPos, w_value) - - assert newVar is None or isinstance(newVar, interp_variable.W_Variable) - return newVar - - def _performBind(self, space): - # set values and perform binds for all bind variables - if self.bindList: - for i in range(len(self.bindList)): - var = self.bindList[i] - assert isinstance(var, interp_variable.W_Variable) - var.bind(space, self, None, i + 1) - if self.bindDict: - items_w = space.fixedview( - space.call_method(self.bindDict, "iteritems")) - for w_item in items_w: - w_key, var = space.fixedview(w_item, 2) - assert isinstance(var, interp_variable.W_Variable) - var.bind(space, self, w_key, 0) - - # ensure that input sizes are reset - self.setInputSizes = False - - def _setRowCount(self): - if self.statementType == roci.OCI_STMT_SELECT: - self.rowCount = 0 - self.actualRows = -1 - self.rowNum = 0 - elif self.statementType in (roci.OCI_STMT_INSERT, - roci.OCI_STMT_UPDATE, - roci.OCI_STMT_DELETE): - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, - 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_ROW_COUNT, - self.environment.errorHandle) - - self.environment.checkForError( - status, "Cursor_SetRowCount()") - self.rowCount = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - else: - self.rowCount = -1 - - def _performDefine(self): - # determine number of items in select-list - attrptr = 
lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, - 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PARAM_COUNT, - self.environment.errorHandle) - - self.environment.checkForError( - status, "Cursor_PerformDefine()") - numParams = attrptr[0] - finally: - lltype.free(attrptr, flavor='raw') - - self.fetchVariables = [] - - # define a variable for each select-item - self.fetchArraySize = self.arraySize - for i in range(numParams): - var = interp_variable.define(self, i+1, self.fetchArraySize) - assert isinstance(var, interp_variable.W_Variable) - self.fetchVariables.append(var) - - def _verifyFetch(self, space): - # make sure the cursor is open - self._checkOpen(space) - - # fixup bound cursor, if necessary - self._fixupBoundCursor() - - # make sure the cursor is for a query - if self.statementType != roci.OCI_STMT_SELECT: - raise OperationError( - get(space).w_InterfaceError, - space.wrap("not a query")) - - def _fixupBoundCursor(self): - if self.handle and self.statementType < 0: - self._getStatementType() - if self.statementType == roci.OCI_STMT_SELECT: - self._performDefine() - self._setRowCount() - - def fetchone(self, space): - # verify fetch can be performed - self._verifyFetch(space) - - # setup return value - if self._moreRows(space): - return self._createRow(space) - - return space.w_None - - def fetchmany(self, space, w_numRows=None): - if w_numRows is not None: - numRows = space.int_w(w_numRows) - else: - numRows = self.arraySize - - # verify fetch can be performed - self._verifyFetch(space) - - return self._multiFetch(space, limit=numRows) - - def fetchall(self, space): - # verify fetch can be performed - self._verifyFetch(space) - - return self._multiFetch(space, limit=0) - - def descr_iter(self, space): - self._verifyFetch(space) - return space.wrap(self) - - def descr_next(self, space): - # verify fetch can be performed - 
self._verifyFetch(space) - - # setup return value - if self._moreRows(space): - return self._createRow(space) - - raise OperationError(space.w_StopIteration, space.w_None) - - def _moreRows(self, space): - if self.rowNum < self.actualRows: - return True - if self.actualRows < 0 or self.actualRows == self.fetchArraySize: - self._internalFetch(space, self.fetchArraySize) - if self.rowNum < self.actualRows: - return True - - return False - - def _internalFetch(self, space, numRows): - if not self.fetchVariables: - raise OperationError( - get(space).w_InterfaceError, - space.wrap("query not executed")) - - status = roci.OCIStmtFetch( - self.handle, - self.environment.errorHandle, - numRows, - roci.OCI_FETCH_NEXT, - roci.OCI_DEFAULT) - - if status != roci.OCI_NO_DATA: - self.environment.checkForError( - status, - "Cursor_InternalFetch(): fetch") - - for var in self.fetchVariables: - assert isinstance(var, interp_variable.W_Variable) - var.internalFetchNum += 1 - - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, - 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_ROW_COUNT, - self.environment.errorHandle) - - self.environment.checkForError( - status, "Cursor_InternalFetch(): row count") - - self.actualRows = (rffi.cast(lltype.Signed, attrptr[0]) - - self.rowCount) - self.rowNum = 0 - finally: - lltype.free(attrptr, flavor='raw') - - def _multiFetch(self, space, limit=0): - results_w = [] - rowNum = 0 - - # fetch as many rows as possible - while limit == 0 or rowNum < limit: - rowNum += 1 - if not self._moreRows(space): - break - w_row = self._createRow(space) - results_w.append(w_row) - return space.newlist(results_w) - - def _createRow(self, space): - items_w = [] - # acquire the value for each item - for var in self.fetchVariables: - assert isinstance(var, interp_variable.W_Variable) - w_item = var.getValue(space, self.rowNum) - 
items_w.append(w_item) - - # increment row counters - self.rowNum += 1 - self.rowCount += 1 - - w_row = space.newtuple(items_w) - - # if a row factory is defined, call it - if self.w_rowFactory: - w_row = space.call(self.w_rowFactory, w_row) - - return w_row - - def _get_bind_info(self, space, numElements): - # avoid bus errors on 64bit platforms - numElements = numElements + (rffi.sizeof(roci.dvoidp) - - numElements % rffi.sizeof(roci.dvoidp)) - # initialize the buffers - bindNames = lltype.malloc(roci.Ptr(roci.oratext).TO, - numElements, flavor='raw') - bindNameLengths = lltype.malloc(roci.Ptr(roci.ub1).TO, - numElements, flavor='raw') - indicatorNames = lltype.malloc(roci.Ptr(roci.oratext).TO, - numElements, flavor='raw') - indicatorNameLengths = lltype.malloc(roci.Ptr(roci.ub1).TO, - numElements, flavor='raw') - duplicate = lltype.malloc(roci.Ptr(roci.ub1).TO, - numElements, flavor='raw') - bindHandles = lltype.malloc(roci.Ptr(roci.OCIBind).TO, - numElements, flavor='raw') - - foundElementsPtr = lltype.malloc(roci.Ptr(roci.sb4).TO, 1, - flavor='raw') - - try: - status = roci.OCIStmtGetBindInfo( - self.handle, - self.environment.errorHandle, - numElements, - 1, - foundElementsPtr, - bindNames, bindNameLengths, - indicatorNames, indicatorNameLengths, - duplicate, bindHandles) - if status != roci.OCI_NO_DATA: - self.environment.checkForError( - status, "Cursor_GetBindNames()") - - # Too few elements allocated - foundElements = rffi.cast(lltype.Signed, foundElementsPtr[0]) - if foundElements < 0: - return -foundElements, None - - names_w = [] - # process the bind information returned - for i in range(foundElements): - if rffi.cast(lltype.Signed, duplicate[i]): - continue - names_w.append( - w_string(space, - bindNames[i], - rffi.cast(lltype.Signed, bindNameLengths[i]))) - - return 0, names_w - finally: - lltype.free(bindNames, flavor='raw') - lltype.free(bindNameLengths, flavor='raw') - lltype.free(indicatorNames, flavor='raw') - lltype.free(indicatorNameLengths, 
flavor='raw') - lltype.free(duplicate, flavor='raw') - lltype.free(bindHandles, flavor='raw') - lltype.free(foundElementsPtr, flavor='raw') - - def bindnames(self, space): - # make sure the cursor is open - self._checkOpen(space) - - # ensure that a statement has already been prepared - if not self.w_statement: - raise OperationError(get(space).w_ProgrammingError, - space.wrap("statement must be prepared first")) - - nbElements, names = self._get_bind_info(space, 8) - if nbElements: - _, names = self._get_bind_info(space, nbElements) - return space.newlist(names) - - @unwrap_spec(size=int) - def var(self, space, w_type, size=0, w_arraysize=None, - w_inconverter=None, w_outconverter=None): - if space.is_none(w_arraysize): - arraySize = self.bindArraySize - else: - arraySize = space.int_w(w_arraysize) - - # determine the type of variable - varType = interp_variable.typeByPythonType(space, self, w_type) - if varType.isVariableLength and size == 0: - size = varType.size - - # create the variable - var = varType(self, arraySize, size) - var.w_inconverter = w_inconverter - var.w_outconverter = w_outconverter - - return space.wrap(var) - - @unwrap_spec(size=int) - def arrayvar(self, space, w_type, w_value, size=0): - # determine the type of variable - varType = interp_variable.typeByPythonType(space, self, w_type) - if varType.isVariableLength and size == 0: - size = varType.size - - # determine the number of elements to create - if space.isinstance_w(w_value, space.w_list): - numElements = space.len_w(w_value) - elif space.isinstance_w(w_value, space.w_int): - numElements = space.int_w(w_value) - else: - raise OperationError( - get(space).w_NotSupportedError, - space.wrap("expecting integer or list of values")) - - # create the variable - var = varType(self, numElements, size) - var.makeArray(space) - - # set the value, if applicable - if space.isinstance_w(w_value, space.w_list): - var.setArrayValue(space, w_value) - - return var - - def setinputsizes(self, space, 
__args__): - args_w, kw_w = __args__.unpack() - - # only expect keyword arguments or positional arguments, not both - if args_w and kw_w: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap( - "expecting argument or keyword arguments, not both")) - - # make sure the cursor is open - self._checkOpen(space) - - # eliminate existing bind variables - self.bindList = None - self.bindDict = None - - self.setInputSizes = True - - # process each input - if kw_w: - self.bindDict = space.newdict() - for key, w_value in kw_w.iteritems(): - var = interp_variable.newVariableByType( - space, self, w_value, self.bindArraySize) - space.setitem(self.bindDict, space.wrap(key), var) - return self.bindDict - else: - self.bindList = [None] * len(args_w) - for i in range(len(args_w)): - w_value = args_w[i] - if space.is_w(w_value, space.w_None): - var = None - else: - var = interp_variable.newVariableByType( - space, self, w_value, self.bindArraySize) - self.bindList[i] = var - return space.newlist(self.bindList) - - @unwrap_spec(outputSize=int, outputSizeColumn=int) - def setoutputsize(self, space, outputSize, outputSizeColumn=-1): - self.outputSize = outputSize - self.outputSizeColumn = outputSizeColumn - - - def arraysize_get(self, space): - return space.wrap(self.arraySize) - def arraysize_set(self, space, w_value): - self.arraySize = space.int_w(w_value) - - def bindarraysize_get(self, space): - return space.wrap(self.bindArraySize) - def bindarraysize_set(self, space, w_value): - self.bindArraySize = space.int_w(w_value) - - def bindvars_get(self, space): - if self.bindList: - return space.newlist(self.bindList) - if self.bindDict: - return self.bindDict - - def fetchvars_get(self, space): - return space.newlist(self.fetchVariables) - -W_Cursor.typedef = TypeDef( - 'Cursor', - execute = interp2app(W_Cursor.execute), - executemany = interp2app(W_Cursor.executemany), - prepare = interp2app(W_Cursor.prepare), - fetchone = interp2app(W_Cursor.fetchone), - 
fetchmany = interp2app(W_Cursor.fetchmany), - fetchall = interp2app(W_Cursor.fetchall), - close = interp2app(W_Cursor.close), - bindnames = interp2app(W_Cursor.bindnames), - callfunc = interp2app(W_Cursor.callfunc), - callproc = interp2app(W_Cursor.callproc), - var = interp2app(W_Cursor.var), - arrayvar = interp2app(W_Cursor.arrayvar), - setinputsizes = interp2app(W_Cursor.setinputsizes), - setoutputsize = interp2app(W_Cursor.setoutputsize), - - __iter__ = interp2app(W_Cursor.descr_iter), - next = interp2app(W_Cursor.descr_next), - - arraysize = GetSetProperty(W_Cursor.arraysize_get, - W_Cursor.arraysize_set), - bindarraysize = GetSetProperty(W_Cursor.bindarraysize_get, - W_Cursor.bindarraysize_set), - rowcount = interp_attrproperty('rowCount', W_Cursor), - statement = interp_attrproperty_w('w_statement', W_Cursor), - bindvars = GetSetProperty(W_Cursor.bindvars_get), - fetchvars = GetSetProperty(W_Cursor.fetchvars_get), - description = GetSetProperty(W_Cursor.getDescription), -) diff --git a/pypy/module/oracle/interp_environ.py b/pypy/module/oracle/interp_environ.py deleted file mode 100644 --- a/pypy/module/oracle/interp_environ.py +++ /dev/null @@ -1,99 +0,0 @@ -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.oracle import roci, config -from pypy.interpreter.error import OperationError - -from pypy.module.oracle.interp_error import W_Error, get - -class Environment(object): - def __init__(self, space, handle): - self.space = space - self.handle = handle - - # create the error handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIError).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.handle, - handleptr, roci.OCI_HTYPE_ERROR, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.checkForError( - status, "Environment_New(): create error handle") - self.errorHandle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - - def checkForError(self, status, context): - if status in (roci.OCI_SUCCESS, 
roci.OCI_SUCCESS_WITH_INFO): - return - - if status != roci.OCI_INVALID_HANDLE: - # At this point it is assumed that the Oracle - # environment is fully initialized - error = W_Error(self.space, self, context, 1) - if error.code in (1, 1400, 2290, 2291, 2292): - w_type = get(self.space).w_IntegrityError - elif error.code in (1012, 1033, 1034, 1089, 3113, 3114, - 12203, 12500, 12571): - w_type = get(self.space).w_OperationalError - else: - w_type = get(self.space).w_DatabaseError - raise OperationError(w_type, self.space.wrap(error)) - - error = W_Error(self.space, self, context, 0) - error.code = 0 - error.w_message = self.space.wrap("Invalid handle!") - raise OperationError(get(self.space).w_DatabaseError, - self.space.wrap(error)) - - @staticmethod - def create(space, threaded, events): - "Create a new environment object from scratch" - mode = roci.OCI_OBJECT - if threaded: - mode |= roci.OCI_THREADED - if events: - mode |= roci.OCI_EVENTS - - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIEnv).TO, - 1, flavor='raw') - - try: - - status = roci.OCIEnvNlsCreate( - handleptr, mode, - None, - None, None, None, - 0, lltype.nullptr(rffi.CArray(roci.dvoidp)), - config.CHARSETID, config.CHARSETID) - - if not handleptr[0] or status not in (roci.OCI_SUCCESS, - roci.OCI_SUCCESS_WITH_INFO): - raise OperationError( - get(space).w_InterfaceError, - space.wrap( - "Unable to acquire Oracle environment handle")) - - handle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - try: - newenv = Environment(space, handle) - except: - roci.OCIHandleFree(handle, roci.OCI_HTYPE_ENV) - raise - - newenv.maxBytesPerCharacter = config.BYTES_PER_CHAR - newenv.maxStringBytes = config.BYTES_PER_CHAR * config.MAX_STRING_CHARS From noreply at buildbot.pypy.org Tue Apr 29 23:45:43 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 29 Apr 2014 23:45:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed oracle from list of modules Message-ID: 
<20140429214543.093061C35CF@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71065:db0e3930b89d Date: 2014-04-29 14:44 -0700 http://bitbucket.org/pypy/pypy/changeset/db0e3930b89d/ Log: Removed oracle from list of modules diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -58,7 +58,6 @@ math mmap operator - oracle parser posix pyexpat From noreply at buildbot.pypy.org Wed Apr 30 00:12:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 00:12:41 +0200 (CEST) Subject: [pypy-commit] pypy default: issue1748: provide array.__iter__ Message-ID: <20140429221241.467561C01CB@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71066:a199610f2d6a Date: 2014-04-29 15:11 -0700 http://bitbucket.org/pypy/pypy/changeset/a199610f2d6a/ Log: issue1748: provide array.__iter__ diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -448,6 +448,9 @@ self.descr_delitem(space, space.newslice(w_start, w_stop, space.w_None)) + def descr_iter(self, space): + return space.newseqiter(self) + def descr_add(self, space, w_other): raise NotImplementedError @@ -503,6 +506,7 @@ __setslice__ = interp2app(W_ArrayBase.descr_setslice), __delitem__ = interp2app(W_ArrayBase.descr_delitem), __delslice__ = interp2app(W_ArrayBase.descr_delslice), + __iter__ = interp2app(W_ArrayBase.descr_iter), __add__ = interpindirect2app(W_ArrayBase.descr_add), __iadd__ = interpindirect2app(W_ArrayBase.descr_inplace_add), diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -697,6 +697,8 @@ for i in a: b.append(i) assert repr(b) == "array('i', [1, 2, 3])" + assert hasattr(b, '__iter__') + assert next(b.__iter__()) == 1 
def test_lying_iterable(self): class lier(object): From noreply at buildbot.pypy.org Wed Apr 30 00:19:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 00:19:40 +0200 (CEST) Subject: [pypy-commit] pypy default: utilize %T for py3k (don't need the full get_module_type_name anyway) Message-ID: <20140429221940.94A4F1C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71067:c4296bb01a13 Date: 2014-04-29 15:13 -0700 http://bitbucket.org/pypy/pypy/changeset/c4296bb01a13/ Log: utilize %T for py3k (don't need the full get_module_type_name anyway) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1415,10 +1415,10 @@ def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): - name = "None" + e = oefmt(self.w_TypeError, "must be %s, not None", expected) else: - name = self.type(w_obj).get_module_type_name() - raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + e = oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + raise e @specialize.arg(1) def getarg_w(self, code, w_obj): From noreply at buildbot.pypy.org Wed Apr 30 00:19:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 00:19:41 +0200 (CEST) Subject: [pypy-commit] pypy default: shortcut through setitem_str when possible Message-ID: <20140429221941.CCFB91C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71068:de6c6c698d53 Date: 2014-04-29 15:14 -0700 http://bitbucket.org/pypy/pypy/changeset/de6c6c698d53/ Log: shortcut through setitem_str when possible diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -321,10 +321,11 @@ limit -= len(self.keyword_names_w) for i in range(len(self.keywords)): if i < limit: - w_key = space.wrap(self.keywords[i]) + key = self.keywords[i] + 
space.setitem_str(w_kwds, key, self.keywords_w[i]) else: w_key = self.keyword_names_w[i - limit] - space.setitem(w_kwds, w_key, self.keywords_w[i]) + space.setitem(w_kwds, w_key, self.keywords_w[i]) return w_args, w_kwds # JIT helper functions @@ -416,10 +417,10 @@ break else: if i < limit: - w_key = space.wrap(keywords[i]) + space.setitem_str(w_kwds, keywords[i], keywords_w[i]) else: w_key = keyword_names_w[i - limit] - space.setitem(w_kwds, w_key, keywords_w[i]) + space.setitem(w_kwds, w_key, keywords_w[i]) # # ArgErr family of exceptions raised in case of argument mismatch. diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -93,6 +93,7 @@ def setitem(self, obj, key, value): obj[key] = value + setitem_str = setitem def getitem(self, obj, key): return obj[key] From noreply at buildbot.pypy.org Wed Apr 30 00:19:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 00:19:42 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140429221942.F1F7F1C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71069:feea2dc3da98 Date: 2014-04-29 15:18 -0700 http://bitbucket.org/pypy/pypy/changeset/feea2dc3da98/ Log: cleanup diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -1,20 +1,17 @@ -""" -Implementation of the 'buffer' and 'memoryview' types. 
-""" +"""Implementation of the 'buffer' type""" import operator from rpython.rlib.buffer import Buffer, SubBuffer +from rpython.rlib.objectmodel import compute_hash + from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from rpython.rlib.objectmodel import compute_hash class W_Buffer(W_Root): - """Implement the built-in 'buffer' type as a wrapper around - an interp-level buffer. - """ + """The 'buffer' type: a wrapper around an interp-level buffer""" def __init__(self, buf): assert isinstance(buf, Buffer) @@ -29,8 +26,7 @@ def writebuf_w(self, space): if self.buf.readonly: - raise OperationError(space.w_TypeError, space.wrap( - "buffer is read-only")) + raise oefmt(space.w_TypeError, "buffer is read-only") return self.buf def charbuf_w(self, space): @@ -44,11 +40,9 @@ return W_Buffer(buf) # handle buffer slices if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) + raise oefmt(space.w_ValueError, "offset must be zero or positive") if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) + raise oefmt(space.w_ValueError, "size must be zero or positive") buf = SubBuffer(buf, offset, size) return W_Buffer(buf) @@ -56,7 +50,8 @@ return space.wrap(self.buf.getlength()) def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) + start, stop, step, size = space.decode_index4(w_index, + self.buf.getlength()) if step == 0: # index only return space.wrap(self.buf.getitem(start)) res = self.buf.getslice(start, stop, step, size) @@ -64,19 +59,19 @@ def descr_setitem(self, space, w_index, w_obj): if self.buf.readonly: - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - start, stop, step, size = 
space.decode_index4(w_index, self.buf.getlength()) + raise oefmt(space.w_TypeError, "buffer is read-only") + start, stop, step, size = space.decode_index4(w_index, + self.buf.getlength()) value = space.readbuf_w(w_obj) if step == 0: # index only if value.getlength() != 1: - msg = "right operand must be a single byte" - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise oefmt(space.w_TypeError, + "right operand must be a single byte") self.buf.setitem(start, value.getitem(0)) else: if value.getlength() != size: - msg = "right operand length must match slice length" - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise oefmt(space.w_TypeError, + "right operand length must match slice length") if step == 1: self.buf.setslice(start, value.as_str()) else: From noreply at buildbot.pypy.org Wed Apr 30 01:11:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:11:57 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default (8a2e9e0c1676) Message-ID: <20140429231157.7FF941C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71070:67528c976db5 Date: 2014-04-28 18:04 -0700 http://bitbucket.org/pypy/pypy/changeset/67528c976db5/ Log: merge default (8a2e9e0c1676) diff too long, truncating to 2000 out of 3538 lines diff --git a/lib-python/2.7/test/test_array.py b/lib-python/2.7/test/test_array.py --- a/lib-python/2.7/test/test_array.py +++ b/lib-python/2.7/test/test_array.py @@ -298,6 +298,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a + b + with self.assertRaises(TypeError): a + 'bad' @@ -320,6 +321,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a += b + with self.assertRaises(TypeError): a += 'bad' diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -479,11 +479,10 @@ def _create_file(self): if self.use_buffering: - f = 
open(self.filename, "w+", buffering=1024*16) + self.f = open(self.filename, "w+", buffering=1024*16) else: - f = open(self.filename, "w+") - self.f = f - self.all_files.append(f) + self.f = open(self.filename, "w+") + self.all_files.append(self.f) oldf = self.all_files.pop(0) if oldf is not None: oldf.close() diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -115,8 +115,8 @@ self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object - self.assertRaises((ValueError, TypeError), setitem, 0, b"") - self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") + self.assertRaises(ValueError, setitem, 0, b"") + self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") @@ -166,18 +166,11 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - if test_support.check_impl_detail(cpython=True): - # what is supported and what is not supported by memoryview is - # very inconsisten on CPython. In PyPy, memoryview supports - # the buffer interface, and thus the following comparison - # succeeds. 
See also the comment in - # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer - # - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef") + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -9,3 +9,6 @@ Improve optimiziation of small allocation-heavy loops in the JIT .. branch: reflex-support + +.. branch: refactor-buffer-api +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -5,7 +5,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id) + compute_unique_id, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX @@ -193,14 +193,37 @@ def immutable_unique_id(self, space): return None - def buffer_w(self, space): + def buffer_w(self, space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_memoryview): - return w_result.buffer_w(space) - raise oefmt(space.w_TypeError, - "'%T' does not support the buffer interface", self) + return w_result.buffer_w(space, flags) + 
raise TypeError + + def readbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_memoryview): + return w_result.readbuf_w(space) + raise TypeError + + def writebuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_memoryview): + return w_result.writebuf_w(space) + raise TypeError + + def charbuf_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_memoryview): + return w_result.charbuf_w(space) + raise TypeError def bytes_w(self, space): self._typed_unwrap_error(space, "bytes") @@ -1332,24 +1355,109 @@ self.wrap('cannot convert negative integer ' 'to unsigned int')) - def buffer_w(self, w_obj): - return w_obj.buffer_w(self) + BUF_SIMPLE = 0x0000 + BUF_WRITABLE = 0x0001 + BUF_FORMAT = 0x0004 + BUF_ND = 0x0008 + BUF_STRIDES = 0x0010 | BUF_ND + BUF_INDIRECT = 0x0100 | BUF_STRIDES - def rwbuffer_w(self, w_obj): - # returns a RWBuffer instance - from pypy.interpreter.buffer import RWBuffer - buffer = self.buffer_w(w_obj) - if not isinstance(buffer, RWBuffer): - raise OperationError(self.w_TypeError, - self.wrap('read-write buffer expected')) - return buffer + BUF_CONTIG_RO = BUF_ND + BUF_CONTIG = BUF_ND | BUF_WRITABLE - def bufferstr_new_w(self, w_obj): - # Implement the "new buffer interface" (new in Python 2.7) - # returning an unwrapped string. 
It doesn't accept unicode - # strings - buffer = self.buffer_w(w_obj) - return buffer.as_str() + BUF_FULL_RO = BUF_INDIRECT | BUF_FORMAT + BUF_FULL = BUF_INDIRECT | BUF_FORMAT | BUF_WRITABLE + + def check_buf_flags(self, flags, readonly): + if readonly and flags & self.BUF_WRITABLE == self.BUF_WRITABLE: + raise oefmt(self.w_BufferError, "Object is not writable.") + + def buffer_w(self, w_obj, flags): + # New buffer interface, returns a buffer based on flags (PyObject_GetBuffer) + try: + return w_obj.buffer_w(self, flags) + except TypeError: + raise oefmt(self.w_TypeError, + "'%T' does not have the buffer interface", w_obj) + + def readbuf_w(self, w_obj): + # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) + try: + return w_obj.readbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a readable buffer object") + + def writebuf_w(self, w_obj): + # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) + try: + return w_obj.writebuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a writeable buffer object") + + def charbuf_w(self, w_obj): + # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) + try: + return w_obj.charbuf_w(self) + except TypeError: + raise oefmt(self.w_TypeError, + "expected a character buffer object") + + def _getarg_error(self, expected, w_obj): + if self.is_none(w_obj): + name = "None" + else: + name = self.type(w_obj).get_module_type_name() + raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + + @specialize.arg(1) + def getarg_w(self, code, w_obj): + if code == 'z*': + if self.is_none(w_obj): + return None + code = 's*' + if code == 's*': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.readbuf_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).readbuf_w(self) + try: + return w_obj.buffer_w(self, 0) + except TypeError: + pass + try: + return w_obj.readbuf_w(self) + except 
TypeError: + self._getarg_error("string or buffer", w_obj) + elif code == 's#': + if self.isinstance_w(w_obj, self.w_str): + return w_obj.str_w(self) + if self.isinstance_w(w_obj, self.w_unicode): + return self.str(w_obj).str_w(self) + try: + return w_obj.readbuf_w(self).as_str() + except TypeError: + self._getarg_error("string or read-only buffer", w_obj) + elif code == 'w*': + try: + try: + return w_obj.buffer_w(self, self.BUF_WRITABLE) + except OperationError: + self._getarg_error("read-write buffer", w_obj) + except TypeError: + pass + try: + return w_obj.writebuf_w(self) + except TypeError: + self._getarg_error("read-write buffer", w_obj) + elif code == 't#': + try: + return w_obj.charbuf_w(self) + except TypeError: + self._getarg_error("string or read-only character buffer", w_obj) + else: + assert False def bufferstr0_new_w(self, w_obj): from rpython.rlib import rstring @@ -1359,6 +1467,7 @@ 'argument must be a string without NUL characters')) return rstring.assert_str0(result) + # XXX rename/replace with code more like CPython getargs for buffers def bufferstr_w(self, w_obj): # Directly returns an interp-level str. Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): @@ -1373,8 +1482,18 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - buffer = self.buffer_w(w_obj) - return buffer.as_str() + try: + buf = w_obj.buffer_w(self, 0) + except TypeError: + pass + else: + return buf.as_str() + try: + buf = w_obj.readbuf_w(self) + except TypeError: + self._getarg_error("string or buffer", w_obj) + else: + return buf.as_str() def bufferstr_or_u_w(self, w_obj): """Returns an interp-level str, directly if possible. diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py deleted file mode 100644 --- a/pypy/interpreter/buffer.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Buffer protocol support. 
-""" -from rpython.rlib.objectmodel import import_from_mixin - - -class Buffer(object): - """Abstract base class for buffers.""" - __slots__ = [] - - def getlength(self): - raise NotImplementedError - - def as_str(self): - "Returns an interp-level string with the whole content of the buffer." - # May be overridden. - return self.getslice(0, self.getlength(), 1, self.getlength()) - - def getitem(self, index): - "Returns the index'th character in the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def getslice(self, start, stop, step, size): - # May be overridden. No bounds checks. - return ''.join([self.getitem(i) for i in range(start, stop, step)]) - - def get_raw_address(self): - raise ValueError("no raw buffer") - - def is_writable(self): - return False - - -class RWBuffer(Buffer): - """Abstract base class for read-write buffers.""" - __slots__ = [] - - def is_writable(self): - return True - - def setitem(self, index, char): - "Write a character into the buffer." - raise NotImplementedError # Must be overriden. No bounds checks. - - def setslice(self, start, string): - # May be overridden. No bounds checks. 
- for i in range(len(string)): - self.setitem(start + i, string[i]) - - -class StringBuffer(Buffer): - __slots__ = ['value'] - - def __init__(self, value): - self.value = value - - def getlength(self): - return len(self.value) - - def as_str(self): - return self.value - - def getitem(self, index): - return self.value[index] - - def getslice(self, start, stop, step, size): - if size == 0: - return "" - if step == 1: - assert 0 <= start <= stop - return self.value[start:stop] - return "".join([self.value[start + i*step] for i in xrange(size)]) -# ____________________________________________________________ - - -class SubBufferMixin(object): - _attrs_ = ['buffer', 'offset', 'size'] - - def __init__(self, buffer, offset, size): - self.buffer = buffer - self.offset = offset - self.size = size - - def getlength(self): - at_most = self.buffer.getlength() - self.offset - if 0 <= self.size <= at_most: - return self.size - elif at_most >= 0: - return at_most - else: - return 0 - - def getitem(self, index): - return self.buffer.getitem(self.offset + index) - - def getslice(self, start, stop, step, size): - if start == stop: - return '' # otherwise, adding self.offset might make them - # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, - step, size) - - -class SubBuffer(Buffer): - import_from_mixin(SubBufferMixin) - - -class RWSubBuffer(RWBuffer): - import_from_mixin(SubBufferMixin) - - def setitem(self, index, char): - self.buffer.setitem(self.offset + index, char) - - def setslice(self, start, string): - if len(string) == 0: - return # otherwise, adding self.offset might make 'start' - # out of bounds - self.buffer.setslice(self.offset + start, string) diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py deleted file mode 100644 --- a/pypy/interpreter/test/test_buffer.py +++ /dev/null @@ -1,37 +0,0 @@ -import py -from rpython.tool.udir import udir - -testdir = udir.ensure('test_buffer', dir=1) - - -class 
TestBuffer: - def test_buffer_w(self): - space = self.space - w_hello = space.wrapbytes('hello world') - buf = space.buffer_w(w_hello) - assert buf.getlength() == 11 - assert buf.as_str() == 'hello world' - assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.newbuffer(buf)) is buf - assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' - space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - e = space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - message = space.unwrap(e.value.get_w_value(space)) - assert "'int' does not support the buffer interface" == message - - def test_file_write(self): - space = self.space - w_buffer = space.newbuffer(space.buffer_w(space.wrapbytes('hello world'))) - filename = str(testdir.join('test_file_write')) - space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): - f = open(filename, 'wb') - f.write(buffer) - f.close() - """) - f = open(filename, 'rb') - data = f.read() - f.close() - assert data == 'hello world' - -# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -2,13 +2,16 @@ # A convenient read-write buffer. Located here for want of a better place. 
# -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from pypy.interpreter.gateway import unwrap_spec -class ByteBuffer(RWBuffer): +class ByteBuffer(Buffer): + _immutable_ = True + def __init__(self, len): self.data = ['\x00'] * len + self.readonly = False def getlength(self): return len(self.data) diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -12,3 +12,18 @@ assert b[-1] == b'*' assert b[-2] == b'-' assert b[-3] == b'+' + exc = raises(TypeError, "b[3] = b'abc'") + assert str(exc.value) == "right operand must be a single byte" + exc = raises(TypeError, "b[3:5] = b'abc'") + assert str(exc.value) == "right operand length must match slice length" + exc = raises(TypeError, "b[3:7:2] = b'abc'") + assert str(exc.value) == "right operand length must match slice length" + + b = bytebuffer(10) + b[1:3] = b'xy' + assert bytes(b) == b"\x00xy" + b"\x00" * 7 + b[4:8:2] = b'zw' + assert bytes(b) == b"\x00xy\x00z\x00w" + b"\x00" * 3 + r = str(buffer(u'#')) + b[6:6+len(r)] = u'#' + assert str(b[:6+len(r)]) == "\x00xy\x00z\x00" + r diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,22 +1,23 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray from pypy.objspace.std.memoryview import _buffer_setitem +from rpython.rlib.buffer import Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem.rstr import 
copy_string_to_raw -class LLBuffer(RWBuffer): +class LLBuffer(Buffer): _immutable_ = True def __init__(self, raw_cdata, size): self.raw_cdata = raw_cdata self.size = size + self.readonly = False def getlength(self): return self.size @@ -33,7 +34,7 @@ def getslice(self, start, stop, step, size): if step == 1: return rffi.charpsize2str(rffi.ptradd(self.raw_cdata, start), size) - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -72,7 +72,7 @@ 'utf_32_le_decode' : 'interp_codecs.utf_32_le_decode', 'utf_32_le_encode' : 'interp_codecs.utf_32_le_encode', 'utf_32_ex_decode' : 'interp_codecs.utf_32_ex_decode', - 'readbuffer_encode': 'interp_codecs.buffer_encode', + 'readbuffer_encode': 'interp_codecs.readbuffer_encode', 'charmap_decode' : 'interp_codecs.charmap_decode', 'charmap_encode' : 'interp_codecs.charmap_encode', 'escape_encode' : 'interp_codecs.escape_encode', diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -423,8 +423,9 @@ w_res = space.call_function(w_encoder, w_obj, space.wrap(errors)) return space.getitem(w_res, space.wrap(0)) - at unwrap_spec(s='bufferstr_or_u', errors='str_or_None') -def buffer_encode(space, s, errors='strict'): + at unwrap_spec(errors='str_or_None') +def readbuffer_encode(space, w_data, errors='strict'): + s = space.getarg_w('s#', w_data) return space.newtuple([space.wrapbytes(s), space.wrap(len(s))]) @unwrap_spec(errors=str) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ 
-428,14 +428,12 @@ for (i, line) in enumerate(reader): assert line == s[i] - def test_readbuffer_encode(self): - import _codecs - assert _codecs.readbuffer_encode("") == (b"", 0) - - def test_readbuffer_encode_array(self): + def test_buffer_encode(self): import _codecs, array assert (_codecs.readbuffer_encode(array.array('b', b'spam')) == (b'spam', 4)) + assert _codecs.readbuffer_encode(u"test") == (b'test', 4) + assert _codecs.readbuffer_encode("") == (b"", 0) def test_utf8sig(self): import codecs diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -81,7 +81,7 @@ self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() w_data = space.call_method(self, "read", space.wrap(length)) @@ -107,11 +107,14 @@ readinto = interp2app(W_BufferedIOBase.readinto_w), ) -class RawBuffer(RWBuffer): +class RawBuffer(Buffer): + _immutable_ = True + def __init__(self, buf, start, length): self.buf = buf self.start = start self.length = length + self.readonly = False def getlength(self): return self.length @@ -708,7 +711,7 @@ def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() size = len(data) with self.lock: diff --git a/pypy/module/_io/interp_bytesio.py 
b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -82,7 +82,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) size = rwbuffer.getlength() output = self.read(size) @@ -91,10 +91,7 @@ def write_w(self, space, w_data): self._check_closed(space) - if space.isinstance_w(w_data, space.w_unicode): - raise OperationError(space.w_TypeError, space.wrap( - "bytes string of buffer expected")) - buf = space.bufferstr_w(w_data) + buf = space.buffer_w(w_data, space.BUF_CONTIG_RO).as_str() length = len(buf) if length <= 0: return space.wrap(0) diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -340,7 +340,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.bufferstr_w(w_data) + data = space.getarg_w('s*', w_data).as_str() try: n = os.write(self.fd, data) @@ -373,7 +373,7 @@ def readinto_w(self, space, w_buffer): self._check_closed(space) self._check_readable(space) - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() try: buf = os.read(self.fd, length) diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,6 +139,14 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write 
buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == b'a\nb\ncxxxxx' @@ -250,6 +258,7 @@ raw = _io.FileIO(self.tmpfile, 'w') f = _io.BufferedWriter(raw) f.write(b"abcd") + raises(TypeError, f.write, u"cd") f.close() assert self.readfile() == b"abcd" diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -43,6 +43,8 @@ f = _io.BytesIO() assert f.write(b"") == 0 assert f.write(b"hello") == 5 + exc = raises(TypeError, f.write, u"lo") + assert str(exc.value) == "'unicode' does not have the buffer interface" import gc; gc.collect() assert f.getvalue() == b"hello" f.close() @@ -102,6 +104,14 @@ a2 = bytearray(b'testing') assert b.readinto(a1) == 1 assert b.readinto(a2) == 4 + exc = raises(TypeError, b.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, b.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, b.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" b.close() assert a1 == b"h" assert a2 == b"elloing" diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -135,6 +135,14 @@ a = bytearray(b'x' * 10) f = _io.FileIO(self.tmpfile, 'r+') assert f.readinto(a) == 10 + exc = raises(TypeError, f.readinto, u"hello") + assert str(exc.value) == "cannot use unicode as modifiable buffer" + exc = raises(TypeError, f.readinto, buffer(b"hello")) + assert str(exc.value) == "must be read-write buffer, not buffer" + 
exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) + assert str(exc.value) == "must be read-write buffer, not buffer" + exc = raises(TypeError, f.readinto, memoryview(b"hello")) + assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() assert a == b'a\nb\nc\0\0\0\0\0' # diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -81,8 +81,9 @@ raise OperationError(space.w_IOError, space.wrap("connection is read-only")) - @unwrap_spec(buf='bufferstr', offset='index', size='index') - def send_bytes(self, space, buf, offset=0, size=PY_SSIZE_T_MIN): + @unwrap_spec(offset='index', size='index') + def send_bytes(self, space, w_buf, offset=0, size=PY_SSIZE_T_MIN): + buf = space.getarg_w('s*', w_buf).as_str() length = len(buf) self._check_writable(space) if offset < 0: @@ -123,7 +124,7 @@ @unwrap_spec(offset='index') def recv_bytes_into(self, space, w_buffer, offset=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.writebuf_w(w_buffer) length = rwbuffer.getlength() res, newbuf = self.do_recv_string( @@ -150,7 +151,7 @@ w_pickled = space.call_method( w_picklemodule, "dumps", w_obj, w_protocol) - buf = space.bufferstr_w(w_pickled) + buf = space.str_w(w_pickled) self.do_send_string(space, buf, 0, len(buf)) def recv(self, space): diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -1,12 +1,14 @@ -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer # XXX not the most efficient implementation -class RawFFIBuffer(RWBuffer): +class RawFFIBuffer(Buffer): + _immutable_ = True def __init__(self, datainstance): self.datainstance = datainstance + self.readonly = False def getlength(self): return self.datainstance.getrawsize() diff --git 
a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -18,6 +18,7 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.rarithmetic import intmask, r_uint +from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker TYPEMAP = { @@ -359,8 +360,13 @@ lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) - def buffer_w(self, space): - from pypy.module._rawffi.buffer import RawFFIBuffer + def buffer_w(self, space, flags): + return RawFFIBuffer(self) + + def readbuf_w(self, space): + return RawFFIBuffer(self) + + def writebuf_w(self, space): return RawFFIBuffer(self) def getrawsize(self): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1093,6 +1093,12 @@ assert a[3] == b'z' assert a[4] == b't' + b = memoryview(a) + assert len(b) == 10 + assert b[3] == 'z' + b[3] = 'x' + assert b[3] == 'x' + def test_union(self): import _rawffi longsize = _rawffi.sizeof('l') diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -457,7 +457,7 @@ @unwrap_spec(nbytes=int, flags=int) def recv_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt @@ -468,7 +468,7 @@ @unwrap_spec(nbytes=int, flags=int) def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): - rwbuffer = space.rwbuffer_w(w_buffer) + rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt diff --git a/pypy/module/_socket/test/test_sock_app.py 
b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -534,6 +534,8 @@ s.connect(("www.python.org", 80)) except _socket.gaierror as ex: skip("GAIError - probably no connection: %s" % str(ex.args)) + exc = raises(TypeError, s.send, None) + assert str(exc.value) == "must be string or buffer, not None" assert s.send(memoryview(b'')) == 0 assert s.sendall(memoryview(b'')) is None exc = raises(TypeError, s.send, '') @@ -694,6 +696,13 @@ msg = buf.tobytes()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes = cli.recv_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_recvfrom_into(self): import socket import array @@ -710,6 +719,13 @@ msg = buf.tobytes()[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(1024) + nbytes, addr = cli.recvfrom_into(memoryview(buf)) + assert nbytes == len(MSG) + msg = buf[:len(MSG)] + assert msg == MSG + def test_family(self): import socket cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -118,7 +118,7 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: - buf = space.buffer_w(w_string) + buf = space.readbuf_w(w_string) if (not space.is_none(self.w_pattern) and space.isinstance_w(self.w_pattern, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap( diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -2,7 +2,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import 
OperationError, wrap_windowserror +from pypy.interpreter.error import OperationError, wrap_windowserror, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rwinreg, rwin32 from rpython.rlib.rarithmetic import r_uint, intmask @@ -327,7 +327,14 @@ buf = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw') buf[0] = '\0' else: - value = space.bufferstr_w(w_value) + try: + value = w_value.readbuf_w(space) + except TypeError: + raise oefmt(space.w_TypeError, + "Objects of type '%T' can not be used as binary " + "registry values", w_value) + else: + value = value.as_str() buflen = len(value) buf = rffi.str2charp(value) diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -140,11 +140,15 @@ assert 0, "Did not raise" def test_SetValueEx(self): - from winreg import CreateKey, SetValueEx + from winreg import CreateKey, SetValueEx, REG_BINARY key = CreateKey(self.root_key, self.test_key_name) sub_key = CreateKey(key, "sub_key") for name, value, type in self.test_data: SetValueEx(sub_key, name, 0, type, value) + exc = raises(TypeError, SetValueEx, sub_key, 'test_name', None, + REG_BINARY, memoryview('abc')) + assert str(exc.value) == ("Objects of type 'memoryview' can not " + "be used as binary registry values") def test_readValues(self): from winreg import OpenKey, EnumValue, QueryValueEx, EnumKey diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -1,6 +1,7 @@ from __future__ import with_statement from rpython.rlib import jit +from rpython.rlib.buffer import Buffer from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable @@ -9,7 +10,6 @@ from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw 
from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( interp2app, interpindirect2app, unwrap_spec) @@ -138,8 +138,11 @@ self.len = 0 self.allocated = 0 - def buffer_w(self, space): - return ArrayBuffer(self) + def readbuf_w(self, space): + return ArrayBuffer(self, True) + + def writebuf_w(self, space): + return ArrayBuffer(self, False) def descr_append(self, space, w_x): """ append(x) @@ -247,9 +250,8 @@ self._charbuf_stop() return self.space.wrapbytes(s) - @unwrap_spec(s='bufferstr_or_u') - def descr_fromstring(self, space, s): - """fromstring(string) + def descr_fromstring(self, space, w_s): + """ fromstring(string) Appends items from the string, interpreting it as an array of machine values, as if it had been read from a file using the @@ -257,6 +259,7 @@ This method is deprecated. Use frombytes instead. """ + s = space.getarg_w('s#', w_s) msg = "fromstring() is deprecated. Use frombytes() instead." 
space.warn(space.wrap(msg), self.space.w_DeprecationWarning) self.descr_frombytes(space, s) @@ -303,7 +306,7 @@ self.descr_frombytes(space, item) msg = "not enough items in file" raise OperationError(space.w_EOFError, space.wrap(msg)) - self.descr_fromstring(space, item) + self.descr_fromstring(space, w_item) def descr_tofile(self, space, w_f): """ tofile(f) @@ -628,9 +631,12 @@ v.typecode = k unroll_typecodes = unrolling_iterable(types.keys()) -class ArrayBuffer(RWBuffer): - def __init__(self, array): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, array, readonly): self.array = array + self.readonly = readonly def getlength(self): return self.array.len * self.array.itemsize diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -131,7 +131,14 @@ raises(OverflowError, a.append, 2 ** (8 * b)) def test_fromstring(self): - a = self.array('l') + a = self.array('b') + a.fromstring('Hi!') + assert a[0] == b'H' and a[1] == b'i' and a[2] == b'!' 
and len(a) == 3 + a = self.array('b') + exc = raises(TypeError, a.fromstring, memoryview(b'xyz')) + assert str(exc.value) == "must be string or read-only buffer, not memoryview" + assert a[0] == b'x' and a[1] == b'y' and a[2] == b'z' and len(a) == 3 + a = self.array('b') a.fromstring('') assert not len(a) @@ -404,7 +411,6 @@ def test_buffer_write(self): a = self.array('b', b'hello') buf = memoryview(a) - print(repr(buf)) try: buf[3] = b'L' except TypeError: diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.buffer_w(w_obj) + buf = space.buffer_w(w_obj, space.BUF_SIMPLE) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.buffer_w(w_value) + buf = space.buffer_w(w_value, space.BUF_SIMPLE) # TODO: report if too many items given? 
for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.buffer_w(w_value) + buf = space.buffer_w(w_value, space.BUF_SIMPLE) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -22,7 +22,6 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.module import Module from pypy.interpreter.function import StaticMethod -from pypy.objspace.std.memoryview import W_MemoryView from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property from pypy.module.micronumpy.base import W_NDimArray @@ -475,7 +474,7 @@ "PyLong_Type": "space.w_int", "PyComplex_Type": "space.w_complex", "PyByteArray_Type": "space.w_bytearray", - "PyMemoryView_Type": "space.gettypeobject(W_MemoryView.typedef)", + "PyMemoryView_Type": "space.w_memoryview", "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", "PyBaseObject_Type": "space.w_object", 'PyNone_Type': 'space.type(space.w_None)', diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -15,8 +15,8 @@ from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments +from rpython.rlib.buffer import Buffer from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize from rpython.tool.sourcetools import func_renamer @@ -230,11 +230,13 @@ class CPyBuffer(Buffer): # Similar to Py_buffer + _immutable_ = True def 
__init__(self, ptr, size, w_obj): self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive + self.readonly = True def getlength(self): return self.size diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -54,7 +54,7 @@ module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) # XXX big-endian - assert memoryview(arr).tobytes() == (b'\x01\0\0\0' - b'\x02\0\0\0' - b'\x03\0\0\0' - b'\x04\0\0\0') + assert bytes(arr) == (b'\x01\0\0\0' + b'\x02\0\0\0' + b'\x03\0\0\0' + b'\x04\0\0\0') diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -1,6 +1,6 @@ from rpython.rtyper.tool import rffi_platform as platform from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, wrap_oserror, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib import rposix from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -92,38 +92,27 @@ op = rffi.cast(rffi.INT, op) # C long => C int try: - intarg = space.int_w(w_arg) + arg = space.getarg_w('s#', w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: - intarg = rffi.cast(rffi.INT, intarg) # C long => C int - rv = fcntl_int(fd, op, intarg) - if rv < 0: - raise _get_error(space, "fcntl") - return space.wrap(rv) + ll_arg = rffi.str2charp(arg) + try: + rv = fcntl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "fcntl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') - try: - arg = space.bufferstr_w(w_arg) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - try: - 
arg = space.str_w(w_arg) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_TypeError, space.wrap( - "int or string or buffer required")) - - ll_arg = rffi.str2charp(arg) - rv = fcntl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') + intarg = space.int_w(w_arg) + intarg = rffi.cast(rffi.INT, intarg) # C long => C int + rv = fcntl_int(fd, op, intarg) if rv < 0: raise _get_error(space, "fcntl") - return space.wrapbytes(arg) - + return space.wrapbytes(rv) @unwrap_spec(op=int) def flock(space, w_fd, op): @@ -212,55 +201,50 @@ fd = space.c_filedescriptor_w(w_fd) op = rffi.cast(rffi.INT, op) # C long => C int - if mutate_flag != 0: - try: - rwbuffer = space.rwbuffer_w(w_arg) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - if mutate_flag > 0: - raise - else: - arg = rwbuffer.as_str() - ll_arg = rffi.str2charp(arg) - rv = ioctl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') - if rv < 0: - raise _get_error(space, "ioctl") - rwbuffer.setslice(0, arg) - return space.wrap(rv) - try: - intarg = space.int_w(w_arg) + rwbuffer = space.writebuf_w(w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: - intarg = rffi.cast(rffi.INT, intarg) # C long => C int - rv = ioctl_int(fd, op, intarg) - if rv < 0: - raise _get_error(space, "ioctl") - return space.wrap(rv) + arg = rwbuffer.as_str() + ll_arg = rffi.str2charp(arg) + try: + rv = ioctl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "ioctl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + if mutate_flag != 0: + rwbuffer.setslice(0, arg) + return space.wrap(rv) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') + + if mutate_flag != -1: + raise OperationError(space.w_TypeError, space.wrap( + "ioctl requires a file or file descriptor, an integer " + "and optionally an integer 
or buffer argument")) try: - arg = space.bufferstr_w(w_arg) + arg = space.getarg_w('s#', w_arg) except OperationError, e: if not e.match(space, space.w_TypeError): raise + else: + ll_arg = rffi.str2charp(arg) try: - arg = space.str_w(w_arg) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError( - space.w_TypeError, - space.wrap("int or string or buffer required")) + rv = ioctl_str(fd, op, ll_arg) + if rv < 0: + raise _get_error(space, "ioctl") + arg = rffi.charpsize2str(ll_arg, len(arg)) + return space.wrap(arg) + finally: + lltype.free(ll_arg, flavor='raw') - ll_arg = rffi.str2charp(arg) - rv = ioctl_str(fd, op, ll_arg) - arg = rffi.charpsize2str(ll_arg, len(arg)) - lltype.free(ll_arg, flavor='raw') + intarg = space.int_w(w_arg) + intarg = rffi.cast(rffi.INT, intarg) # C long => C int + rv = ioctl_int(fd, op, intarg) if rv < 0: raise _get_error(space, "ioctl") - return space.wrapbytes(arg) + return space.wrapbytes(rv) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -47,6 +47,8 @@ assert fcntl.fcntl(f, 1, 0) == 0 assert fcntl.fcntl(f, 2, "foo") == b"foo" assert fcntl.fcntl(f, 2, memoryview(b"foo")) == b"foo" + exc = raises(TypeError, fcntl.fcntl, f, 2, memoryview(b"foo")) + assert 'integer' in str(exc.value) try: os.O_LARGEFILE @@ -222,6 +224,16 @@ assert res == 0 assert buf.tostring() == expected + buf = array.array('i', [0]) + res = fcntl.ioctl(mfd, TIOCGPGRP, buffer(buf)) + assert res == expected + assert buf.tostring() == b'\x00' * 4 + + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview(b'abc')) + assert 'integer' in str(exc.value) + exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview(b'abc'), False) + assert str(exc.value) == "ioctl requires a file or file descriptor, an integer and optionally an integer or buffer argument" + res = fcntl.ioctl(mfd, TIOCGPGRP, 
buf, False) assert res == expected diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -223,7 +223,7 @@ space = self.space if space.type(w_obj).is_heaptype(): try: - buf = space.buffer_w(w_obj) + buf = space.readbuf_w(w_obj) except OperationError as e: if not e.match(space, space.w_TypeError): raise @@ -466,13 +466,7 @@ # Unmarshaller with inlined buffer string def __init__(self, space, w_str): Unmarshaller.__init__(self, space, None) - try: - self.bufstr = space.bufferstr_w(w_str) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_TypeError, space.wrap( - 'marshal.loads() arg must be string or buffer')) + self.bufstr = space.getarg_w('s#', w_str) self.bufpos = 0 self.limit = len(self.bufstr) diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -16,6 +16,9 @@ x = marshal.loads(s) assert x == case and type(x) is type(case) + exc = raises(TypeError, marshal.loads, memoryview(s)) + assert str(exc.value) == "must be string or read-only buffer, not memoryview" + import sys if '__pypy__' in sys.builtin_module_names: f = StringIO.StringIO() diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -25,6 +25,8 @@ s = marshal.dumps(array.array('b', b'asd')) t = marshal.loads(s) assert type(t) is bytes and t == b'asd' + exc = raises(ValueError, marshal.dumps, memoryview(b'asd')) + assert str(exc.value) == "unmarshallable object" def test_unmarshal_evil_long(self): import marshal diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ 
b/pypy/module/micronumpy/boxes.py @@ -337,8 +337,14 @@ def descr_copy(self, space): return self.convert_to(space, self.get_dtype(space)) - def buffer_w(self, space): - return self.descr_ravel(space).buffer_w(space) + def buffer_w(self, space, flags): + return self.descr_ravel(space).buffer_w(space, flags) + + def readbuf_w(self, space): + return self.descr_ravel(space).readbuf_w(space) + + def charbuf_w(self, space): + return self.descr_ravel(space).charbuf_w(space) def descr_byteswap(self, space): return self.get_dtype(space).itemtype.byteswap(self) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,6 +1,6 @@ -from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit +from rpython.rlib.buffer import Buffer from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE @@ -316,8 +316,8 @@ def get_storage(self): return self.storage - def get_buffer(self, space): - return ArrayBuffer(self) + def get_buffer(self, space, readonly): + return ArrayBuffer(self, readonly) def astype(self, space, dtype): strides, backstrides = calc_strides(self.get_shape(), dtype, @@ -471,9 +471,12 @@ free_raw_storage(self.storage) -class ArrayBuffer(RWBuffer): - def __init__(self, impl): +class ArrayBuffer(Buffer): + _immutable_ = True + + def __init__(self, impl, readonly): self.impl = impl + self.readonly = readonly def getitem(self, item): return raw_storage_getitem(lltype.Char, self.impl.storage, item) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -610,11 +610,20 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ctypes not implemented yet")) - 
def buffer_w(self, space): - return self.implementation.get_buffer(space) + def buffer_w(self, space, flags): + return self.implementation.get_buffer(space, True) + + def readbuf_w(self, space): + return self.implementation.get_buffer(space, True) + + def writebuf_w(self, space): + return self.implementation.get_buffer(space, False) + + def charbuf_w(self, space): + return self.implementation.get_buffer(space, True).as_str() def descr_get_data(self, space): - return space.newbuffer(self.buffer_w(space)) + return space.newbuffer(self.implementation.get_buffer(space, False)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): @@ -1178,7 +1187,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) - buf = space.buffer_w(w_buffer) + try: + buf = space.writebuf_w(w_buffer) + except OperationError: + buf = space.readbuf_w(w_buffer) try: raw_ptr = buf.get_raw_address() except ValueError: @@ -1196,7 +1208,7 @@ return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, w_subtype=w_subtype, w_base=w_buffer, - writable=buf.is_writable()) + writable=not buf.readonly) order = order_converter(space, w_order, NPY.CORDER) if order == NPY.CORDER: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -347,6 +347,9 @@ a = np.array([1,2,3]) b = buffer(a) assert type(b) is buffer + assert 'read-only buffer' in repr(b) + exc = raises(TypeError, "b[0] = '0'") + assert str(exc.value) == 'buffer is read-only' def test_type(self): from numpypy import array @@ -2242,6 +2245,7 @@ a.data[4] = '\xff' assert a[1] == 0xff assert len(a.data) == 16 + assert type(a.data) is buffer def test_explicit_dtype_conversion(self): from numpypy import array diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- 
a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -2,8 +2,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.buffer import RWBuffer from rpython.rlib import rmmap, rarithmetic +from rpython.rlib.buffer import Buffer from rpython.rlib.rmmap import RValueError, RTypeError, RMMapError if rmmap.HAVE_LARGEFILE_SUPPORT: @@ -17,9 +17,9 @@ self.space = space self.mmap = mmap_obj - def buffer_w(self, space): + def readbuf_w(self, space): self.check_valid() - return MMapBuffer(self.space, self.mmap) + return MMapBuffer(self.space, self.mmap, True) def close(self): self.mmap.close() @@ -304,10 +304,13 @@ mmap_error._dont_inline_ = True -class MMapBuffer(RWBuffer): - def __init__(self, space, mmap): +class MMapBuffer(Buffer): + _immutable_ = True + + def __init__(self, space, mmap, readonly): self.space = space self.mmap = mmap + self.readonly = readonly def getlength(self): return self.mmap.size @@ -321,7 +324,7 @@ if step == 1: return self.mmap.getslice(start, size) else: - return RWBuffer.getslice(self, start, stop, step, size) + return Buffer.getslice(self, start, stop, step, size) def setitem(self, index, char): self.check_valid_writeable() @@ -331,14 +334,6 @@ self.check_valid_writeable() self.mmap.setslice(start, string) - def is_writable(self): - try: - self.mmap.check_writeable() - except RMMapError: - return False - else: - return True - def get_raw_address(self): self.check_valid() return self.mmap.data diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -542,20 +542,16 @@ m.close() f.close() - def test_buffer_write(self): + def test_memoryview(self): from mmap import mmap - f = open(self.tmpname + "y", "wb+") - f.write(b"foobar") + f = open(self.tmpname + "y", "w+") + 
f.write("foobar") f.flush() m = mmap(f.fileno(), 6) - m[5] = ord('?') - b = memoryview(m) - b[:3] = b"FOO" - del b # For CPython: "cannot close exported pointers exist" + m[5] = '?' + exc = raises(TypeError, memoryview, m) + assert 'buffer interface' in str(exc.value) m.close() - f.seek(0) - got = f.read() - assert got == b"FOOba?" f.close() def test_offset(self): diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -50,7 +50,9 @@ interpleveldefs = { 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', + 'pack_into': 'interp_struct.pack_into', 'unpack': 'interp_struct.unpack', + 'unpack_from': 'interp_struct.unpack_from', '_clearcache': 'interp_struct.clearcache', 'Struct': 'interp_struct.W_Struct', @@ -58,6 +60,4 @@ appleveldefs = { 'error': 'app_struct.error', - 'pack_into': 'app_struct.pack_into', - 'unpack_from': 'app_struct.unpack_from', } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -2,23 +2,8 @@ """ Application-level definitions for the struct module. 
""" -import _struct as struct class error(Exception): """Exception raised on various occasions; argument is a string describing what is wrong.""" - -# XXX inefficient -def pack_into(fmt, buf, offset, *args): - data = struct.pack(fmt, *args) - memoryview(buf)[offset:offset+len(data)] = data - -# XXX inefficient -def unpack_from(fmt, buf, offset=0): - size = struct.calcsize(fmt) - data = memoryview(buf)[offset:offset+size] - if len(data) != size: - raise error("unpack_from requires a buffer of at least %d bytes" - % (size,)) - return struct.unpack(fmt, data) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -5,7 +5,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.module.struct.formatiterator import ( PackFormatIterator, UnpackFormatIterator @@ -29,6 +29,7 @@ raise OperationError(w_error, space.wrap(e.msg)) return fmtiter.totalsize + @unwrap_spec(format=str) def pack(space, format, args_w): if jit.isconstant(format): @@ -47,6 +48,23 @@ return space.wrapbytes(fmtiter.result.build()) +# XXX inefficient + at unwrap_spec(format=str, offset=int) +def pack_into(space, format, w_buf, offset, args_w): + res = pack(space, format, args_w).str_w(space) + buf = space.writebuf_w(w_buf) + if offset < 0: + offset += buf.getlength() + size = len(res) + if offset < 0 or (buf.getlength() - offset) < size: + w_module = space.getbuiltinmodule('struct') + w_error = space.getattr(w_module, space.wrap('error')) + raise oefmt(w_error, + "pack_into requires a buffer of at least %d bytes", + size) + buf.setslice(offset, res) + + @unwrap_spec(format=str, input='bufferstr') def unpack(space, format, input): fmtiter = 
UnpackFormatIterator(space, input) @@ -65,6 +83,27 @@ # No cache in this implementation +# XXX inefficient + at unwrap_spec(format=str, offset=int) +def unpack_from(space, format, w_buf, offset=0): + size = _calcsize(space, format) + buf = space.getarg_w('z*', w_buf) + if buf is None: + w_module = space.getbuiltinmodule('struct') + w_error = space.getattr(w_module, space.wrap('error')) + raise oefmt(w_error, "unpack_from requires a buffer argument") + if offset < 0: + offset += buf.getlength() + if offset < 0 or (buf.getlength() - offset) < size: + w_module = space.getbuiltinmodule('struct') + w_error = space.getattr(w_module, space.wrap('error')) + raise oefmt(w_error, + "unpack_from requires a buffer of at least %d bytes", + size) + data = buf.getslice(offset, offset + size, 1, size) + return unpack(space, format, data) + + class W_Struct(W_Root): _immutable_fields_ = ["format", "size"] diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -2,7 +2,6 @@ Tests for the struct module implemented at interp-level in pypy/module/struct. """ -import py from rpython.rlib.rstruct.nativefmttable import native_is_bigendian @@ -26,7 +25,6 @@ """ assert issubclass(self.struct.error, Exception) - def test_calcsize_standard(self): """ Check the standard size of the various format characters. @@ -52,14 +50,12 @@ # test with some repetitions and multiple format characters assert calcsize('=bQ3i') == 1 + 8 + 3*4 - def test_index(self): class X(object): def __index__(self): return 3 assert self.struct.unpack("i", self.struct.pack("i", X()))[0] == 3 - def test_pack_standard_little(self): """ Check packing with the '<' format specifier. @@ -73,7 +69,6 @@ assert pack("' format specifier. 
@@ -101,7 +95,6 @@ assert pack(">q", -0x41B2B3B4B5B6B7B8) == b'\xbeMLKJIHH' assert pack(">Q", 0x8142434445464748) == b'\x81BCDEFGH' - def test_unpack_standard_big(self): """ Check unpacking with the '>' format specifier. @@ -115,7 +108,6 @@ assert unpack(">q", b'\xbeMLKJIHH') == (-0x41B2B3B4B5B6B7B8,) assert unpack(">Q", b'\x81BCDEFGH') == (0x8142434445464748,) - def test_calcsize_native(self): """ Check that the size of the various format characters is reasonable. @@ -145,7 +137,6 @@ assert calcsize('ibb') == calcsize('i') + 2 * calcsize('b') assert calcsize('ih') == calcsize('i') + calcsize('h') - def test_pack_native(self): """ Check packing with the native format. @@ -163,7 +154,6 @@ assert res[sizeofi:] == b'\x05' + b'\x00' * (sizeofi-1) assert pack("q", -1) == b'\xff' * calcsize("q") - def test_unpack_native(self): """ Check unpacking with the native format. @@ -174,7 +164,6 @@ assert unpack("bi", pack("bi", -2, 5)) == (-2, 5) assert unpack("q", b'\xff' * calcsize("q")) == (-1,) - def test_string_format(self): """ Check the 's' format character. @@ -189,7 +178,6 @@ assert unpack("5s3s", b"worldspa") == (b"world", b"spa") assert unpack("0s", b"") == (b"",) - def test_pascal_format(self): """ Check the 'p' format character. @@ -209,7 +197,6 @@ assert unpack("1p", b"\x03") == (b"",) assert unpack("300p", longpacked300) == (longstring[:255],) - def test_char_format(self): """ Check the 'c' format character. @@ -221,7 +208,6 @@ assert unpack("c", b"?") == (b"?",) assert unpack("5c", b"a\xc0\x00\n-") == (b"a", b"\xc0", b"\x00", b"\n", b"-") - def test_pad_format(self): """ Check the 'x' format character. @@ -233,7 +219,6 @@ assert unpack("x", b"?") == () assert unpack("5x", b"hello") == () - def test_native_floats(self): """ Check the 'd' and 'f' format characters on native packing. 
@@ -250,7 +235,6 @@ assert res != 12.34 # precision lost assert abs(res - 12.34) < 1E-6 - def test_standard_floats(self): """ Check the 'd' and 'f' format characters on standard packing. @@ -269,7 +253,6 @@ def test_bool(self): pack = self.struct.pack - unpack = self.struct.unpack assert pack("!?", True) == b'\x01' assert pack(">?", True) == b'\x01' assert pack("!?", False) == b'\x00' @@ -332,15 +315,12 @@ raises(error, pack, "b", 150) # argument out of range # XXX the accepted ranges still differs between PyPy and CPython - def test_overflow_error(self): """ Check OverflowError cases. """ import sys calcsize = self.struct.calcsize - pack = self.struct.pack - unpack = self.struct.unpack someerror = (OverflowError, self.struct.error) raises(someerror, calcsize, "%dc" % (sys.maxsize+1,)) raises(someerror, calcsize, "999999999999999999999999999c") @@ -349,7 +329,6 @@ raises(someerror, calcsize, "c%dc" % (sys.maxsize,)) raises(someerror, calcsize, "%dci" % (sys.maxsize,)) - def test_unicode(self): """ A PyPy extension: accepts the 'u' format character in native mode, @@ -365,7 +344,6 @@ assert data == b'X\x00\x00\x00Y\x00\x00\x00Z\x00\x00\x00' assert self.struct.unpack("uuu", data) == ('X', 'Y', 'Z') - def test_unpack_memoryview(self): """ memoryview objects can be passed to struct.unpack(). 
@@ -374,6 +352,36 @@ assert self.struct.unpack("ii", b) == (62, 12) raises(self.struct.error, self.struct.unpack, "i", b) + def test_pack_unpack_buffer(self): + import array + b = array.array('c', '\x00' * 19) + sz = self.struct.calcsize("ii") + for offset in [2, -17]: + self.struct.pack_into("ii", b, offset, 17, 42) + assert str(buffer(b)) == ('\x00' * 2 + + self.struct.pack("ii", 17, 42) + + '\x00' * (19-sz-2)) + exc = raises(TypeError, self.struct.pack_into, "ii", buffer(b), 0, 17, 42) + assert str(exc.value) == "buffer is read-only" + exc = raises(TypeError, self.struct.pack_into, "ii", 'test', 0, 17, 42) + assert str(exc.value) == "Cannot use string as modifiable buffer" + exc = raises(self.struct.error, self.struct.pack_into, "ii", b[0:1], 0, 17, 42) + assert str(exc.value) == "pack_into requires a buffer of at least 8 bytes" + + assert self.struct.unpack_from("ii", b, 2) == (17, 42) + assert self.struct.unpack_from("ii", b, -17) == (17, 42) + assert self.struct.unpack_from("ii", buffer(b, 2)) == (17, 42) + assert self.struct.unpack_from("ii", buffer(b), 2) == (17, 42) + assert self.struct.unpack_from("ii", memoryview(buffer(b)), 2) == (17, 42) + exc = raises(TypeError, self.struct.unpack_from, "ii", 123) + assert 'must be string or buffer, not int' in str(exc.value) + exc = raises(self.struct.error, self.struct.unpack_from, "ii", None) + assert str(exc.value) == "unpack_from requires a buffer argument" + exc = raises(self.struct.error, self.struct.unpack_from, "ii", '') + assert str(exc.value) == "unpack_from requires a buffer of at least 8 bytes" + exc = raises(self.struct.error, self.struct.unpack_from, "ii", memoryview('')) + assert str(exc.value) == "unpack_from requires a buffer of at least 8 bytes" + def test___float__(self): class MyFloat(object): def __init__(self, x): diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -4,6 +4,7 @@ from 
pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.sliceobject import W_SliceObject +from rpython.rlib.buffer import StringBuffer from rpython.rlib.objectmodel import instantiate, we_are_translated, specialize from rpython.rlib.nonconst import NonConstant from rpython.rlib.rarithmetic import r_uint, r_singlefloat @@ -39,6 +40,9 @@ def setclass(self, space, w_subtype): is_root(w_subtype) + def buffer_w(self, space, flags): + return StringBuffer("foobar") + def str_w(self, space): return NonConstant("foobar") identifier_w = bytes_w = str_w @@ -70,6 +74,9 @@ def get_module(self): return w_some_obj() + def get_module_type_name(self): + return self.name + def w_some_obj(): if NonConstant(False): return W_Root() @@ -305,11 +312,6 @@ ec._py_repr = None return ec - def buffer_w(self, w_obj): - from pypy.interpreter.buffer import Buffer - is_root(w_obj) - return Buffer() - def unicode_from_object(self, w_obj): return w_some_obj() diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/bufferobject.py @@ -0,0 +1,159 @@ +""" +Implementation of the 'buffer' and 'memoryview' types. 
+""" +import operator + +from rpython.rlib.buffer import Buffer, StringBuffer, SubBuffer +from pypy.interpreter.baseobjspace import W_Root From noreply at buildbot.pypy.org Wed Apr 30 01:11:58 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:11:58 +0200 (CEST) Subject: [pypy-commit] pypy py3k: rekill buffer and some py2 only tests Message-ID: <20140429231158.E44981C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71071:38c291084e78 Date: 2014-04-29 16:07 -0700 http://bitbucket.org/pypy/pypy/changeset/38c291084e78/ Log: rekill buffer and some py2 only tests diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py deleted file mode 100644 --- a/pypy/objspace/std/bufferobject.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -Implementation of the 'buffer' and 'memoryview' types. -""" -import operator - -from rpython.rlib.buffer import Buffer, StringBuffer, SubBuffer -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef -from rpython.rlib.objectmodel import compute_hash -from rpython.rlib.rstring import StringBuilder - - -class W_Buffer(W_Root): - """Implement the built-in 'buffer' type as a wrapper around - an interp-level buffer. 
- """ - - def __init__(self, buf): - assert isinstance(buf, Buffer) - self.buf = buf - - def buffer_w(self, space, flags): - space.check_buf_flags(flags, self.buf.readonly) - return self.buf - - def readbuf_w(self, space): - return self.buf - - def writebuf_w(self, space): - if self.buf.readonly: - raise OperationError(space.w_TypeError, space.wrap( - "buffer is read-only")) - return self.buf - - def charbuf_w(self, space): - return self.buf.as_str() - - @staticmethod - @unwrap_spec(offset=int, size=int) - def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): - buf = space.readbuf_w(w_object) - if offset == 0 and size == -1: - return W_Buffer(buf) - # handle buffer slices - if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) - if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) - buf = SubBuffer(buf, offset, size) - return W_Buffer(buf) - - def descr_len(self, space): - return space.wrap(self.buf.getlength()) - - def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) - if step == 0: # index only - return space.wrap(self.buf.getitem(start)) - res = self.buf.getslice(start, stop, step, size) - return space.wrap(res) - - def descr_setitem(self, space, w_index, w_obj): - if self.buf.readonly: - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) - value = space.readbuf_w(w_obj) - if step == 0: # index only - if value.getlength() != 1: - msg = "right operand must be a single byte" - raise OperationError(space.w_TypeError, space.wrap(msg)) - self.buf.setitem(start, value.getitem(0)) - else: - if value.getlength() != size: - msg = "right operand length must match slice length" - raise OperationError(space.w_TypeError, space.wrap(msg)) - if step == 1: - self.buf.setslice(start, 
value.as_str()) - else: - for i in range(size): - self.buf.setitem(start + i * step, value.getitem(i)) - - def descr_str(self, space): - return space.wrap(self.buf.as_str()) - - @unwrap_spec(other='bufferstr') - def descr_add(self, space, other): - return space.wrap(self.buf.as_str() + other) - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if not isinstance(w_other, W_Buffer): - return space.w_NotImplemented - # xxx not the most efficient implementation - str1 = self.buf.as_str() - str2 = w_other.buf.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def descr_hash(self, space): - return space.wrap(compute_hash(self.buf.as_str())) - - def descr_mul(self, space, w_times): - # xxx not the most efficient implementation - w_string = space.wrap(self.buf.as_str()) - # use the __mul__ method instead of space.mul() so that we - # return NotImplemented instead of raising a TypeError - return space.call_method(w_string, '__mul__', w_times) - - def descr_repr(self, space): - if self.buf.readonly: - info = 'read-only buffer' - else: - info = 'read-write buffer' - addrstring = self.getaddrstring(space) - - return space.wrap("<%s for 0x%s, size %d>" % - (info, addrstring, self.buf.getlength())) - -W_Buffer.typedef = TypeDef( - "buffer", - __doc__ = """\ -buffer(object [, offset[, size]]) - -Create a new buffer object which references the given object. -The buffer will reference a slice of the target object from the -start of the object (or at the specified offset). The slice will -extend to the end of the target object (or with the specified size). 
-""", - __new__ = interp2app(W_Buffer.descr_new_buffer), - __len__ = interp2app(W_Buffer.descr_len), - __getitem__ = interp2app(W_Buffer.descr_getitem), - __setitem__ = interp2app(W_Buffer.descr_setitem), - __str__ = interp2app(W_Buffer.descr_str), - __add__ = interp2app(W_Buffer.descr_add), - __eq__ = interp2app(W_Buffer.descr_eq), - __ne__ = interp2app(W_Buffer.descr_ne), - __lt__ = interp2app(W_Buffer.descr_lt), - __le__ = interp2app(W_Buffer.descr_le), - __gt__ = interp2app(W_Buffer.descr_gt), - __ge__ = interp2app(W_Buffer.descr_ge), - __hash__ = interp2app(W_Buffer.descr_hash), - __mul__ = interp2app(W_Buffer.descr_mul), - __rmul__ = interp2app(W_Buffer.descr_mul), - __repr__ = interp2app(W_Buffer.descr_repr), -) -W_Buffer.typedef.acceptable_as_base_class = False diff --git a/pypy/objspace/std/test/test_bufferobject.py b/pypy/objspace/std/test/test_bufferobject.py deleted file mode 100644 --- a/pypy/objspace/std/test/test_bufferobject.py +++ /dev/null @@ -1,199 +0,0 @@ -class AppTestBuffer: - spaceconfig = dict(usemodules=['array']) - - def test_init(self): - import sys - class A(object): - def __buffer__(self): - return buffer('123') - if '__pypy__' not in sys.builtin_module_names: - raises(TypeError, buffer, A()) - else: - assert buffer(A()) == buffer('123') - - def test_unicode_buffer(self): - import sys - b = buffer(u"ab") - if sys.maxunicode == 65535: # UCS2 build - assert len(b) == 4 - if sys.byteorder == "big": - assert b[0:4] == "\x00a\x00b" - else: - assert b[0:4] == "a\x00b\x00" - else: # UCS4 build - assert len(b) == 8 - if sys.byteorder == "big": - assert b[0:8] == "\x00\x00\x00a\x00\x00\x00b" - else: - assert b[0:8] == "a\x00\x00\x00b\x00\x00\x00" - - def test_array_buffer(self): - import array - b = buffer(array.array("B", [1, 2, 3])) - assert len(b) == 3 - assert b[0:3] == "\x01\x02\x03" - - def test_nonzero(self): - assert buffer('\x00') - assert not buffer('') - import array - assert buffer(array.array("B", [0])) - assert not 
buffer(array.array("B", [])) - - def test_str(self): - assert str(buffer('hello')) == 'hello' - - def test_repr(self): - # from 2.5.2 lib tests - assert repr(buffer('hello')).startswith(' buffer('ab')) - assert buffer('ab') >= buffer('ab') - assert buffer('ab') != buffer('abc') - assert buffer('ab') < buffer('abc') - assert buffer('ab') <= buffer('ab') - assert buffer('ab') > buffer('aa') - assert buffer('ab') >= buffer('ab') - - def test_hash(self): - assert hash(buffer('hello')) == hash('hello') - - def test_mul(self): - assert buffer('ab') * 5 == 'ababababab' - assert buffer('ab') * (-2) == '' - assert 5 * buffer('ab') == 'ababababab' - assert (-2) * buffer('ab') == '' - - def test_offset_size(self): - b = buffer('hello world', 6) - assert len(b) == 5 - assert b[0] == 'w' - assert b[:] == 'world' - raises(IndexError, 'b[5]') - b = buffer(b, 2) - assert len(b) == 3 - assert b[0] == 'r' - assert b[:] == 'rld' - raises(IndexError, 'b[3]') - b = buffer('hello world', 1, 8) - assert len(b) == 8 - assert b[0] == 'e' - assert b[:] == 'ello wor' - raises(IndexError, 'b[8]') - b = buffer(b, 2, 3) - assert len(b) == 3 - assert b[2] == ' ' - assert b[:] == 'lo ' - raises(IndexError, 'b[3]') - b = buffer('hello world', 55) - assert len(b) == 0 - assert b[:] == '' - b = buffer('hello world', 6, 999) - assert len(b) == 5 - assert b[:] == 'world' - - raises(ValueError, buffer, "abc", -1) - raises(ValueError, buffer, "abc", 0, -2) - - def test_rw_offset_size(self): - import array - - a = array.array("c", 'hello world') - b = buffer(a, 6) - assert len(b) == 5 - assert b[0] == 'w' - assert b[:] == 'world' - raises(IndexError, 'b[5]') - exc = raises(TypeError, "b[0] = 'W'") - assert str(exc.value) == "buffer is read-only" - exc = raises(TypeError, "b[:] = '12345'") - assert str(exc.value) == "buffer is read-only" - exc = raises(TypeError, 'b[5] = "."') - assert str(exc.value) == "buffer is read-only" - exc = raises(TypeError, "b[4:2] = ''") - assert str(exc.value) == "buffer is 
read-only" - assert str(b) == 'world' - assert a.tostring() == 'hello world' - - b = buffer(b, 2) - assert len(b) == 3 - assert b[0] == 'r' - assert b[:] == 'rld' - raises(IndexError, 'b[3]') - exc = raises(TypeError, "b[1] = 'X'") - assert str(exc.value) == "buffer is read-only" - exc = raises(TypeError, 'b[3] = "."') - assert str(exc.value) == "buffer is read-only" - assert a.tostring() == 'hello world' - - a = array.array("c", 'hello world') - b = buffer(a, 1, 8) - assert len(b) == 8 - assert b[0] == 'e' - assert b[:] == 'ello wor' - raises(IndexError, 'b[8]') - exc = raises(TypeError, "b[0] = 'E'") - assert str(exc.value) == "buffer is read-only" - assert str(b) == 'ello wor' - assert a.tostring() == 'hello world' - exc = raises(TypeError, "b[:] = '12345678'") - assert str(exc.value) == "buffer is read-only" - assert a.tostring() == 'hello world' - exc = raises(TypeError, 'b[8] = "."') - assert str(exc.value) == "buffer is read-only" - - b = buffer(b, 2, 3) - assert len(b) == 3 - assert b[2] == ' ' - assert b[:] == 'lo ' - raises(IndexError, 'b[3]') - exc = raises(TypeError, "b[1] = 'X'") - assert str(exc.value) == "buffer is read-only" - assert a.tostring() == 'hello world' - exc = raises(TypeError, 'b[3] = "."') - assert str(exc.value) == "buffer is read-only" - - b = buffer(a, 55) - assert len(b) == 0 - assert b[:] == '' - b = buffer(a, 6, 999) - assert len(b) == 5 - assert b[:] == 'world' - - raises(ValueError, buffer, a, -1) - raises(ValueError, buffer, a, 0, -2) - - def test_slice(self): - # Test extended slicing by comparing with list slicing. 
- s = "".join(chr(c) for c in list(range(255, -1, -1))) - b = buffer(s) - indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300) - for start in indices: - for stop in indices: - # Skip step 0 (invalid) - for step in indices[1:]: - assert b[start:stop:step] == s[start:stop:step] - - def test_getitem_only_ints(self): - class MyInt(object): - def __init__(self, x): - self.x = x - - def __int__(self): - return self.x - - buf = buffer('hello world') - raises(TypeError, "buf[MyInt(0)]") - raises(TypeError, "buf[MyInt(0):MyInt(5)]") diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -145,10 +145,8 @@ assert repr(float("+nan")) == "nan" assert repr(float("-nAn")) == "nan" - assert float(buffer("inf")) == inf - assert float(bytearray("inf")) == inf - exc = raises(TypeError, float, memoryview("inf")) - assert str(exc.value) == "float() argument must be a string or a number" + assert float(memoryview(b"inf")) == inf + assert float(bytearray(b"inf")) == inf def test_float_unicode(self): # u00A0 and u2000 are some kind of spaces diff --git a/pypy/objspace/std/test/test_strbufobject.py b/pypy/objspace/std/test/test_strbufobject.py --- a/pypy/objspace/std/test/test_strbufobject.py +++ b/pypy/objspace/std/test/test_strbufobject.py @@ -46,7 +46,6 @@ def test_buffer(self): s = 'a'.__add__('b') - assert buffer(s) == buffer('ab') assert memoryview(s) == 'ab' def test_add_strbuf(self): From noreply at buildbot.pypy.org Wed Apr 30 01:12:00 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:00 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix imports Message-ID: <20140429231200.1F9571C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71072:7a6847bedfb0 Date: 2014-04-29 16:08 -0700 http://bitbucket.org/pypy/pypy/changeset/7a6847bedfb0/ Log: fix imports diff --git 
a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -3,7 +3,7 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.objspace.std.memoryview import _buffer_setitem +from pypy.objspace.std.memoryobject import _buffer_setitem from rpython.rlib.buffer import Buffer from rpython.rtyper.annlowlevel import llstr diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -2,16 +2,16 @@ from pypy.interpreter.typedef import ( TypeDef, generic_new_descr, GetSetProperty) from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.buffer import RWBuffer +from rpython.rlib.buffer import Buffer from rpython.rlib.rStringIO import RStringIO from rpython.rlib.rarithmetic import r_longlong from pypy.module._io.interp_bufferedio import W_BufferedIOBase from pypy.module._io.interp_iobase import convert_size -from pypy.objspace.std.memoryview import W_MemoryView +from pypy.objspace.std.memoryobject import W_MemoryView import sys -class BytesIOBuffer(RWBuffer): +class BytesIOBuffer(Buffer): def __init__(self, w_bytesio): self.w_bytesio = w_bytesio diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib import buffer from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, Py_buffer) from pypy.module.cpyext.pyobject import PyObject, Py_DecRef -from pypy.interpreter import buffer @cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) def PyBuffer_IsContiguous(space, view, fortran): diff 
--git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -4,7 +4,7 @@ from pypy.module.cpyext.api import Py_buffer, cpython_api from pypy.module.cpyext.pyobject import PyObject, from_ref from pypy.module.cpyext.buffer import CBuffer -from pypy.objspace.std.memoryview import W_MemoryView +from pypy.objspace.std.memoryobject import W_MemoryView @cpython_api([PyObject], PyObject) def PyMemoryView_FromObject(space, w_obj): From noreply at buildbot.pypy.org Wed Apr 30 01:12:01 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:01 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt buffer refactor to py3: Message-ID: <20140429231201.676271C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71073:02d3748ca5f6 Date: 2014-04-29 16:08 -0700 http://bitbucket.org/pypy/pypy/changeset/02d3748ca5f6/ Log: adapt buffer refactor to py3: o have the old buf methods fallback to the new interface o fix getarg_w unicode handling o kill bufferstr0_new_w diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -3,6 +3,7 @@ from rpython.rlib.cache import Cache from rpython.tool.uid import HUGEVAL_BYTES from rpython.rlib import jit, types +from rpython.rlib.buffer import StringBuffer from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, compute_unique_id, specialize) @@ -207,7 +208,7 @@ w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_memoryview): return w_result.readbuf_w(space) - raise TypeError + return self.buffer_w(space, space.BUF_SIMPLE) def writebuf_w(self, space): w_impl = space.lookup(self, '__buffer__') @@ -215,7 +216,7 @@ w_result = space.get_and_call_function(w_impl, self) if 
space.isinstance_w(w_result, space.w_memoryview): return w_result.writebuf_w(space) - raise TypeError + return self.buffer_w(space, space.BUF_WRITABLE) def charbuf_w(self, space): w_impl = space.lookup(self, '__buffer__') @@ -223,7 +224,7 @@ w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_memoryview): return w_result.charbuf_w(space) - raise TypeError + return self.buffer_w(space, space.BUF_SIMPLE).as_str() def bytes_w(self, space): self._typed_unwrap_error(space, "bytes") @@ -1406,10 +1407,10 @@ def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): - name = "None" + e = oefmt(self.w_TypeError, "must be %s, not None", expected) else: - name = self.type(w_obj).get_module_type_name() - raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + e = oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + raise e @specialize.arg(1) def getarg_w(self, code, w_obj): @@ -1421,7 +1422,7 @@ if self.isinstance_w(w_obj, self.w_str): return w_obj.readbuf_w(self) if self.isinstance_w(w_obj, self.w_unicode): - return self.str(w_obj).readbuf_w(self) + return StringBuffer(w_obj.identifier_w(self)) try: return w_obj.buffer_w(self, 0) except TypeError: @@ -1432,9 +1433,9 @@ self._getarg_error("string or buffer", w_obj) elif code == 's#': if self.isinstance_w(w_obj, self.w_str): - return w_obj.str_w(self) + return w_obj.bytes_w(self) if self.isinstance_w(w_obj, self.w_unicode): - return self.str(w_obj).str_w(self) + return w_obj.identifier_w(self) try: return w_obj.readbuf_w(self).as_str() except TypeError: @@ -1459,14 +1460,6 @@ else: assert False - def bufferstr0_new_w(self, w_obj): - from rpython.rlib import rstring - result = self.bufferstr_new_w(w_obj) - if '\x00' in result: - raise OperationError(self.w_TypeError, self.wrap( - 'argument must be a string without NUL characters')) - return rstring.assert_str0(result) - # XXX rename/replace with code more like CPython getargs for buffers def 
bufferstr_w(self, w_obj): # Directly returns an interp-level str. Note that if w_obj is a diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -4,7 +4,7 @@ The rest, dealing with variables in optimized ways, is in nestedscope.py. """ -from rpython.rlib import jit, rstackovf +from rpython.rlib import jit, rstackovf, rstring from rpython.rlib.debug import check_nonneg from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_uint, intmask @@ -1556,12 +1556,13 @@ source = space.bytes0_w(w_source) else: try: - source = space.bufferstr0_new_w(w_source) + buf = space.buffer_w(w_source, space.BUF_SIMPLE) except OperationError as e: if not e.match(space, space.w_TypeError): raise raise oefmt(space.w_TypeError, "%s() arg 1 must be a %s object", funcname, what) + source = rstring.assert_str0(buf.as_str()) return source, flags From noreply at buildbot.pypy.org Wed Apr 30 01:12:02 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:02 +0200 (CEST) Subject: [pypy-commit] pypy py3k: buffer_w now takes flags, Buffers must now be _immutable_ Message-ID: <20140429231202.AEAE41C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71074:cc37714daab8 Date: 2014-04-29 16:08 -0700 http://bitbucket.org/pypy/pypy/changeset/cc37714daab8/ Log: buffer_w now takes flags, Buffers must now be _immutable_ diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -48,7 +48,7 @@ self.buffer = buffer self.keepalive = keepalive - def buffer_w(self, space): + def buffer_w(self, space, flags): return self.buffer def descr_len(self, space): diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ 
-12,6 +12,8 @@ class BytesIOBuffer(Buffer): + _immutable_ = True + def __init__(self, w_bytesio): self.w_bytesio = w_bytesio diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -36,5 +36,7 @@ self.c_len) class CBuffer(CBufferMixin, buffer.Buffer): + _immutable_ = True + def __del__(self): CBufferMixin.destructor(self) diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -253,7 +253,7 @@ the buffer protocol.""" if space.is_w(space.type(w_obj), space.w_bytes): return w_obj - buffer = space.buffer_w(w_obj) + buffer = space.buffer_w(w_obj, space.BUF_FULL_RO) return space.wrapbytes(buffer.as_str()) From noreply at buildbot.pypy.org Wed Apr 30 01:12:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k: buffer fixes for py3k unicode/bytes Message-ID: <20140429231203.E80C81C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71075:7d6cb4c95b5e Date: 2014-04-29 16:09 -0700 http://bitbucket.org/pypy/pypy/changeset/7d6cb4c95b5e/ Log: buffer fixes for py3k unicode/bytes diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -51,7 +51,7 @@ # XXX inefficient @unwrap_spec(format=str, offset=int) def pack_into(space, format, w_buf, offset, args_w): - res = pack(space, format, args_w).str_w(space) + res = pack(space, format, args_w).bytes_w(space) buf = space.writebuf_w(w_buf) if offset < 0: offset += buf.getlength() diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -436,7 +436,7 @@ @staticmethod def _op_val(space, w_other): try: - return 
space.str_w(w_other) + return space.bytes_w(w_other) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -740,12 +740,12 @@ # String-like argument try: - string = space.bufferstr_new_w(w_source) + buf = space.buffer_w(w_source, space.BUF_FULL_RO) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: - return [c for c in string] + return [c for c in buf.as_str()] if space.isinstance_w(w_source, space.w_unicode): raise OperationError( diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -72,19 +72,6 @@ self._utf8 = identifier return identifier - def readbuf_w(self, space): - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - builder = StringBuilder(len(self._value) * UNICODE_SIZE) - for unich in self._value: - pack_unichar(unich, builder) - return StringBuffer(builder.build()) - - def writebuf_w(self, space): - raise OperationError(space.w_TypeError, space.wrap( - "cannot use unicode as modifiable buffer")) - - #charbuf_w = str_w # XXX: - def listview_unicode(w_self): return _create_list_from_unicode(w_self._value) From noreply at buildbot.pypy.org Wed Apr 30 01:12:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:05 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix fcntl Message-ID: <20140429231205.188E51C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71076:14237185e6ff Date: 2014-04-29 16:09 -0700 http://bitbucket.org/pypy/pypy/changeset/14237185e6ff/ Log: fix fcntl diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -103,7 +103,7 @@ if rv < 0: raise _get_error(space, "fcntl") arg = rffi.charpsize2str(ll_arg, len(arg)) - return space.wrap(arg) + return space.wrapbytes(arg) finally: lltype.free(ll_arg, 
flavor='raw') @@ -112,7 +112,7 @@ rv = fcntl_int(fd, op, intarg) if rv < 0: raise _get_error(space, "fcntl") - return space.wrapbytes(rv) + return space.wrap(rv) @unwrap_spec(op=int) def flock(space, w_fd, op): @@ -204,7 +204,8 @@ try: rwbuffer = space.writebuf_w(w_arg) except OperationError, e: - if not e.match(space, space.w_TypeError): + if not (e.match(space, space.w_TypeError) or + e.match(space, space.w_BufferError)): raise else: arg = rwbuffer.as_str() @@ -217,7 +218,7 @@ if mutate_flag != 0: rwbuffer.setslice(0, arg) return space.wrap(rv) - return space.wrap(arg) + return space.wrapbytes(arg) finally: lltype.free(ll_arg, flavor='raw') @@ -238,7 +239,7 @@ if rv < 0: raise _get_error(space, "ioctl") arg = rffi.charpsize2str(ll_arg, len(arg)) - return space.wrap(arg) + return space.wrapbytes(arg) finally: lltype.free(ll_arg, flavor='raw') @@ -247,4 +248,4 @@ rv = ioctl_int(fd, op, intarg) if rv < 0: raise _get_error(space, "ioctl") - return space.wrapbytes(rv) + return space.wrap(rv) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -47,8 +47,6 @@ assert fcntl.fcntl(f, 1, 0) == 0 assert fcntl.fcntl(f, 2, "foo") == b"foo" assert fcntl.fcntl(f, 2, memoryview(b"foo")) == b"foo" - exc = raises(TypeError, fcntl.fcntl, f, 2, memoryview(b"foo")) - assert 'integer' in str(exc.value) try: os.O_LARGEFILE @@ -225,12 +223,10 @@ assert buf.tostring() == expected buf = array.array('i', [0]) - res = fcntl.ioctl(mfd, TIOCGPGRP, buffer(buf)) - assert res == expected - assert buf.tostring() == b'\x00' * 4 + res = fcntl.ioctl(mfd, TIOCGPGRP, memoryview(buf)) + assert res == 0 + assert buf.tostring() == expected - exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview(b'abc')) - assert 'integer' in str(exc.value) exc = raises(TypeError, fcntl.ioctl, mfd, TIOCGPGRP, memoryview(b'abc'), False) assert str(exc.value) == "ioctl requires a file or 
file descriptor, an integer and optionally an integer or buffer argument" From noreply at buildbot.pypy.org Wed Apr 30 01:12:06 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:06 +0200 (CEST) Subject: [pypy-commit] pypy py3k: add space.w_memoryview Message-ID: <20140429231206.33FA31C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71077:c50c8b4ea684 Date: 2014-04-29 16:09 -0700 http://bitbucket.org/pypy/pypy/changeset/c50c8b4ea684/ Log: add space.w_memoryview diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -354,7 +354,7 @@ ObjSpace.ExceptionTable + ['int', 'str', 'float', 'tuple', 'list', 'dict', 'bytes', 'complex', 'slice', 'bool', - 'text', 'object', 'unicode', 'bytearray']): + 'text', 'object', 'unicode', 'bytearray', 'memoryview']): setattr(space, 'w_' + name, w_some_obj()) space.w_type = w_some_type() # From noreply at buildbot.pypy.org Wed Apr 30 01:12:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:07 +0200 (CEST) Subject: [pypy-commit] pypy py3k: have array support the new buffer interface Message-ID: <20140429231207.6A7061C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71078:4cd75594bdd1 Date: 2014-04-29 16:09 -0700 http://bitbucket.org/pypy/pypy/changeset/4cd75594bdd1/ Log: have array support the new buffer interface diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -138,6 +138,9 @@ self.len = 0 self.allocated = 0 + def buffer_w(self, space, flags): + return ArrayBuffer(self, False) + def readbuf_w(self, space): return ArrayBuffer(self, True) From noreply at buildbot.pypy.org Wed Apr 30 01:12:08 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:08 +0200 (CEST) Subject: 
[pypy-commit] pypy py3k: fix Message-ID: <20140429231208.939531C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71079:f1fa7709d05e Date: 2014-04-29 16:09 -0700 http://bitbucket.org/pypy/pypy/changeset/f1fa7709d05e/ Log: fix diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -62,9 +62,8 @@ res = self.buffer.getslice(start, stop, step, size) return space.wrapbytes(res) - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - _buffer_setitem(space, self.buffer, w_index, newstring) + def descr_setitem(self, space, w_index, w_newstring): + _buffer_setitem(space, self.buffer, w_index, w_newstring) MiniBuffer.typedef = TypeDef( diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -430,7 +430,7 @@ rwbuffer = None if not space.is_none(w_buf): - rwbuffer = space.rwbuffer_w(w_buf) + rwbuffer = space.getarg_w('w*', w_buf) lgt = rwbuffer.getlength() if num_bytes < 0 or num_bytes > lgt: num_bytes = lgt diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -29,7 +29,7 @@ assert data == bytearray(eval("b'z123fg'")) v[0:3] = v[2:5] assert data == bytearray(eval("b'23f3fg'")) - exc = raises(ValueError, "v[2] = 'spam'") + exc = raises(ValueError, "v[2] = b'spam'") assert str(exc.value) == "cannot modify size of memoryview object" exc = raises(NotImplementedError, "v[0:2:2] = 'spam'") assert str(exc.value) == "" From noreply at buildbot.pypy.org Wed Apr 30 01:12:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:09 +0200 (CEST) Subject: [pypy-commit] pypy py3k: this is supported in py3k Message-ID: 
<20140429231209.C1C151C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71080:f800c32de51b Date: 2014-04-29 16:09 -0700 http://bitbucket.org/pypy/pypy/changeset/f800c32de51b/ Log: this is supported in py3k diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -16,12 +16,12 @@ x = marshal.loads(s) assert x == case and type(x) is type(case) - exc = raises(TypeError, marshal.loads, memoryview(s)) - assert str(exc.value) == "must be string or read-only buffer, not memoryview" + y = marshal.loads(memoryview(s)) + assert y == case and type(y) is type(case) import sys if '__pypy__' in sys.builtin_module_names: - f = StringIO.StringIO() + f = BytesIO() marshal.dump(case, f) f.seek(0) x = marshal.load(f) From noreply at buildbot.pypy.org Wed Apr 30 01:12:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Apr 2014 01:12:11 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge upstream Message-ID: <20140429231211.7967F1C02D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71081:e43a6cc83813 Date: 2014-04-29 16:10 -0700 http://bitbucket.org/pypy/pypy/changeset/e43a6cc83813/ Log: merge upstream diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,9 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) + try: + imp.load_module('_testcapi', fp, filename, description) + finally: + fp.close() except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -351,7 +351,7 @@ limit 
-= len(self.keyword_names_w) for i in range(len(self.keywords)): if i < limit: - w_key = space.wrap(self.keywords[i]) + w_key = space.wrap(self.keywords[i].decode('utf-8')) else: w_key = self.keyword_names_w[i - limit] space.setitem(w_kwds, w_key, self.keywords_w[i]) @@ -446,7 +446,7 @@ break else: if i < limit: - w_key = space.wrap(keywords[i]) + w_key = space.wrap(keywords[i].decode('utf-8')) else: w_key = keyword_names_w[i - limit] space.setitem(w_kwds, w_key, keywords_w[i]) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -433,6 +433,9 @@ def getconstant_w(self, index): return self.getcode().co_consts_w[index] + def getname_u(self, index): + return self.space.identifier_w(self.getname_w(index)) + def getname_w(self, index): return self.getcode().co_names_w[index] @@ -753,9 +756,9 @@ self.pushvalue(w_build_class) def STORE_NAME(self, varindex, next_instr): - w_varname = self.getname_w(varindex) + varname = self.getname_u(varindex) w_newvalue = self.popvalue() - self.space.setitem(self.w_locals, w_varname, w_newvalue) + self.space.setitem_str(self.w_locals, varname, w_newvalue) def DELETE_NAME(self, varindex, next_instr): w_varname = self.getname_w(varindex) @@ -765,8 +768,8 @@ # catch KeyErrors and turn them into NameErrors if not e.match(self.space, self.space.w_KeyError): raise - raise oefmt(self.space.w_NameError, "name '%s' is not defined", - self.space.str_w(w_varname)) + raise oefmt(self.space.w_NameError, + "name %R is not defined", w_varname) def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() @@ -817,7 +820,7 @@ self.space.delattr(w_obj, w_attributename) def STORE_GLOBAL(self, nameindex, next_instr): - varname = self.space.str_w(self.getname_w(nameindex)) + varname = self.getname_u(nameindex) w_newvalue = self.popvalue() self.space.setitem_str(self.w_globals, varname, w_newvalue) @@ -827,24 +830,24 @@ def LOAD_NAME(self, nameindex, 
next_instr): w_varname = self.getname_w(nameindex) + varname = self.space.identifier_w(w_varname) if self.w_locals is not self.w_globals: - w_value = self.space.finditem(self.w_locals, w_varname) + w_value = self.space.finditem_str(self.w_locals, varname) if w_value is not None: self.pushvalue(w_value) return # fall-back - w_value = self._load_global(w_varname) + w_value = self._load_global(varname) if w_value is None: raise oefmt(self.space.w_NameError, "name %R is not defined", w_varname) self.pushvalue(w_value) - def _load_global(self, w_varname): - w_value = self.space.finditem(self.w_globals, w_varname) + def _load_global(self, varname): + w_value = self.space.finditem_str(self.w_globals, varname) if w_value is None: # not in the globals, now look in the built-ins - w_value = self.get_builtin().getdictvalue( - self.space, self.space.identifier_w(w_varname)) + w_value = self.get_builtin().getdictvalue(self.space, varname) return w_value _load_global._always_inline_ = True @@ -855,7 +858,7 @@ def LOAD_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - w_value = self._load_global(w_varname) + w_value = self._load_global(self.space.identifier_w(w_varname)) if w_value is None: self._load_global_failed(w_varname) self.pushvalue(w_value) @@ -993,7 +996,7 @@ if not e.match(self.space, self.space.w_AttributeError): raise raise oefmt(self.space.w_ImportError, - "cannot import name '%s'", self.space.str_w(w_name)) + "cannot import name %R", w_name) self.pushvalue(w_obj) def YIELD_VALUE(self, oparg, next_instr): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -75,21 +75,18 @@ assert x == 42 def test_list_strategy(self): - py3k_skip("XXX: strategies are currently broken") from __pypy__ import list_strategy l = [1, 2, 3] assert list_strategy(l) == "int" + l = list(range(1, 2)) + assert 
list_strategy(l) == "int" l = [b"a", b"b", b"c"] assert list_strategy(l) == "bytes" l = ["a", "b", "c"] assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" - l = range(3) - assert list_strategy(l) == "simple_range" - l = range(1, 2) - assert list_strategy(l) == "range" l = [1, "b", 3] assert list_strategy(l) == "object" l = [] diff --git a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -16,7 +16,6 @@ W_CType.pack_list_of_items = self._original def test_fast_init_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend LONG = _cffi_backend.new_primitive_type('long') P_LONG = _cffi_backend.new_pointer_type(LONG) @@ -37,7 +36,6 @@ assert buf[2] == 3.3 def test_fast_init_short_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend SHORT = _cffi_backend.new_primitive_type('short') P_SHORT = _cffi_backend.new_pointer_type(SHORT) @@ -50,7 +48,6 @@ raises(OverflowError, _cffi_backend.newp, SHORT_ARRAY, [-40000]) def test_fast_init_longlong_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend import sys large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) @@ -64,7 +61,6 @@ assert buf[3] == large_int def test_fast_init_ushort_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend USHORT = _cffi_backend.new_primitive_type('unsigned short') P_USHORT = _cffi_backend.new_pointer_type(USHORT) @@ -77,18 +73,17 @@ raises(OverflowError, _cffi_backend.newp, USHORT_ARRAY, [-1]) def test_fast_init_ulong_from_list(self): - py3k_skip('XXX: strategies are currently broken') import sys import _cffi_backend ULONG = _cffi_backend.new_primitive_type('unsigned long') P_ULONG = _cffi_backend.new_pointer_type(ULONG) ULONG_ARRAY = 
_cffi_backend.new_array_type(P_ULONG, None) - buf = _cffi_backend.newp(ULONG_ARRAY, [1, 2, sys.maxint]) + buf = _cffi_backend.newp(ULONG_ARRAY, [1, 2, sys.maxsize]) assert buf[0] == 1 assert buf[1] == 2 - assert buf[2] == sys.maxint + assert buf[2] == sys.maxsize raises(OverflowError, _cffi_backend.newp, ULONG_ARRAY, [-1]) - raises(OverflowError, _cffi_backend.newp, ULONG_ARRAY, [-sys.maxint]) + raises(OverflowError, _cffi_backend.newp, ULONG_ARRAY, [-sys.maxsize]) def test_fast_init_cfloat_from_list(self): import _cffi_backend @@ -109,7 +104,6 @@ assert float(buf[1]) == -3.5 def test_fast_init_bool_from_list(self): - py3k_skip('XXX: strategies are currently broken') import _cffi_backend BOOL = _cffi_backend.new_primitive_type('_Bool') P_BOOL = _cffi_backend.new_pointer_type(BOOL) diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py --- a/pypy/module/_posixsubprocess/interp_subprocess.py +++ b/pypy/module/_posixsubprocess/interp_subprocess.py @@ -219,7 +219,8 @@ def cloexec_pipe(space): - """"cloexec_pipe() -> (read_end, write_end) + """cloexec_pipe() -> (read_end, write_end) + Create a pipe whose ends have the cloexec flag set.""" with lltype.scoped_alloc(rffi.CArrayPtr(rffi.INT).TO, 2) as fds: diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -409,7 +409,7 @@ raise OperationError(space.w_TypeError, space.wrap( "Cannot use string as modifiable buffer")) - def listview_bytes(self): + def listview_int(self): return _create_list_from_bytes(self._value) def ord(self, space): @@ -632,8 +632,8 @@ l = space.listview_bytes(w_list) if l is not None: if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) + return space.wrapbytes(l[0]) + return space.wrapbytes(self._val(space).join(l)) return self._StringMethods_descr_join(space, w_list) def _join_return_one(self, space, 
w_obj): @@ -657,8 +657,8 @@ def _create_list_from_bytes(value): # need this helper function to allow the jit to look inside and inline - # listview_bytes - return [s for s in value] + # listview_int + return [ord(s) for s in value] W_BytesObject.EMPTY = W_BytesObject('') W_BytesObject.PREBUILT = [W_BytesObject(chr(i)) for i in range(256)] diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -30,7 +30,7 @@ def _wrapkey(space, key): - return space.wrap(key) + return space.wrap(key.decode('utf-8')) class ModuleDictStrategy(DictStrategy): @@ -63,7 +63,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): self.setitem_str(w_dict, space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) @@ -85,7 +85,7 @@ def setdefault(self, w_dict, w_key, w_default): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): key = space.str_w(w_key) w_result = self.getitem_str(w_dict, key) if w_result is not None: @@ -99,7 +99,7 @@ def delitem(self, w_dict, w_key): space = self.space w_key_type = space.type(w_key) - if space.is_w(w_key_type, space.w_str): + if space.is_w(w_key_type, space.w_unicode): key = space.str_w(w_key) dict_w = self.unerase(w_dict.dstorage) try: @@ -120,7 +120,7 @@ def getitem(self, w_dict, w_key): space = self.space w_lookup_type = space.type(w_key) - if space.is_w(w_lookup_type, space.w_str): + if space.is_w(w_lookup_type, space.w_unicode): return self.getitem_str(w_dict, space.str_w(w_key)) elif _never_equal_to_string(space, w_lookup_type): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -55,10 +55,10 @@ elif space.config.objspace.std.withmapdict 
and instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) - #elif instance or strdict or module: - # assert w_type is None - # strategy = space.fromcache(BytesDictStrategy) - elif False and kwargs: + elif instance or strdict or module: + assert w_type is None + strategy = space.fromcache(UnicodeDictStrategy) + elif kwargs: assert w_type is None from pypy.objspace.std.kwargsdict import EmptyKwargsDictStrategy strategy = space.fromcache(EmptyKwargsDictStrategy) @@ -113,14 +113,15 @@ if w_fill is None: w_fill = space.w_None if space.is_w(w_type, space.w_dict): - w_dict = W_DictMultiObject.allocate_and_init_instance(space, - w_type) - - byteslist = space.listview_bytes(w_keys) - if byteslist is not None: - for key in byteslist: - w_dict.setitem_str(key, w_fill) + ulist = space.listview_unicode(w_keys) + if ulist is not None: + strategy = space.fromcache(UnicodeDictStrategy) + storage = strategy.get_storage_fromkeys(ulist, w_fill) + w_dict = space.allocate_instance(W_DictMultiObject, w_type) + W_DictMultiObject.__init__(w_dict, space, strategy, storage) else: + w_dict = W_DictMultiObject.allocate_and_init_instance(space, + w_type) for w_key in space.listview(w_keys): w_dict.setitem(w_key, w_fill) else: @@ -360,6 +361,9 @@ def get_empty_storage(self): raise NotImplementedError + def decodekey_str(self, key): + return key.decode('utf-8') + @jit.look_inside_iff(lambda self, w_dict: w_dict_unrolling_heuristic(w_dict)) def w_keys(self, w_dict): @@ -430,18 +434,18 @@ return self.erase(None) def switch_to_correct_strategy(self, w_dict, w_key): + from pypy.objspace.std.intobject import W_IntObject withidentitydict = self.space.config.objspace.std.withidentitydict - # if type(w_key) is self.space.StringObjectCls: - # self.switch_to_bytes_strategy(w_dict) - # return + if type(w_key) is self.space.StringObjectCls: + self.switch_to_bytes_strategy(w_dict) + return if type(w_key) is self.space.UnicodeObjectCls: 
self.switch_to_unicode_strategy(w_dict) return + if type(w_key) is W_IntObject: + self.switch_to_int_strategy(w_dict) + return w_type = self.space.type(w_key) - # XXX: disable IntDictStrategy for now, because in py3k ints are - # actually long - ## if self.space.is_w(w_type, self.space.w_int): - ## self.switch_to_int_strategy(w_dict) if withidentitydict and w_type.compares_by_identity(): self.switch_to_identity_strategy(w_dict) else: @@ -500,7 +504,7 @@ w_dict.setitem(w_key, w_value) def setitem_str(self, w_dict, key, w_value): - self.switch_to_bytes_strategy(w_dict) + self.switch_to_unicode_strategy(w_dict) w_dict.setitem_str(key, w_value) def delitem(self, w_dict, w_key): @@ -700,7 +704,7 @@ def setitem_str(self, w_dict, key, w_value): self.switch_to_object_strategy(w_dict) - w_dict.setitem(self.space.wrap(key), w_value) + w_dict.setitem(self.space.wrap(self.decodekey_str(key)), w_value) def setdefault(self, w_dict, w_key, w_default): if self.is_correct_type(w_key): @@ -722,7 +726,7 @@ return len(self.unerase(w_dict.dstorage)) def getitem_str(self, w_dict, key): - return self.getitem(w_dict, self.space.wrap(key)) + return self.getitem(w_dict, self.space.wrap(self.decodekey_str(key))) def getitem(self, w_dict, w_key): space = self.space @@ -802,7 +806,7 @@ return self.space.newlist(self.unerase(w_dict.dstorage).keys()) def setitem_str(self, w_dict, s, w_value): - self.setitem(w_dict, self.space.wrap(s), w_value) + self.setitem(w_dict, self.space.wrap(self.decodekey_str(s)), w_value) def switch_to_object_strategy(self, w_dict): assert 0, "should be unreachable" @@ -816,10 +820,10 @@ unerase = staticmethod(unerase) def wrap(self, unwrapped): - return self.space.wrap(unwrapped) + return self.space.wrapbytes(unwrapped) def unwrap(self, wrapped): - return self.space.str_w(wrapped) + return self.space.bytes_w(wrapped) def is_correct_type(self, w_obj): space = self.space @@ -833,21 +837,21 @@ def _never_equal_to(self, w_lookup_type): return 
_never_equal_to_string(self.space, w_lookup_type) - def setitem_str(self, w_dict, key, w_value): - assert key is not None - self.unerase(w_dict.dstorage)[key] = w_value + ##def setitem_str(self, w_dict, key, w_value): + ## assert key is not None + ## self.unerase(w_dict.dstorage)[key] = w_value - def getitem(self, w_dict, w_key): - space = self.space - # -- This is called extremely often. Hack for performance -- - if type(w_key) is space.StringObjectCls: - return self.getitem_str(w_dict, w_key.unwrap(space)) - # -- End of performance hack -- - return AbstractTypedStrategy.getitem(self, w_dict, w_key) + ##def getitem(self, w_dict, w_key): + ## space = self.space + ## # -- This is called extremely often. Hack for performance -- + ## if type(w_key) is space.StringObjectCls: + ## return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None) + ## # -- End of performance hack -- + ## return AbstractTypedStrategy.getitem(self, w_dict, w_key) - def getitem_str(self, w_dict, key): - assert key is not None - return self.unerase(w_dict.dstorage).get(key, None) + ##def getitem_str(self, w_dict, key): + ## assert key is not None + ## return self.unerase(w_dict.dstorage).get(key, None) def listview_bytes(self, w_dict): return self.unerase(w_dict.dstorage).keys() @@ -856,21 +860,21 @@ return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): - return space.wrap(key) + return space.wrapbytes(key) - @jit.look_inside_iff(lambda self, w_dict: - w_dict_unrolling_heuristic(w_dict)) - def view_as_kwargs(self, w_dict): - return (None, None) # XXX: fix me to return unicode keys - d = self.unerase(w_dict.dstorage) - l = len(d) - keys, values = [None] * l, [None] * l - i = 0 - for key, val in d.iteritems(): - keys[i] = key - values[i] = val - i += 1 - return keys, values + ##@jit.look_inside_iff(lambda self, w_dict: + ## w_dict_unrolling_heuristic(w_dict)) + ##def view_as_kwargs(self, w_dict): + ## return (None, None) # XXX: fix me to return unicode keys + 
## d = self.unerase(w_dict.dstorage) + ## l = len(d) + ## keys, values = [None] * l, [None] * l + ## i = 0 + ## for key, val in d.iteritems(): + ## keys[i] = key + ## values[i] = val + ## i += 1 + ## return keys, values create_iterator_classes(BytesDictStrategy) @@ -900,43 +904,51 @@ # we should implement the same shortcuts as we do for BytesDictStrategy - ## def setitem_str(self, w_dict, key, w_value): - ## assert key is not None - ## self.unerase(w_dict.dstorage)[key] = w_value + def setitem_str(self, w_dict, key, w_value): + assert key is not None + self.unerase(w_dict.dstorage)[self.decodekey_str(key)] = w_value - ## def getitem(self, w_dict, w_key): - ## space = self.space - ## # -- This is called extremely often. Hack for performance -- - ## if type(w_key) is space.StringObjectCls: - ## return self.getitem_str(w_dict, w_key.unwrap(space)) - ## # -- End of performance hack -- - ## return AbstractTypedStrategy.getitem(self, w_dict, w_key) + def getitem(self, w_dict, w_key): + space = self.space + # -- This is called extremely often. 
Hack for performance -- + if type(w_key) is space.UnicodeObjectCls: + return self.unerase(w_dict.dstorage).get(w_key.unwrap(space), None) + # -- End of performance hack -- + return AbstractTypedStrategy.getitem(self, w_dict, w_key) - ## def getitem_str(self, w_dict, key): - ## assert key is not None - ## return self.unerase(w_dict.dstorage).get(key, None) + def getitem_str(self, w_dict, key): + assert key is not None + return self.unerase(w_dict.dstorage).get(self.decodekey_str(key), None) def listview_unicode(self, w_dict): return self.unerase(w_dict.dstorage).keys() - ## def w_keys(self, w_dict): - ## return self.space.newlist_bytes(self.listview_bytes(w_dict)) + def w_keys(self, w_dict): + return self.space.newlist_unicode(self.listview_unicode(w_dict)) def wrapkey(space, key): return space.wrap(key) - ## @jit.look_inside_iff(lambda self, w_dict: - ## w_dict_unrolling_heuristic(w_dict)) - ## def view_as_kwargs(self, w_dict): - ## d = self.unerase(w_dict.dstorage) - ## l = len(d) - ## keys, values = [None] * l, [None] * l - ## i = 0 - ## for key, val in d.iteritems(): - ## keys[i] = key - ## values[i] = val - ## i += 1 - ## return keys, values + @jit.look_inside_iff(lambda self, w_dict: + w_dict_unrolling_heuristic(w_dict)) + def view_as_kwargs(self, w_dict): + d = self.unerase(w_dict.dstorage) + l = len(d) + keys, values = [None] * l, [None] * l + i = 0 + for key, val in d.iteritems(): + keys[i] = key.encode('utf-8') + values[i] = val + i += 1 + return keys, values + + def get_storage_fromkeys(self, keys_w, w_fill): + """Return an initialized storage with keys and fill values""" + storage = {} + mark_dict_non_null(storage) + for key in keys_w: + storage[key] = w_fill + return self.erase(storage) create_iterator_classes(UnicodeDictStrategy) @@ -956,8 +968,8 @@ return self.erase({}) def is_correct_type(self, w_obj): - space = self.space - return space.is_w(space.type(w_obj), space.w_int) + from pypy.objspace.std.intobject import W_IntObject + return type(w_obj) is 
W_IntObject def _never_equal_to(self, w_lookup_type): space = self.space diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -6,16 +6,16 @@ from rpython.rlib import jit, rerased from pypy.objspace.std.dictmultiobject import ( - BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, + DictStrategy, EmptyDictStrategy, ObjectDictStrategy, UnicodeDictStrategy, create_iterator_classes) def _wrapkey(space, key): - return space.wrap(key) + return space.wrap(key.decode('utf-8')) class EmptyKwargsDictStrategy(EmptyDictStrategy): - def switch_to_bytes_strategy(self, w_dict): + def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy @@ -39,7 +39,7 @@ def is_correct_type(self, w_obj): space = self.space - return space.is_w(space.type(w_obj), space.w_str) + return space.is_w(space.type(w_obj), space.w_unicode) def _never_equal_to(self, w_lookup_type): return False @@ -66,7 +66,7 @@ else: # limit the size so that the linear searches don't become too long if len(keys) >= 16: - self.switch_to_bytes_strategy(w_dict) + self.switch_to_unicode_strategy(w_dict) w_dict.setitem_str(key, w_value) else: keys.append(key) @@ -116,7 +116,7 @@ def w_keys(self, w_dict): l = self.unerase(w_dict.dstorage)[0] - return self.space.newlist_bytes(l[:]) + return self.space.newlist_unicode(l[:]) def values(self, w_dict): return self.unerase(w_dict.dstorage)[1][:] # to make non-resizable @@ -145,13 +145,13 @@ w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) - def switch_to_bytes_strategy(self, w_dict): - strategy = self.space.fromcache(BytesDictStrategy) + def switch_to_unicode_strategy(self, w_dict): + strategy = self.space.fromcache(UnicodeDictStrategy) keys, values_w = self.unerase(w_dict.dstorage) storage = strategy.get_empty_storage() d_new = 
strategy.unerase(storage) for i in range(len(keys)): - d_new[keys[i]] = values_w[i] + d_new[self.decodekey_str(keys[i])] = values_w[i] w_dict.strategy = strategy w_dict.dstorage = storage diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -80,14 +80,11 @@ return space.fromcache(IntegerListStrategy) # check for strings - # XXX: StringListStrategy is currently broken - """ for w_obj in list_w: if not type(w_obj) is W_BytesObject: break else: return space.fromcache(BytesListStrategy) - """ # check for unicode for w_obj in list_w: @@ -166,12 +163,11 @@ self.switch_to_object_strategy() return self - # XXX: BytesListStrategy is currently broken - #@staticmethod - #def newlist_bytes(space, list_b): - # strategy = space.fromcache(BytesListStrategy) - # storage = strategy.erase(list_b) - # return W_ListObject.from_storage_and_strategy(space, storage, strategy) + @staticmethod + def newlist_bytes(space, list_b): + strategy = space.fromcache(BytesListStrategy) + storage = strategy.erase(list_b) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) @staticmethod def newlist_unicode(space, list_u): @@ -875,8 +871,8 @@ def switch_to_correct_strategy(self, w_list, w_item): if type(w_item) is W_IntObject: strategy = self.space.fromcache(IntegerListStrategy) - #elif type(w_item) is W_BytesObject: - # strategy = self.space.fromcache(BytesListStrategy) + elif type(w_item) is W_BytesObject: + strategy = self.space.fromcache(BytesListStrategy) elif type(w_item) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeListStrategy) elif type(w_item) is W_FloatObject: @@ -1662,7 +1658,7 @@ return self.space.wrapbytes(stringval) def unwrap(self, w_string): - return self.space.str_w(w_string) + return self.space.bytes_w(w_string) erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) @@ -1778,7 +1774,7 @@ def lt(self, a, b): return a < b 
-class StringSort(UnicodeBaseTimSort): +class StringSort(StringBaseTimSort): def lt(self, a, b): return a < b diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -640,7 +640,7 @@ def getitem(self, w_dict, w_key): space = self.space w_lookup_type = space.type(w_key) - if space.is_w(w_lookup_type, space.w_str): + if space.is_w(w_lookup_type, space.w_unicode): return self.getitem_str(w_dict, space.str_w(w_key)) elif _never_equal_to_string(space, w_lookup_type): return None @@ -659,7 +659,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): self.setitem_str(w_dict, self.space.str_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) @@ -667,7 +667,7 @@ def setdefault(self, w_dict, w_key, w_default): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_unicode): key = space.str_w(w_key) w_result = self.getitem_str(w_dict, key) if w_result is not None: @@ -682,7 +682,7 @@ space = self.space w_key_type = space.type(w_key) w_obj = self.unerase(w_dict.dstorage) - if space.is_w(w_key_type, space.w_str): + if space.is_w(w_key_type, space.w_unicode): key = self.space.str_w(w_key) flag = w_obj.deldictvalue(space, key) if not flag: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -316,10 +316,8 @@ assert not list_w or sizehint == -1 return W_ListObject(self, list_w, sizehint) - # XXX: BytesListStrategy is currently broken use the default - # implementation, which simply wraps - #def newlist_bytes(self, list_s): - # return W_ListObject.newlist_bytes(self, list_s) + def newlist_bytes(self, list_s): + return W_ListObject.newlist_bytes(self, list_s) def newlist_unicode(self, list_u): return 
W_ListObject.newlist_unicode(self, list_u) @@ -502,6 +500,9 @@ return w_obj.listview_int() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_int() + if type(w_obj) is W_BytesObject: + # Python3 considers bytes strings as a list of numbers. + return w_obj.listview_int() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_int() return None diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1439,7 +1439,7 @@ def next_entry(self): for key in self.iterator: - return self.space.wrap(key) + return self.space.wrapbytes(key) else: return None diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -82,10 +82,11 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), wb('el')) - def test_listview_bytes(self): + def test_listview_bytes_int(self): w_bytes = self.space.wrapbytes('abcd') # list(b'abcd') is a list of numbers assert self.space.listview_bytes(w_bytes) == None + assert self.space.listview_int(w_bytes) == [97, 98, 99, 100] class AppTestBytesObject: diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,15 +1,16 @@ +# encoding: utf-8 import py from pypy.objspace.std.celldict import ModuleDictStrategy from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.test.test_dictmultiobject import ( BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, - FakeString) + FakeUnicode) space = FakeSpace() class TestCellDict(object): - FakeString = FakeString + FakeString = FakeUnicode def test_basic_property_cells(self): strategy 
= ModuleDictStrategy(space) @@ -50,10 +51,10 @@ v1 = strategy.version x = object() - d.setitem("a", x) + d.setitem(u"a", x) v2 = strategy.version assert v1 is not v2 - d.setitem("a", x) + d.setitem(u"a", x) v3 = strategy.version assert v2 is v3 @@ -70,7 +71,6 @@ assert "ModuleDictStrategy" in __pypy__.internal_repr(obj) def test_check_module_uses_module_dict(self): - py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") m = type(__builtins__)("abc") self.impl_used(m.__dict__) @@ -133,9 +133,12 @@ def setup_class(cls): if cls.runappdirect: py.test.skip("__repr__ doesn't work on appdirect") - strategy = ModuleDictStrategy(cls.space) + + def setup_method(self, method): + space = self.space + strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - cls.w_d = W_DictMultiObject(cls.space, strategy, storage) + self.w_d = W_DictMultiObject(space, strategy, storage) def test_popitem(self): import __pypy__ @@ -148,7 +151,6 @@ assert x == ("a", 3) def test_degenerate(self): - py3k_skip("ModuleDictStrategy is immediately turned into ObjectDictStrategy because we use unicode keys now") import __pypy__ d = self.d @@ -157,3 +159,23 @@ del d["a"] d[object()] = 5 assert list(d.values()) == [5] + + def test_unicode(self): + import __pypy__ + + d = self.d + assert "ModuleDict" in __pypy__.internal_repr(d) + d['λ'] = True + assert "ModuleDict" in __pypy__.internal_repr(d) + assert list(d) == ['λ'] + assert next(iter(d)) == 'λ' + assert "ModuleDict" in __pypy__.internal_repr(d) + + d['foo'] = 'bar' + assert sorted(d) == ['foo', 'λ'] + assert "ModuleDict" in __pypy__.internal_repr(d) + + o = object() + d[o] = 'baz' + assert set(d) == set(['foo', 'λ', o]) + assert "ObjectDictStrategy" in __pypy__.internal_repr(d) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ 
b/pypy/objspace/std/test/test_dictmultiobject.py @@ -3,7 +3,7 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - BytesDictStrategy, ObjectDictStrategy) + BytesDictStrategy, ObjectDictStrategy, UnicodeDictStrategy) class TestW_DictObject(object): @@ -125,12 +125,10 @@ assert self.space.eq_w(space.call_function(get, w("33"), w(44)), w(44)) def test_fromkeys_fastpath(self): - py.test.py3k_skip("XXX: strategies are currently broken") space = self.space w = space.wrap - wb = space.wrapbytes - w_l = self.space.newlist([wb("a"),wb("b")]) + w_l = space.newlist([w("a"),w("b")]) w_l.getitems = None w_d = space.call_method(space.w_dict, "fromkeys", w_l) @@ -138,7 +136,6 @@ assert space.eq_w(w_d.getitem_str("b"), space.w_None) def test_listview_bytes_dict(self): - py.test.py3k_skip("XXX: strategies are currently broken") w = self.space.wrap wb = self.space.wrapbytes w_d = self.space.newdict() @@ -152,30 +149,30 @@ assert self.space.listview_unicode(w_d) == [u"a", u"b"] def test_listview_int_dict(self): - py.test.py3k_skip("IntDictStrategy not supported yet") w = self.space.wrap w_d = self.space.newdict() w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) assert self.space.listview_int(w_d) == [1, 2] def test_keys_on_string_unicode_int_dict(self, monkeypatch): - py.test.py3k_skip("XXX: strategies are currently broken") w = self.space.wrap wb = self.space.wrapbytes w_d = self.space.newdict() w_d.initialize_content([(w(1), wb("a")), (w(2), wb("b"))]) - w_l = self.space.call_method(w_d, "keys") + w_k = self.space.call_method(w_d, "keys") + w_l = self.space.call_function(self.space.w_list, w_k) assert sorted(self.space.listview_int(w_l)) == [1,2] - # make sure that .keys() calls newlist_bytes for string dicts + # make sure that list(d.keys()) calls newlist_bytes for byte dicts def not_allowed(*args): assert False, 'should not be called' monkeypatch.setattr(self.space, 'newlist', not_allowed) # w_d = self.space.newdict() - 
w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) - w_l = self.space.call_method(w_d, "keys") + w_d.initialize_content([(wb("a"), w(1)), (wb("b"), w(6))]) + w_k = self.space.call_method(w_d, "keys") + w_l = self.space.call_function(self.space.w_list, w_k) assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] # XXX: it would be nice if the test passed without monkeypatch.undo(), @@ -183,7 +180,8 @@ monkeypatch.undo() w_d = self.space.newdict() w_d.initialize_content([(w(u"a"), w(1)), (w(u"b"), w(6))]) - w_l = self.space.call_method(w_d, "keys") + w_k = self.space.call_method(w_d, "keys") + w_l = self.space.call_function(self.space.w_list, w_k) assert sorted(self.space.listview_unicode(w_l)) == [u"a", u"b"] class AppTest_DictObject: @@ -952,10 +950,9 @@ return r[r.find("(") + 1: r.find(")")] def test_empty_to_string(self): - py3k_skip("StringDictStrategy not supported yet") d = {} assert "EmptyDictStrategy" in self.get_strategy(d) - d["a"] = 1 + d[b"a"] = 1 assert "BytesDictStrategy" in self.get_strategy(d) class O(object): @@ -964,7 +961,7 @@ d = o.__dict__ = {} assert "EmptyDictStrategy" in self.get_strategy(d) o.a = 1 - assert "BytesDictStrategy" in self.get_strategy(d) + assert "UnicodeDictStrategy" in self.get_strategy(d) def test_empty_to_unicode(self): d = {} @@ -1017,9 +1014,16 @@ # gives us (1, 2), but 1 is not in the dict any longer. 
#raises(RuntimeError, list, it) + def test_bytes_to_object(self): + d = {b'a': 'b'} + d[object()] = None + assert b'a' in list(d) -class FakeWrapper(object): + +class FakeString(str): + hash_count = 0 + def unwrap(self, space): self.unwrapped = True return str(self) @@ -1028,11 +1032,18 @@ self.hash_count += 1 return str.__hash__(self) -class FakeString(FakeWrapper, str): - pass +class FakeUnicode(unicode): -class FakeUnicode(FakeWrapper, unicode): - pass + hash_count = 0 + + def unwrap(self, space): + self.unwrapped = True + return unicode(self) + + def __hash__(self): + self.hash_count += 1 + return unicode.__hash__(self) + # the minimal 'space' needed to use a W_DictMultiObject class FakeSpace: @@ -1054,22 +1065,42 @@ return l def newlist_bytes(self, l): return l + def newlist_unicode(self, l): + return l DictObjectCls = W_DictMultiObject def type(self, w_obj): if isinstance(w_obj, FakeString): return str + if isinstance(w_obj, FakeUnicode): + return unicode return type(w_obj) w_str = str + w_unicode = unicode def str_w(self, string): + if isinstance(string, unicode): + return string.encode('utf-8') assert isinstance(string, str) return string + def bytes_w(self, string): + assert isinstance(string, str) + return string + + def unicode_w(self, string): + assert isinstance(string, unicode) + return string + def int_w(self, integer, allow_conversion=True): assert isinstance(integer, int) return integer def wrap(self, obj): + if isinstance(obj, str): + return obj.decode('ascii') + return obj + + def wrapbytes(self, obj): return obj def isinstance_w(self, obj, klass): @@ -1144,13 +1175,18 @@ assert value == d.descr_getitem(self.space, key) class BaseTestRDictImplementation: + FakeString = FakeUnicode + _str_devolves = False def setup_method(self,method): self.fakespace = FakeSpace() - self.string = self.fakespace.wrap("fish") - self.string2 = self.fakespace.wrap("fish2") + self.string = self.wrapstrorunicode("fish") + self.string2 = self.wrapstrorunicode("fish2") 
self.impl = self.get_impl() + def wrapstrorunicode(self, obj): + return self.fakespace.wrap(obj) + def get_impl(self): strategy = self.StrategyClass(self.fakespace) storage = strategy.get_empty_storage() @@ -1178,21 +1214,22 @@ else: assert a == self.string2 assert b == 2000 - assert self.impl.getitem_str(self.string) == 1000 + if not self._str_devolves: + result = self.impl.getitem_str(self.string) + else: + result = self.impl.getitem(self.string) + assert result == 1000 self.check_not_devolved() def test_setitem(self): self.impl.setitem(self.string, 1000) assert self.impl.length() == 1 assert self.impl.getitem(self.string) == 1000 - assert self.impl.getitem_str(self.string) == 1000 - self.check_not_devolved() - - def test_setitem_str(self): - self.impl.setitem_str(self.fakespace.str_w(self.string), 1000) - assert self.impl.length() == 1 - assert self.impl.getitem(self.string) == 1000 - assert self.impl.getitem_str(self.string) == 1000 + if not self._str_devolves: + result = self.impl.getitem_str(self.string) + else: + result = self.impl.getitem(self.string) + assert result == 1000 self.check_not_devolved() def test_delitem(self): @@ -1256,14 +1293,14 @@ def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names impl = self.impl - key = FakeString(self.string) + key = self.FakeString(self.string) x = impl.setdefault(key, 1) assert x == 1 - if on_pypy: + if on_pypy and self.FakeString is FakeString: assert key.hash_count == 1 x = impl.setdefault(key, 2) assert x == 1 - if on_pypy: + if on_pypy and self.FakeString is FakeString: assert key.hash_count == 2 def test_fallback_evil_key(self): @@ -1296,20 +1333,34 @@ assert w_key not in d.w_keys() assert F() not in d.w_keys() -class TestBytesDictImplementation(BaseTestRDictImplementation): - StrategyClass = BytesDictStrategy +class TestUnicodeDictImplementation(BaseTestRDictImplementation): + StrategyClass = UnicodeDictStrategy def test_str_shortcut(self): self.fill_impl() - s = 
FakeString(self.string) + s = self.FakeString(self.string) assert self.impl.getitem(s) == 1000 assert s.unwrapped def test_view_as_kwargs(self): - py.test.py3k_skip("XXX: strategies are currently broken") self.fill_impl() assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) + def test_setitem_str(self): + self.impl.setitem_str(self.fakespace.str_w(self.string), 1000) + assert self.impl.length() == 1 + assert self.impl.getitem(self.string) == 1000 + assert self.impl.getitem_str(self.string) == 1000 + self.check_not_devolved() + +class TestBytesDictImplementation(BaseTestRDictImplementation): + StrategyClass = BytesDictStrategy + FakeString = FakeString + _str_devolves = True + + def wrapstrorunicode(self, obj): + return self.fakespace.wrapbytes(obj) + class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): @@ -1319,13 +1370,12 @@ def check_not_devolved(self): pass -class TestDevolvedBytesDictImplementation(BaseTestDevolvedDictImplementation): - StrategyClass = BytesDictStrategy +class TestDevolvedUnicodeDictImplementation(BaseTestDevolvedDictImplementation): + StrategyClass = UnicodeDictStrategy def test_module_uses_strdict(): - py.test.py3k_skip("XXX: strategies are currently broken") fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is BytesDictStrategy + assert type(d.strategy) is UnicodeDictStrategy diff --git a/pypy/objspace/std/test/test_identitydict.py b/pypy/objspace/std/test/test_identitydict.py --- a/pypy/objspace/std/test/test_identitydict.py +++ b/pypy/objspace/std/test/test_identitydict.py @@ -1,8 +1,6 @@ import py from pypy.interpreter.gateway import interp2app -py.test.py3k_skip("XXX: strategies are currently broken") - class AppTestComparesByIdentity: spaceconfig = {"objspace.std.withidentitydict": True} diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py 
+++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -1,3 +1,4 @@ +# encoding: utf-8 import py from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject from pypy.objspace.std.kwargsdict import * @@ -73,7 +74,7 @@ for i in range(100): assert d.setitem_str("d%s" % i, 4) is None assert d.strategy is not strategy - assert "BytesDictStrategy" == d.strategy.__class__.__name__ + assert "UnicodeDictStrategy" == d.strategy.__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() @@ -133,7 +134,6 @@ return r[r.find("(") + 1: r.find(")")] def test_create(self): - py3k_skip("need UnicodeDictStrategy to work in py3k") def f(**args): return args d = f(a=1) @@ -149,7 +149,6 @@ assert sorted(f(a=2, b=3).values()) == [2, 3] def test_setdefault(self): - py3k_skip("XXX: strategies are currently broken") def f(**args): return args d = f(a=1, b=2) @@ -161,3 +160,23 @@ assert a == 3 assert "KwargsDictStrategy" in self.get_strategy(d) + def test_unicode(self): + """ + def f(**kwargs): + return kwargs + + d = f(λ=True) + assert list(d) == ['λ'] + assert next(iter(d)) == 'λ' + assert "KwargsDictStrategy" in self.get_strategy(d) + + d['foo'] = 'bar' + assert sorted(d) == ['foo', 'λ'] + assert "KwargsDictStrategy" in self.get_strategy(d) + + d = f(λ=True) + o = object() + d[o] = 'baz' + assert set(d) == set(['λ', o]) + assert "ObjectDictStrategy" in self.get_strategy(d) + """ diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -402,7 +402,6 @@ self.space.w_True) def test_sizehint(self): - py.test.py3k_skip("XXX: strategies are currently broken") space = self.space w_l = space.newlist([], sizehint=10) assert isinstance(w_l.strategy, SizeListStrategy) @@ -419,7 +418,6 @@ assert w_lst.strategy.sizehint == 13 def test_find_fast_on_intlist(self, monkeypatch): - py.test.py3k_skip("XXX: strategies are currently 
broken") monkeypatch.setattr(self.space, "eq_w", None) w = self.space.wrap intlist = W_ListObject(self.space, [w(1),w(2),w(3),w(4),w(5),w(6),w(7)]) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,3 @@ -import py import sys from pypy.objspace.std.listobject import ( W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, @@ -7,7 +6,6 @@ from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject -py.test.py3k_skip("XXX: strategies are currently broken") class TestW_ListStrategies(TestW_ListObject): def test_check_strategy(self): @@ -186,6 +184,7 @@ def test_setslice(self): space = self.space w = space.wrap + wb = space.wrapbytes l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -579,9 +578,11 @@ assert not self.space.eq_w(l1, l2) def test_weird_rangelist_bug(self): - l = make_range_list(self.space, 1, 1, 3) + space = self.space + l = make_range_list(space, 1, 1, 3) # should not raise - assert l.descr_getslice(self.space, self.space.wrap(15), self.space.wrap(2222)).strategy == self.space.fromcache(EmptyListStrategy) + w_slice = space.newslice(space.wrap(15), space.wrap(2222), space.wrap(1)) + assert l.descr_getitem(space, w_slice).strategy == space.fromcache(EmptyListStrategy) def test_add_to_rangelist(self): l1 = make_range_list(self.space, 1, 1, 3) @@ -642,13 +643,13 @@ def test_string_uses_newlist_bytes(self): space = self.space - w_s = space.wrap("a b c") + w_s = space.wrapbytes("a b c") space.newlist = None try: w_l = space.call_method(w_s, "split") - w_l2 = space.call_method(w_s, "split", space.wrap(" ")) + w_l2 = space.call_method(w_s, "split", space.wrapbytes(" ")) w_l3 = space.call_method(w_s, "rsplit") - w_l4 = space.call_method(w_s, "rsplit", space.wrap(" ")) + w_l4 = space.call_method(w_s, 
"rsplit", space.wrapbytes(" ")) finally: del space.newlist assert space.listview_bytes(w_l) == ["a", "b", "c"] @@ -680,8 +681,6 @@ assert space.unwrap(w_res) == 3 def test_create_list_from_set(self): - # this test fails because of the "w_set.iter = None" line below - py.test.py3k_skip("missing the correct list strategy") from pypy.objspace.std.setobject import W_SetObject from pypy.objspace.std.setobject import _initialize_set diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -84,12 +84,12 @@ assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("")))) def test_create_set_from_list(self): - py.test.py3k_skip("XXX: strategies are currently broken") from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.setobject import BytesSetStrategy, ObjectSetStrategy, UnicodeSetStrategy from pypy.objspace.std.floatobject import W_FloatObject w = self.space.wrap + wb = self.space.wrapbytes intstr = self.space.fromcache(IntegerSetStrategy) tmp_func = intstr.get_storage_from_list # test if get_storage_from_list is no longer used @@ -101,10 +101,10 @@ assert w_set.strategy is intstr assert intstr.unerase(w_set.sstorage) == {1:None, 2:None, 3:None} - w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) + w_list = W_ListObject(self.space, [wb("1"), wb("2"), wb("3")]) w_set = W_SetObject(self.space) _initialize_set(self.space, w_set, w_list) - assert w_set.strategy is self.space.fromcache(UnicodeSetStrategy) + assert w_set.strategy is self.space.fromcache(BytesSetStrategy) assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} w_list = self.space.iter(W_ListObject(self.space, [w(u"1"), w(u"2"), w(u"3")])) @@ -131,13 +131,13 @@ intstr.get_storage_from_list = tmp_func def test_listview_bytes_int_on_set(self): - py.test.py3k_skip("XXX: strategies are currently broken") w = 
self.space.wrap + wb = self.space.wrapbytes w_a = W_SetObject(self.space) - _initialize_set(self.space, w_a, w("abcdefg")) - assert sorted(self.space.listview_bytes(w_a)) == list("abcdefg") - assert self.space.listview_int(w_a) is None + _initialize_set(self.space, w_a, wb("abcdefg")) + assert sorted(self.space.listview_int(w_a)) == [97, 98, 99, 100, 101, 102, 103] + assert self.space.listview_bytes(w_a) is None w_b = W_SetObject(self.space) _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) @@ -1006,6 +1006,13 @@ # gives us 1, but 1 is not in the set any longer. raises(RuntimeError, list, it) + def test_iter_bytes_strategy(self): + l = [b'a', b'b'] + s = set(l) + n = next(iter(s)) + assert type(n) is bytes + assert n in l + def test_unicodestrategy(self): s = 'àèìòù' myset = set([s]) diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py --- a/pypy/objspace/std/test/test_setstrategies.py +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -5,9 +5,6 @@ UnicodeIteratorImplementation, UnicodeSetStrategy) from pypy.objspace.std.listobject import W_ListObject -import py -py.test.py3k_skip("XXX: strategies are currently broken") - class TestW_SetStrategies: def wrapped(self, l, bytes=False): From noreply at buildbot.pypy.org Wed Apr 30 03:22:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 30 Apr 2014 03:22:30 +0200 (CEST) Subject: [pypy-commit] pypy default: backout c4296bb01a13, get_module_type_name is used for a reason Message-ID: <20140430012230.28BBA1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71082:c94a4ee2aa7d Date: 2014-04-29 21:14 -0400 http://bitbucket.org/pypy/pypy/changeset/c94a4ee2aa7d/ Log: backout c4296bb01a13, get_module_type_name is used for a reason diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1415,10 
+1415,10 @@ def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): - e = oefmt(self.w_TypeError, "must be %s, not None", expected) + name = "None" else: - e = oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) - raise e + name = self.type(w_obj).get_module_type_name() + raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) @specialize.arg(1) def getarg_w(self, code, w_obj): From noreply at buildbot.pypy.org Wed Apr 30 03:22:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 30 Apr 2014 03:22:31 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify get_module_type_name Message-ID: <20140430012231.5BB8E1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71083:4bb8079f8c4f Date: 2014-04-29 21:21 -0400 http://bitbucket.org/pypy/pypy/changeset/4bb8079f8c4f/ Log: simplify get_module_type_name diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -490,14 +490,11 @@ def get_module_type_name(w_self): space = w_self.space w_mod = w_self.get_module() - if not space.isinstance_w(w_mod, space.w_str): - mod = '__builtin__' - else: + if space.isinstance_w(w_mod, space.w_str): mod = space.str_w(w_mod) - if mod != '__builtin__': - return '%s.%s' % (mod, w_self.name) - else: - return w_self.name + if mod != '__builtin__': + return '%s.%s' % (mod, w_self.name) + return w_self.name def getname(w_self, space): name = w_self.name From noreply at buildbot.pypy.org Wed Apr 30 15:02:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 30 Apr 2014 15:02:26 +0200 (CEST) Subject: [pypy-commit] pypy default: update force-build Message-ID: <20140430130226.38D951D294E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71084:f3642fa28522 Date: 2014-04-30 16:00 +0300 http://bitbucket.org/pypy/pypy/changeset/f3642fa28522/ Log: update force-build diff --git a/pypy/tool/release/force-builds.py 
b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -41,10 +41,9 @@ import pwd return pwd.getpwuid(os.getuid())[0] -def main(): +def main(branch, server): #XXX: handle release tags #XXX: handle validity checks - branch = sys.argv[1] lock = defer.DeferredLock() requests = [] def ebList(err): @@ -54,10 +53,11 @@ for builder in BUILDERS: print 'Forcing', builder, '...' - url = "http://buildbot.pypy.org/builders/" + builder + "/force" + url = "http://" + server + "/builders/" + builder + "/force" args = [ ('username', get_user()), ('revision', ''), + ('forcescheduler', 'Force Scheduler'), ('submit', 'Force Build'), ('branch', branch), ('comments', "Forced by command line script")] @@ -73,4 +73,11 @@ if __name__ == '__main__': log.startLogging(sys.stdout) - main() + import optparse + parser = optparse.OptionParser() + parser.add_option("-b", "--branch", help="branch to build", default='') + parser.add_option("-s", "--server", help="buildbot server", default="buildbot.pypy.org") + (options, args) = parser.parse_args() + if not options.branch: + parser.error("branch option required") + main(options.branch, options.server) From noreply at buildbot.pypy.org Wed Apr 30 15:04:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 15:04:06 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a test for cast_float_to_singlefloat. Fails so far because Message-ID: <20140430130406.AC6921D294E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71085:9761ff01220d Date: 2014-04-30 14:38 +0200 http://bitbucket.org/pypy/pypy/changeset/9761ff01220d/ Log: Add a test for cast_float_to_singlefloat. Fails so far because the x86 backend doesn't correctly zero out the upper 4 bytes. 
diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4347,3 +4347,10 @@ 'void') assert foo[0] == 1789201 lltype.free(foo, flavor='raw') + + def test_cast_float_to_singlefloat(self): + if not self.cpu.supports_singlefloats: + py.test.skip("requires singlefloats") + res = self.execute_operation(rop.CAST_FLOAT_TO_SINGLEFLOAT, + [BoxFloat(12.5)], 'int') + assert res.getint() == struct.unpack("I", struct.pack("f", 12.5))[0] From noreply at buildbot.pypy.org Wed Apr 30 15:04:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 15:04:07 +0200 (CEST) Subject: [pypy-commit] pypy default: Finally clean up this small mess: replace MOVD with MOVDQ or MOVD32. Message-ID: <20140430130407.E1B491D294E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71086:10ad80d9a210 Date: 2014-04-30 14:53 +0200 http://bitbucket.org/pypy/pypy/changeset/10ad80d9a210/ Log: Finally clean up this small mess: replace MOVD with MOVDQ or MOVD32. The latter is always 32-bit. The former is always WORD-sized and corresponds to either MOVD or MOVQ from the Intel documentation. Fixes 9761ff01220d. 
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1173,13 +1173,13 @@ self.mc.CVTSD2SS(loctmp, loc0) assert isinstance(resloc, RegLoc) assert isinstance(loctmp, RegLoc) - self.mc.MOVD_rx(resloc.value, loctmp.value) + self.mc.MOVD32_rx(resloc.value, loctmp.value) def genop_cast_singlefloat_to_float(self, op, arglocs, resloc): loc0, = arglocs assert isinstance(resloc, RegLoc) assert isinstance(loc0, RegLoc) - self.mc.MOVD_xr(resloc.value, loc0.value) + self.mc.MOVD32_xr(resloc.value, loc0.value) self.mc.CVTSS2SD_xx(resloc.value, resloc.value) def genop_convert_float_bytes_to_longlong(self, op, arglocs, resloc): @@ -1187,7 +1187,7 @@ if longlong.is_64_bit: assert isinstance(resloc, RegLoc) assert isinstance(loc0, RegLoc) - self.mc.MOVD(resloc, loc0) + self.mc.MOVDQ(resloc, loc0) else: self.mov(loc0, resloc) @@ -1196,7 +1196,7 @@ if longlong.is_64_bit: assert isinstance(resloc, RegLoc) assert isinstance(loc0, RegLoc) - self.mc.MOVD(resloc, loc0) + self.mc.MOVDQ(resloc, loc0) else: self.mov(loc0, resloc) @@ -1262,7 +1262,7 @@ loc = arglocs[0] assert isinstance(resloc, RegLoc) if isinstance(loc, RegLoc): - self.mc.MOVD_rx(resloc.value, loc.value) + self.mc.MOVD32_rx(resloc.value, loc.value) elif isinstance(loc, FrameLoc): self.mc.MOV_rb(resloc.value, loc.value) else: @@ -1277,16 +1277,16 @@ assert isinstance(loc1, RegLoc) assert isinstance(loc2, RegLoc) assert isinstance(resloc, RegLoc) - self.mc.MOVD_xr(loc2.value, loc1.value) + self.mc.MOVD32_xr(loc2.value, loc1.value) self.mc.PSRAD_xi(loc2.value, 31) # -> 0 or -1 - self.mc.MOVD_xr(resloc.value, loc1.value) + self.mc.MOVD32_xr(resloc.value, loc1.value) self.mc.PUNPCKLDQ_xx(resloc.value, loc2.value) def genop_llong_from_uint(self, op, arglocs, resloc): loc1, = arglocs assert isinstance(resloc, RegLoc) assert isinstance(loc1, RegLoc) - self.mc.MOVD_xr(resloc.value, loc1.value) + 
self.mc.MOVD32_xr(resloc.value, loc1.value) def genop_llong_eq(self, op, arglocs, resloc): loc1, loc2, locxtmp = arglocs @@ -1571,8 +1571,8 @@ self.mc.OR_rr(edx.value, eax.value) else: loc1, = arglocs - self.mc.MOVD_xr(loc1.value, edx.value) - self.mc.MOVD_xr(resloc.value, eax.value) + self.mc.MOVD32_xr(loc1.value, edx.value) + self.mc.MOVD32_xr(resloc.value, eax.value) self.mc.PUNPCKLDQ_xx(resloc.value, loc1.value) def genop_guard_guard_true(self, ign_1, guard_op, guard_token, locs, ign_2): diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -242,8 +242,8 @@ if self.tmpresloc is None: if self.restype == 'L': # long long # move eax/edx -> xmm0 - self.mc.MOVD_xr(resloc.value^1, edx.value) - self.mc.MOVD_xr(resloc.value, eax.value) + self.mc.MOVD32_xr(resloc.value^1, edx.value) + self.mc.MOVD32_xr(resloc.value, eax.value) self.mc.PUNPCKLDQ_xx(resloc.value, resloc.value^1) else: # float: we have to go via the stack @@ -435,7 +435,7 @@ if isinstance(src, ImmedLoc): self.mc.MOV(X86_64_SCRATCH_REG, src) src = X86_64_SCRATCH_REG - self.mc.MOVD(dst, src) + self.mc.MOVD32(dst, src) # Finally remap the arguments in the main regs remap_frame_layout(self.asm, src_locs, dst_locs, X86_64_SCRATCH_REG) @@ -447,7 +447,7 @@ if self.restype == 'S' and self.tmpresloc is None: # singlefloat return: use MOVD to load the target register # from the lower 32 bits of XMM0 - self.mc.MOVD(self.resloc, xmm0) + self.mc.MOVD32(self.resloc, xmm0) else: CallBuilderX86.load_result(self) @@ -469,7 +469,7 @@ if self.restype == 'S': # singlefloat return: use MOVD to store the lower 32 bits # of XMM0 into the tmpresloc (register or [ESP]) - self.mc.MOVD(self.tmpresloc, xmm0) + self.mc.MOVD32(self.tmpresloc, xmm0) else: assert self.restype == INT self.mc.MOV(self.tmpresloc, eax) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- 
a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -662,7 +662,8 @@ PXOR = _binaryop('PXOR') PCMPEQD = _binaryop('PCMPEQD') - MOVD = _binaryop('MOVD') + MOVDQ = _binaryop('MOVDQ') + MOVD32 = _binaryop('MOVD32') CALL = _relative_unaryop('CALL') JMP = _relative_unaryop('JMP') diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -617,12 +617,17 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - # These work on machine sized registers, so MOVD is actually MOVQ - # when running on 64 bits. Note a bug in the Intel documentation: + # These work on machine sized registers, so "MOVDQ" is MOVD when running + # on 32 bits and MOVQ when running on 64 bits. "MOVD32" is always 32-bit. + # Note a bug in the Intel documentation: # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html - MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') - MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') - MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) + MOVDQ_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVDQ_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVDQ_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) + + MOVD32_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVD32_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVD32_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) diff --git a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ 
b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -183,8 +183,11 @@ g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) # - if instrname == 'MOVD' and self.WORD == 8: - instrname = 'MOVQ' + if instrname == 'MOVDQ': + if self.WORD == 8: + instrname = 'MOVQ' + else: + instrname = 'MOVD' if argmodes == 'xb': py.test.skip('"as" uses an undocumented alternate encoding??') # From noreply at buildbot.pypy.org Wed Apr 30 15:04:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 15:04:09 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140430130409.02D051D294E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71087:d707051da931 Date: 2014-04-30 15:02 +0200 http://bitbucket.org/pypy/pypy/changeset/d707051da931/ Log: merge heads diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -41,10 +41,9 @@ import pwd return pwd.getpwuid(os.getuid())[0] -def main(): +def main(branch, server): #XXX: handle release tags #XXX: handle validity checks - branch = sys.argv[1] lock = defer.DeferredLock() requests = [] def ebList(err): @@ -54,10 +53,11 @@ for builder in BUILDERS: print 'Forcing', builder, '...' 
- url = "http://buildbot.pypy.org/builders/" + builder + "/force" + url = "http://" + server + "/builders/" + builder + "/force" args = [ ('username', get_user()), ('revision', ''), + ('forcescheduler', 'Force Scheduler'), ('submit', 'Force Build'), ('branch', branch), ('comments', "Forced by command line script")] @@ -73,4 +73,11 @@ if __name__ == '__main__': log.startLogging(sys.stdout) - main() + import optparse + parser = optparse.OptionParser() + parser.add_option("-b", "--branch", help="branch to build", default='') + parser.add_option("-s", "--server", help="buildbot server", default="buildbot.pypy.org") + (options, args) = parser.parse_args() + if not options.branch: + parser.error("branch option required") + main(options.branch, options.server) From noreply at buildbot.pypy.org Wed Apr 30 15:05:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 15:05:23 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: missing POP_j Message-ID: <20140430130523.216231D294E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71088:b0a7c1fa4800 Date: 2014-04-29 15:49 +0200 http://bitbucket.org/pypy/pypy/changeset/b0a7c1fa4800/ Log: missing POP_j diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -569,6 +569,7 @@ POP_r = insn(rex_nw, register(1), '\x58') POP_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) POP_m = insn(rex_nw, '\x8F', orbyte(0<<3), mem_reg_plus_const(1)) + POP_j = insn(rex_nw, '\x8F', orbyte(0<<3), abs_(1)) # note: the segment specified in LEA should always be SEGMENT_NO; # if instead you give it a SEGMENT_*S, it is ignored diff --git a/rpython/jit/backend/x86/test/test_rx86.py b/rpython/jit/backend/x86/test/test_rx86.py --- a/rpython/jit/backend/x86/test/test_rx86.py +++ b/rpython/jit/backend/x86/test/test_rx86.py @@ -261,3 +261,8 @@ s = CodeBuilder64() s.MOV8_jr((SEGMENT_GS, 51), ebx | BYTE_REG_FLAG) 
assert s.getvalue() == '\x65\x88\x1C\x25\x33\x00\x00\x00' + +def test_pop_j(): + s = CodeBuilder64() + s.POP_j((SEGMENT_GS, -440)) + assert s.getvalue() == '\x65\x8f\x04\x25\x48\xfe\xff\xff' From noreply at buildbot.pypy.org Wed Apr 30 15:05:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 15:05:24 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: merge heads Message-ID: <20140430130524.BA7F11D294E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71089:3d931c688c39 Date: 2014-04-30 15:03 +0200 http://bitbucket.org/pypy/pypy/changeset/3d931c688c39/ Log: merge heads diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -39,8 +39,8 @@ '_atomic_enter': 'interp_atomic.atomic_enter', '_exclusive_atomic_enter': 'interp_atomic.exclusive_atomic_enter', '_atomic_exit': 'interp_atomic.atomic_exit', - 'last_abort_info': 'interp_atomic.last_abort_info', - 'discard_last_abort_info': 'interp_atomic.discard_last_abort_info', + 'longest_abort_info': 'interp_atomic.longest_abort_info', + 'reset_longest_abort_info':'interp_atomic.reset_longest_abort_info', 'getsegmentlimit': 'interp_atomic.getsegmentlimit', } def activate(self, space): diff --git a/pypy/module/__pypy__/interp_atomic.py b/pypy/module/__pypy__/interp_atomic.py --- a/pypy/module/__pypy__/interp_atomic.py +++ b/pypy/module/__pypy__/interp_atomic.py @@ -59,8 +59,16 @@ else: return space.wrap(1) -def last_abort_info(space): - return space.w_None +def longest_abort_info(space): + if space.config.translation.stm: + from rpython.rlib.rstm import longest_abort_info + a, b, c, d = longest_abort_info() + return space.newtuple([space.wrap(a), space.wrap(b), + space.wrap(c), space.wrap(d)]) + else: + return space.w_None -def discard_last_abort_info(space): - pass +def reset_longest_abort_info(space): + if space.config.translation.stm: + from rpython.rlib.rstm import 
reset_longest_abort_info + reset_longest_abort_info() diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -132,6 +132,18 @@ def pop_marker(): llop.stm_pop_marker(lltype.Void) + at dont_look_inside +def longest_abort_info(): + state = llop.stm_longest_marker_state(lltype.Signed) + time = llop.stm_longest_marker_time(lltype.Float) + cself = llop.stm_longest_marker_self(rffi.CCHARP) + cother = llop.stm_longest_marker_other(rffi.CCHARP) + return (state, time, rffi.charp2str(cself), rffi.charp2str(cother)) + + at dont_look_inside +def reset_longest_abort_info(): + llop.stm_reset_longest_marker_state(lltype.Void) + # ____________________________________________________________ def make_perform_transaction(func, CONTAINERP): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -457,22 +457,11 @@ 'stm_expand_marker': LLOp(), 'stm_setup_expand_marker_for_pypy': LLOp(), -## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), -## 'stm_become_inevitable': LLOp(canmallocgc=True), -## 'stm_stop_all_other_threads': LLOp(canmallocgc=True), -## 'stm_partial_commit_and_resume_other_threads': LLOp(canmallocgc=True), -## 'stm_minor_collect': LLOp(canmallocgc=True), -## 'stm_major_collect': LLOp(canmallocgc=True), -## 'stm_get_tid': LLOp(canfold=True), -## 'stm_ptr_eq': LLOp(canfold=True), - -## 'stm_weakref_allocate': LLOp(sideeffects=False, canmallocgc=True), - -## 'stm_get_adr_of_private_rev_num':LLOp(), -## 'stm_get_adr_of_read_barrier_cache':LLOp(), -## 'stm_get_adr_of_nursery_current': LLOp(), -## 'stm_get_adr_of_nursery_nextlimit': LLOp(), -## 'stm_get_adr_of_active': LLOp(), + 'stm_longest_marker_state': LLOp(), + 'stm_longest_marker_time': LLOp(), + 'stm_longest_marker_self': LLOp(), + 'stm_longest_marker_other': LLOp(), + 
'stm_reset_longest_marker_state': LLOp(), # __________ address operations __________ diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -239,3 +239,25 @@ assert len(offsets) == 4 return 'pypy_stm_setup_expand_marker(%s, %s, %s, %s);' % ( offsets[0], offsets[1], offsets[2], offsets[3]) + +def stm_longest_marker_state(funcgen, op): + result = funcgen.expr(op.result) + return '%s = (Signed)stm_thread_local.longest_marker_state;' % (result,) + +def stm_longest_marker_time(funcgen, op): + result = funcgen.expr(op.result) + return '%s = stm_thread_local.longest_marker_time;' % (result,) + +def stm_longest_marker_self(funcgen, op): + result = funcgen.expr(op.result) + return '%s = stm_thread_local.longest_marker_self;' % (result,) + +def stm_longest_marker_other(funcgen, op): + result = funcgen.expr(op.result) + return '%s = stm_thread_local.longest_marker_other;' % (result,) + +def stm_reset_longest_marker_state(funcgen, op): + return ('stm_thread_local.longest_marker_state = 0;\n' + 'stm_thread_local.longest_marker_time = 0.0;\n' + 'stm_thread_local.longest_marker_self[0] = 0;\n' + 'stm_thread_local.longest_marker_other[0] = 0;') diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -889897f2f5ef +4bde66e3b621 diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ b/rpython/translator/stm/src_stm/stm/contention.c @@ -100,7 +100,8 @@ static void contention_management(uint8_t other_segment_num, - enum contention_kind_e kind) + enum contention_kind_e kind, + object_t *obj) { assert(_has_mutex()); assert(other_segment_num != STM_SEGMENT->segment_num); @@ -162,10 +163,12 @@ itself already paused here. 
*/ contmgr.other_pseg->signal_when_done = true; + marker_contention(kind, false, other_segment_num, obj); change_timing_state(wait_category); - /* XXX should also tell other_pseg "please commit soon" */ + /* tell the other to commit ASAP */ + signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("pausing...\n")); cond_signal(C_AT_SAFE_POINT); @@ -177,12 +180,22 @@ if (must_abort()) abort_with_mutex(); - change_timing_state(STM_TIME_RUN_CURRENT); + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + double elapsed = + change_timing_state_tl(pseg->pub.running_thread, + STM_TIME_RUN_CURRENT); + marker_copy(pseg->pub.running_thread, pseg, + wait_category, elapsed); } else if (!contmgr.abort_other) { + /* tell the other to commit ASAP, since it causes aborts */ + signal_other_to_commit_soon(contmgr.other_pseg); + dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; + marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); } @@ -190,6 +203,7 @@ /* We have to signal the other thread to abort, and wait until it does. 
*/ contmgr.other_pseg->pub.nursery_end = abort_category; + marker_contention(kind, true, other_segment_num, obj); int sp = contmgr.other_pseg->safe_point; switch (sp) { @@ -257,10 +271,18 @@ abort_data_structures_from_segment_num(other_segment_num); } dprintf(("killed other thread\n")); + + /* we should commit soon, we caused an abort */ + //signal_other_to_commit_soon(get_priv_segment(STM_SEGMENT->segment_num)); + if (!STM_PSEGMENT->signalled_to_commit_soon) { + STM_PSEGMENT->signalled_to_commit_soon = true; + stmcb_commit_soon(); + } } } -static void write_write_contention_management(uintptr_t lock_idx) +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj) { s_mutex_lock(); @@ -271,7 +293,7 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); - contention_management(other_segment_num, WRITE_WRITE_CONTENTION); + contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. 
*/ @@ -280,12 +302,13 @@ s_mutex_unlock(); } -static void write_read_contention_management(uint8_t other_segment_num) +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj) { - contention_management(other_segment_num, WRITE_READ_CONTENTION); + contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); } static void inevitable_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, INEVITABLE_CONTENTION); + contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL); } diff --git a/rpython/translator/stm/src_stm/stm/contention.h b/rpython/translator/stm/src_stm/stm/contention.h --- a/rpython/translator/stm/src_stm/stm/contention.h +++ b/rpython/translator/stm/src_stm/stm/contention.h @@ -1,11 +1,14 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ -static void write_write_contention_management(uintptr_t lock_idx); -static void write_read_contention_management(uint8_t other_segment_num); +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj); +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj); static void inevitable_contention_management(uint8_t other_segment_num); static inline bool is_abort(uintptr_t nursery_end) { - return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE); + return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE + && nursery_end != NSE_SIGCOMMITSOON); } static inline bool is_aborting_now(uint8_t other_segment_num) { diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -15,13 +15,10 @@ #define EVENTUALLY(condition) \ { \ if (!(condition)) { \ - int _i; \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_acquire(lock_pages_privatizing[_i]); \ + acquire_privatization_lock(); \ if (!(condition)) \ 
stm_fatalerror("fails: " #condition); \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_release(lock_pages_privatizing[_i]); \ + release_privatization_lock(); \ } \ } #endif @@ -77,9 +74,15 @@ assert(lock_idx < sizeof(write_locks)); retry: if (write_locks[lock_idx] == 0) { + /* A lock to prevent reading garbage from + lookup_other_thread_recorded_marker() */ + acquire_marker_lock(STM_SEGMENT->segment_base); + if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], - 0, lock_num))) + 0, lock_num))) { + release_marker_lock(STM_SEGMENT->segment_base); goto retry; + } dprintf_test(("write_slowpath %p -> mod_old\n", obj)); @@ -87,6 +90,15 @@ Add it to the list 'modified_old_objects'. */ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); + /* Add the current marker, recording where we wrote to this object */ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->modified_old_objects_markers = + list_append2(STM_PSEGMENT->modified_old_objects_markers, + marker[0], marker[1]); + + release_marker_lock(STM_SEGMENT->segment_base); + /* We need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only one page in total. */ @@ -128,7 +140,7 @@ else { /* call the contention manager, and then retry (unless we were aborted). */ - write_write_contention_management(lock_idx); + write_write_contention_management(lock_idx, obj); goto retry; } @@ -196,7 +208,13 @@ assert(STM_PSEGMENT->transaction_state == TS_NONE); change_timing_state(STM_TIME_RUN_CURRENT); STM_PSEGMENT->start_time = tl->_timing_cur_start; + STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; +#ifndef NDEBUG + STM_PSEGMENT->marker_inev[1] = 99999999999999999L; +#endif + if (jmpbuf == NULL) + marker_fetch_inev(); STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? 
TS_REGULAR : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; @@ -224,12 +242,17 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->modified_old_objects_markers)); assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); +#ifndef NDEBUG + /* this should not be used when objects_pointing_to_nursery == NULL */ + STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L; +#endif check_nursery_at_transaction_start(); } @@ -264,7 +287,7 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ - write_read_contention_management(i); + write_read_contention_management(i, item); /* If we reach this point, we didn't abort, but maybe we had to wait for the other thread to commit. If we @@ -338,9 +361,12 @@ /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other segments' own private pages. + + Must be called with the privatization lock acquired. */ assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + assert(STM_PSEGMENT->privatization_lock == 1); uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; @@ -382,26 +408,9 @@ memcpy(dst, src, copy_size); } else { - EVENTUALLY(memcmp(dst, src, copy_size) == 0); /* same page */ + assert(memcmp(dst, src, copy_size) == 0); /* same page */ } - /* Do a full memory barrier. We must make sure that other - CPUs see the changes we did to the shared page ("S", - above) before we check the other segments below with - is_private_page(). 
Otherwise, we risk the following: - this CPU writes "S" but the writes are not visible yet; - then it checks is_private_page() and gets false, and does - nothing more; just afterwards another CPU sets its own - private_page bit and copies the page; but it risks doing - so before seeing the "S" writes. - - XXX what is the cost of this? If it's high, then we - should reorganize the code so that we buffer the second - parts and do them by bunch of N, after just one call to - __sync_synchronize()... - */ - __sync_synchronize(); - for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; @@ -418,7 +427,7 @@ memcpy(dst, src, copy_size); } else { - EVENTUALLY(!memcmp(dst, src, copy_size)); /* same page */ + assert(!memcmp(dst, src, copy_size)); /* same page */ } } @@ -432,12 +441,15 @@ if (STM_PSEGMENT->large_overflow_objects == NULL) return; + acquire_privatization_lock(); LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, synchronize_object_now(item)); + release_privatization_lock(); } static void push_modified_to_other_segments(void) { + acquire_privatization_lock(); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, @@ -457,8 +469,10 @@ private pages as needed */ synchronize_object_now(item); })); + release_privatization_lock(); list_clear(STM_PSEGMENT->modified_old_objects); + list_clear(STM_PSEGMENT->modified_old_objects_markers); } static void _finish_transaction(int attribute_to) @@ -597,6 +611,7 @@ })); list_clear(pseg->modified_old_objects); + list_clear(pseg->modified_old_objects_markers); } static void abort_data_structures_from_segment_num(int segment_num) @@ -621,8 +636,9 @@ (int)pseg->transaction_state); } - /* look up and preserve the marker information as a string */ - marker_fetch_expand(pseg); + /* if we don't have marker information already, look up and preserve + the marker information from the shadowstack as a string */ + marker_default_for_abort(pseg); /* throw away the content of the nursery */ long 
bytes_in_nursery = throw_away_nursery(pseg); @@ -706,6 +722,7 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); + marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -79,9 +79,17 @@ /* List of old objects (older than the current transaction) that the current transaction attempts to modify. This is used to track the STM status: they are old objects that where written to and - that need to be copied to other segments upon commit. */ + that need to be copied to other segments upon commit. Note that + every object takes three list items: the object, and two words for + the location marker. */ struct list_s *modified_old_objects; + /* For each entry in 'modified_old_objects', we have two entries + in the following list, which give the marker at the time we added + the entry to modified_old_objects. */ + struct list_s *modified_old_objects_markers; + uintptr_t modified_old_objects_markers_num_old; + /* List of out-of-nursery objects that may contain pointers to nursery objects. This is used to track the GC status: they are all objects outside the nursery on which an stm_write() occurred @@ -149,12 +157,31 @@ /* For sleeping contention management */ bool signal_when_done; + /* This lock is acquired when that segment calls synchronize_object_now. + On the rare event of a page_privatize(), the latter will acquire + all the locks in all segments. Otherwise, for the common case, + it's cheap. (The set of all 'privatization_lock' in all segments + works like one single read-write lock, with page_privatize() acquiring + the write lock; but this variant is more efficient for the case of + many reads / rare writes.) 
*/ + uint8_t privatization_lock; + + /* This lock is acquired when we mutate 'modified_old_objects' but + we don't have the global mutex. It is also acquired during minor + collection. It protects against a different thread that tries to + get this segment's marker corresponding to some object, or to + expand the marker into a full description. */ + uint8_t marker_lock; + /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. */ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; struct stm_shadowentry_s *shadowstack_at_abort; + /* Already signalled to commit soon: */ + bool signalled_to_commit_soon; + /* For debugging */ #ifndef NDEBUG pthread_t running_pthread; @@ -162,6 +189,8 @@ /* Temporarily stores the marker information */ char marker_self[_STM_MARKER_LEN]; + char marker_other[_STM_MARKER_LEN]; + uintptr_t marker_inev[2]; /* marker where this thread became inevitable */ }; enum /* safe_point */ { @@ -231,3 +260,31 @@ static void copy_object_to_shared(object_t *obj, int source_segment_num); static void synchronize_object_now(object_t *obj); + +static inline void acquire_privatization_lock(void) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); + spinlock_acquire(*lock); +} + +static inline void release_privatization_lock(void) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); + spinlock_release(*lock); +} + +static inline void acquire_marker_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->marker_lock); + spinlock_acquire(*lock); +} + +static inline void release_marker_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->marker_lock); + spinlock_release(*lock); +} diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c 
b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -93,17 +93,20 @@ /* uncommon case: need to initialize some more pages */ spinlock_acquire(lock_growth_large); - if (addr + size > uninitialized_page_start) { + char *start = uninitialized_page_start; + if (addr + size > start) { uintptr_t npages; - npages = (addr + size - uninitialized_page_start) / 4096UL; + npages = (addr + size - start) / 4096UL; npages += GCPAGE_NUM_PAGES; - if (uninitialized_page_stop - uninitialized_page_start < - npages * 4096UL) { + if (uninitialized_page_stop - start < npages * 4096UL) { stm_fatalerror("out of memory!"); /* XXX */ } - setup_N_pages(uninitialized_page_start, npages); - __sync_synchronize(); - uninitialized_page_start += npages * 4096UL; + setup_N_pages(start, npages); + if (!__sync_bool_compare_and_swap(&uninitialized_page_start, + start, + start + npages * 4096UL)) { + stm_fatalerror("uninitialized_page_start changed?"); + } } spinlock_release(lock_growth_large); return addr; @@ -419,6 +422,23 @@ } } +static void mark_visit_from_markers(void) +{ + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + char *base = get_segment_base(j); + struct list_s *lst = get_priv_segment(j)->modified_old_objects_markers; + uintptr_t i; + for (i = list_count(lst); i > 0; i -= 2) { + mark_visit_object((object_t *)list_item(lst, i - 1), base); + } + if (get_priv_segment(j)->transaction_state == TS_INEVITABLE) { + uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; + mark_visit_object((object_t *)marker_inev_obj, base); + } + } +} + static void clean_up_segment_lists(void) { long i; @@ -521,6 +541,7 @@ /* marking */ LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); + mark_visit_from_markers(); mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); diff --git a/rpython/translator/stm/src_stm/stm/largemalloc.c b/rpython/translator/stm/src_stm/stm/largemalloc.c --- 
a/rpython/translator/stm/src_stm/stm/largemalloc.c +++ b/rpython/translator/stm/src_stm/stm/largemalloc.c @@ -354,6 +354,9 @@ mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); +#ifndef NDEBUG + memset((char *)&mscan->d, 0xda, request_size); +#endif lm_unlock(); diff --git a/rpython/translator/stm/src_stm/stm/list.h b/rpython/translator/stm/src_stm/stm/list.h --- a/rpython/translator/stm/src_stm/stm/list.h +++ b/rpython/translator/stm/src_stm/stm/list.h @@ -34,6 +34,18 @@ #define LIST_APPEND(lst, e) ((lst) = list_append((lst), (uintptr_t)(e))) +static inline struct list_s *list_append2(struct list_s *lst, + uintptr_t item0, uintptr_t item1) +{ + uintptr_t index = lst->count; + lst->count += 2; + if (UNLIKELY(index >= lst->last_allocated)) + lst = _list_grow(lst, index + 1); + lst->items[index + 0] = item0; + lst->items[index + 1] = item1; + return lst; +} + static inline void list_clear(struct list_s *lst) { @@ -67,6 +79,11 @@ lst->items[index] = newitem; } +static inline uintptr_t *list_ptr_to_item(struct list_s *lst, uintptr_t index) +{ + return &lst->items[index]; +} + #define LIST_FOREACH_R(lst, TYPE, CODE) \ do { \ struct list_s *_lst = (lst); \ diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -12,38 +12,73 @@ const char *marker); -static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg) +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) +{ + /* fetch the current marker from the tl's shadow stack, + and return it in 'marker[2]'. */ + struct stm_shadowentry_s *current = tl->shadowstack - 1; + struct stm_shadowentry_s *base = tl->shadowstack_base; + + /* The shadowstack_base contains STM_STACK_MARKER_OLD, which is + a convenient stopper for the loop below but which shouldn't + be returned. 
*/ + assert(base->ss == (object_t *)STM_STACK_MARKER_OLD); + + while (!(((uintptr_t)current->ss) & 1)) { + current--; + assert(current >= base); + } + if (current != base) { + /* found the odd marker */ + marker[0] = (uintptr_t)current[0].ss; + marker[1] = (uintptr_t)current[1].ss; + } + else { + /* no marker found */ + marker[0] = 0; + marker[1] = 0; + } +} + +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker) +{ + /* Expand the marker given by 'marker[2]' into a full string. This + works assuming that the marker was produced inside the segment + given by 'segment_base'. If that's from a different thread, you + must first acquire the corresponding 'marker_lock'. */ + assert(_has_mutex()); + outmarker[0] = 0; + if (marker[0] == 0) + return; /* no marker entry found */ + if (stmcb_expand_marker != NULL) { + stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], + outmarker, _STM_MARKER_LEN); + } +} + +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg) { if (pseg->marker_self[0] != 0) return; /* already collected an entry */ - if (stmcb_expand_marker != NULL) { - stm_thread_local_t *tl = pseg->pub.running_thread; - struct stm_shadowentry_s *current = tl->shadowstack - 1; - struct stm_shadowentry_s *base = tl->shadowstack_base; - /* stop walking just before shadowstack_base, which contains - STM_STACK_MARKER_OLD which shouldn't be expanded */ - while (--current > base) { - uintptr_t x = (uintptr_t)current->ss; - if (x & 1) { - /* the stack entry is an odd number */ - stmcb_expand_marker(pseg->pub.segment_base, x, current[1].ss, - pseg->marker_self, _STM_MARKER_LEN); - - if (pseg->marker_self[0] != 0) - break; - } - } - } + uintptr_t marker[2]; + marker_fetch(pseg->pub.running_thread, marker); + marker_expand(marker, pseg->pub.segment_base, pseg->marker_self); + pseg->marker_other[0] = 0; } char *_stm_expand_marker(void) { - struct stm_priv_segment_info_s *pseg = - 
get_priv_segment(STM_SEGMENT->segment_num); - pseg->marker_self[0] = 0; - marker_fetch_expand(pseg); - return pseg->marker_self; + /* for tests only! */ + static char _result[_STM_MARKER_LEN]; + uintptr_t marker[2]; + _result[0] = 0; + s_mutex_lock(); + marker_fetch(STM_SEGMENT->running_thread, marker); + marker_expand(marker, STM_SEGMENT->segment_base, _result); + s_mutex_unlock(); + return _result; } static void marker_copy(stm_thread_local_t *tl, @@ -65,6 +100,105 @@ tl->longest_marker_state = attribute_to; tl->longest_marker_time = time; memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); + memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); } pseg->marker_self[0] = 0; + pseg->marker_other[0] = 0; } + +static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, + uintptr_t marker[2]) +{ + assert(_has_mutex()); + + /* here, we acquired the other thread's marker_lock, which means that: + + (1) it has finished filling 'modified_old_objects' after it sets + up the write_locks[] value that we're conflicting with + + (2) it is not mutating 'modified_old_objects' right now (we have + the global mutex_lock at this point too). 
+ */ + long i; + struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num); + struct list_s *mlst = pseg->modified_old_objects; + struct list_s *mlstm = pseg->modified_old_objects_markers; + for (i = list_count(mlst); --i >= 0; ) { + if (list_item(mlst, i) == (uintptr_t)obj) { + assert(list_count(mlstm) == 2 * list_count(mlst)); + marker[0] = list_item(mlstm, i * 2 + 0); + marker[1] = list_item(mlstm, i * 2 + 1); + return; + } + } + marker[0] = 0; + marker[1] = 0; +} + +static void marker_contention(int kind, bool abort_other, + uint8_t other_segment_num, object_t *obj) +{ + uintptr_t self_marker[2]; + uintptr_t other_marker[2]; + struct stm_priv_segment_info_s *my_pseg, *other_pseg; + + my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + other_pseg = get_priv_segment(other_segment_num); + + char *my_segment_base = STM_SEGMENT->segment_base; + char *other_segment_base = get_segment_base(other_segment_num); + + acquire_marker_lock(other_segment_base); + + /* Collect the location for myself. It's usually the current + location, except in a write-read abort, in which case it's the + older location of the write. */ + if (kind == WRITE_READ_CONTENTION) + marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); + else + marker_fetch(my_pseg->pub.running_thread, self_marker); + + /* Expand this location into either my_pseg->marker_self or + other_pseg->marker_other, depending on who aborts. */ + marker_expand(self_marker, my_segment_base, + abort_other ? other_pseg->marker_other + : my_pseg->marker_self); + + /* For some categories, we can also collect the relevant information + for the other segment. 
*/ + switch (kind) { + case WRITE_WRITE_CONTENTION: + marker_fetch_obj_write(other_segment_num, obj, other_marker); + break; + case INEVITABLE_CONTENTION: + assert(abort_other == false); + other_marker[0] = other_pseg->marker_inev[0]; + other_marker[1] = other_pseg->marker_inev[1]; + break; + default: + other_marker[0] = 0; + other_marker[1] = 0; + break; + } + + marker_expand(other_marker, other_segment_base, + abort_other ? other_pseg->marker_self + : my_pseg->marker_other); + + if (abort_other && other_pseg->marker_self[0] == 0) { + if (kind == WRITE_READ_CONTENTION) + strcpy(other_pseg->marker_self, ""); + else + strcpy(other_pseg->marker_self, ""); + } + + release_marker_lock(other_segment_base); +} + +static void marker_fetch_inev(void) +{ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->marker_inev[0] = marker[0]; + STM_PSEGMENT->marker_inev[1] = marker[1]; +} diff --git a/rpython/translator/stm/src_stm/stm/marker.h b/rpython/translator/stm/src_stm/stm/marker.h --- a/rpython/translator/stm/src_stm/stm/marker.h +++ b/rpython/translator/stm/src_stm/stm/marker.h @@ -1,6 +1,13 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ -static void marker_fetch_expand(struct stm_priv_segment_info_s *pseg); +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); +static void marker_fetch_inev(void); +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker); +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); static void marker_copy(stm_thread_local_t *tl, struct stm_priv_segment_info_s *pseg, enum stm_time_e attribute_to, double time); + +static void marker_contention(int kind, bool abort_other, + uint8_t other_segment_num, object_t *obj); diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -216,7 
+216,9 @@ content); or add the object to 'large_overflow_objects'. */ if (STM_PSEGMENT->minor_collect_will_commit_now) { + acquire_privatization_lock(); synchronize_object_now(obj); + release_privatization_lock(); } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); @@ -233,6 +235,18 @@ _collect_now(item)); } +static void collect_roots_from_markers(uintptr_t num_old) +{ + /* visit the marker objects */ + struct list_s *mlst = STM_PSEGMENT->modified_old_objects_markers; + STM_PSEGMENT->modified_old_objects_markers_num_old = list_count(mlst); + uintptr_t i, total = list_count(mlst); + assert((total & 1) == 0); + for (i = num_old + 1; i < total; i += 2) { + minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); + } +} + static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { /* reset the nursery by zeroing it */ @@ -282,6 +296,8 @@ dprintf(("minor_collection commit=%d\n", (int)commit)); + acquire_marker_lock(STM_SEGMENT->segment_base); + STM_PSEGMENT->minor_collect_will_commit_now = commit; if (!commit) { /* 'STM_PSEGMENT->overflow_number' is used now by this collection, @@ -297,6 +313,7 @@ /* All the objects we move out of the nursery become "overflow" objects. We use the list 'objects_pointing_to_nursery' to hold the ones we didn't trace so far. 
*/ + uintptr_t num_old; if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); @@ -306,7 +323,12 @@ into objects_pointing_to_nursery, but instead we use the following shortcut */ collect_modified_old_objects(); + num_old = 0; } + else + num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; + + collect_roots_from_markers(num_old); collect_roots_in_nursery(); @@ -319,6 +341,8 @@ assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); + + release_marker_lock(STM_SEGMENT->segment_base); } static void minor_collection(bool commit) diff --git a/rpython/translator/stm/src_stm/stm/nursery.h b/rpython/translator/stm/src_stm/stm/nursery.h --- a/rpython/translator/stm/src_stm/stm/nursery.h +++ b/rpython/translator/stm/src_stm/stm/nursery.h @@ -2,6 +2,7 @@ /* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ #define NSE_SIGPAUSE STM_TIME_WAIT_OTHER +#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON static uint32_t highest_overflow_number; diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -109,18 +109,20 @@ { /* check this thread's 'pages_privatized' bit */ uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); - struct page_shared_s *ps = &pages_privatized[pagenum - PAGE_FLAG_START]; + volatile struct page_shared_s *ps = (volatile struct page_shared_s *) + &pages_privatized[pagenum - PAGE_FLAG_START]; if (ps->by_segment & bitmask) { /* the page is already privatized; nothing to do */ return; } -#ifndef NDEBUG - spinlock_acquire(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + spinlock_acquire(get_priv_segment(i)->privatization_lock); + } /* add this thread's 'pages_privatized' bit */ - __sync_fetch_and_add(&ps->by_segment, 
bitmask); + ps->by_segment |= bitmask; /* "unmaps" the page to make the address space location correspond again to its underlying file offset (XXX later we should again @@ -134,9 +136,9 @@ /* copy the content from the shared (segment 0) source */ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); -#ifndef NDEBUG - spinlock_release(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + for (i = NB_SEGMENTS; i >= 1; i--) { + spinlock_release(get_priv_segment(i)->privatization_lock); + } } static void _page_do_reshare(long segnum, uintptr_t pagenum) diff --git a/rpython/translator/stm/src_stm/stm/pages.h b/rpython/translator/stm/src_stm/stm/pages.h --- a/rpython/translator/stm/src_stm/stm/pages.h +++ b/rpython/translator/stm/src_stm/stm/pages.h @@ -35,20 +35,6 @@ }; static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; -/* Rules for concurrent access to this array, possibly with is_private_page(): - - - we clear bits only during major collection, when all threads are - synchronized anyway - - - we set only the bit corresponding to our segment number, using - an atomic addition; and we do it _before_ we actually make the - page private. - - - concurrently, other threads checking the bits might (rarely) - get the answer 'true' to is_private_page() even though it is not - actually private yet. This inconsistency is in the direction - that we want for synchronize_object_now(). 
-*/ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); @@ -73,7 +59,3 @@ if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) page_reshare(pagenum); } - -#ifndef NDEBUG -static char lock_pages_privatizing[NB_SEGMENTS + 1] = { 0 }; -#endif diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -79,6 +79,7 @@ pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); + pr->modified_old_objects_markers = list_create(); pr->young_weakrefs = list_create(); pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); @@ -116,6 +117,7 @@ assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); + list_free(pr->modified_old_objects_markers); list_free(pr->young_weakrefs); list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -3,6 +3,10 @@ #include #include +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + /* Each segment can be in one of three possible states, described by the segment variable 'safe_point': @@ -261,6 +265,18 @@ static bool _safe_points_requested = false; #endif +static void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg) +{ + assert(_has_mutex()); + /* never overwrite abort signals or safepoint requests + (too messy to deal with) */ + if (!other_pseg->signalled_to_commit_soon + && !is_abort(other_pseg->pub.nursery_end) + && !pause_signalled) { + other_pseg->pub.nursery_end = NSE_SIGCOMMITSOON; + } +} + static void 
signal_everybody_to_pause_running(void) { assert(_safe_points_requested == false); @@ -324,7 +340,21 @@ if (STM_SEGMENT->nursery_end == NURSERY_END) break; /* no safe point requested */ + if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) { + if (previous_state == -1) { + previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); + } + + STM_PSEGMENT->signalled_to_commit_soon = true; + stmcb_commit_soon(); + if (!pause_signalled) { + STM_SEGMENT->nursery_end = NURSERY_END; + break; + } + STM_SEGMENT->nursery_end = NSE_SIGPAUSE; + } assert(STM_SEGMENT->nursery_end == NSE_SIGPAUSE); + assert(pause_signalled); /* If we are requested to enter a safe-point, we cannot proceed now. Wait until the safe-point request is removed for us. */ diff --git a/rpython/translator/stm/src_stm/stm/timing.c b/rpython/translator/stm/src_stm/stm/timing.c --- a/rpython/translator/stm/src_stm/stm/timing.c +++ b/rpython/translator/stm/src_stm/stm/timing.c @@ -26,10 +26,11 @@ return oldstate; } -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate) +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate) { TIMING_CHANGE(tl, newstate); + return elasped; } static void timing_end_transaction(enum stm_time_e attribute_to) @@ -59,6 +60,7 @@ "wait write read", "wait inevitable", "wait other", + "sync commit soon", "bookkeeping", "minor gc", "major gc", diff --git a/rpython/translator/stm/src_stm/stm/timing.h b/rpython/translator/stm/src_stm/stm/timing.h --- a/rpython/translator/stm/src_stm/stm/timing.h +++ b/rpython/translator/stm/src_stm/stm/timing.h @@ -9,7 +9,7 @@ } static enum stm_time_e change_timing_state(enum stm_time_e newstate); -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate); +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate); static void timing_end_transaction(enum stm_time_e attribute_to); diff --git 
a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -67,6 +67,7 @@ STM_TIME_WAIT_WRITE_READ, STM_TIME_WAIT_INEVITABLE, STM_TIME_WAIT_OTHER, + STM_TIME_SYNC_COMMIT_SOON, STM_TIME_BOOKKEEPING, STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, @@ -217,9 +218,13 @@ The "size rounded up" must be a multiple of 8 and at least 16. "Tracing" an object means enumerating all GC references in it, by invoking the callback passed as argument. + stmcb_commit_soon() is called when it is advised to commit + the transaction as soon as possible in order to avoid conflicts + or improve performance in general. */ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +extern void stmcb_commit_soon(void); /* Allocate an object of the given size, which must be a multiple diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -140,6 +140,7 @@ STM_PUSH_ROOT(stm_thread_local, arg); while (1) { + long counter; if (pypy_stm_ready_atomic == 1) { /* Not in an atomic transaction; but it might be an inevitable @@ -156,11 +157,13 @@ declared below than this point only. 
*/ while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } + counter = v_counter; pypy_stm_start_transaction(&jmpbuf, &v_counter); } else { /* In an atomic transaction */ assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); + counter = v_counter; } /* invoke the callback in the new transaction */ @@ -168,7 +171,7 @@ assert(v_old_shadowstack == stm_thread_local.shadowstack - 1); STM_PUSH_ROOT(stm_thread_local, arg); - long result = v_callback(arg, v_counter); + long result = v_callback(arg, counter); if (result <= 0) break; v_counter = 0; @@ -231,3 +234,5 @@ _pypy_stm_inev_state(); stm_become_globally_unique_transaction(&stm_thread_local, "for the JIT"); } + +void stmcb_commit_soon(void) { /*XXX FIXME*/ } diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -130,6 +130,23 @@ data, dataerr = cbuilder.cmdexec('4 5000', err=True) assert 'check ok!' 
in data + def test_retry_counter_starts_at_zero(self): + # + def check(foobar, retry_counter): + print '<', retry_counter, '>' + return 0 + # + S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) + PS = lltype.Ptr(S) + perform_transaction = rstm.make_perform_transaction(check, PS) + def entry_point(argv): + perform_transaction(lltype.malloc(S)) + return 0 + # + t, cbuilder = self.compile(entry_point, backendopt=True) + data = cbuilder.cmdexec('a b c d') + assert '< 0 >\n' in data + def test_bug1(self): # def check(foobar, retry_counter): @@ -237,7 +254,6 @@ assert 'ok\n' in data def test_abort_info(self): - py.test.skip("goes away") class Parent(object): pass class Foobar(Parent): @@ -249,19 +265,12 @@ globf.xy = 100 + retry_counter def check(_, retry_counter): - rstm.abort_info_push(globf, ('[', 'xy', ']', 'yx')) setxy(globf, retry_counter) if retry_counter < 3: rstm.abort_and_retry() - # - last = rstm.charp_inspect_abort_info() - if last: - print rffi.charp2str(last) - else: - print 'got abort_info=NULL!' 
- print int(bool(rstm.charp_inspect_abort_info())) - # - rstm.abort_info_pop(2) + print rstm.longest_abort_info() + rstm.reset_longest_abort_info() + print rstm.longest_abort_info() return 0 PS = lltype.Ptr(lltype.GcStruct('S', ('got_exception', OBJECTPTR))) @@ -275,7 +284,10 @@ return 0 t, cbuilder = self.compile(main) data = cbuilder.cmdexec('a b') - assert 'li102ee10:hi there 3e\n0\n' in data + # + # 6 == STM_TIME_RUN_ABORTED_OTHER + import re; r = re.compile(r'\(6, 0.00\d+, , \)\n\(0, 0.00+, , \)\n$') + assert r.match(data) def test_weakref(self): import weakref From noreply at buildbot.pypy.org Wed Apr 30 15:11:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 15:11:40 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Disable the STACK_MARKER here (it's not compatible with the current logic) Message-ID: <20140430131140.E558E1C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71090:206ffc6cff43 Date: 2014-04-29 15:50 +0200 http://bitbucket.org/pypy/pypy/changeset/206ffc6cff43/ Log: Disable the STACK_MARKER here (it's not compatible with the current logic) until we figure out if it's really necessary. 
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -136,7 +136,7 @@ stm_thread_local.shadowstack; #endif - STM_PUSH_ROOT(stm_thread_local, STM_STACK_MARKER_NEW); + //STM_PUSH_ROOT(stm_thread_local, STM_STACK_MARKER_NEW); STM_PUSH_ROOT(stm_thread_local, arg); while (1) { @@ -168,7 +168,7 @@ /* invoke the callback in the new transaction */ STM_POP_ROOT(stm_thread_local, arg); - assert(v_old_shadowstack == stm_thread_local.shadowstack - 1); + assert(v_old_shadowstack == stm_thread_local.shadowstack);// - 1); STM_PUSH_ROOT(stm_thread_local, arg); long result = v_callback(arg, counter); @@ -204,8 +204,8 @@ } STM_POP_ROOT_RET(stm_thread_local); /* pop the 'arg' */ - uintptr_t x = (uintptr_t)STM_POP_ROOT_RET(stm_thread_local); - assert(x == STM_STACK_MARKER_NEW || x == STM_STACK_MARKER_OLD); + //uintptr_t x = (uintptr_t)STM_POP_ROOT_RET(stm_thread_local); + //assert(x == STM_STACK_MARKER_NEW || x == STM_STACK_MARKER_OLD); assert(v_old_shadowstack == stm_thread_local.shadowstack); } From noreply at buildbot.pypy.org Wed Apr 30 15:11:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 15:11:42 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: merge heads Message-ID: <20140430131142.0D1B61C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71091:24698a02507b Date: 2014-04-29 15:50 +0200 http://bitbucket.org/pypy/pypy/changeset/24698a02507b/ Log: merge heads diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -569,6 +569,7 @@ POP_r = insn(rex_nw, register(1), '\x58') POP_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) POP_m = insn(rex_nw, '\x8F', orbyte(0<<3), mem_reg_plus_const(1)) + POP_j = insn(rex_nw, '\x8F', orbyte(0<<3), abs_(1)) # note: the segment specified 
in LEA should always be SEGMENT_NO; # if instead you give it a SEGMENT_*S, it is ignored diff --git a/rpython/jit/backend/x86/test/test_rx86.py b/rpython/jit/backend/x86/test/test_rx86.py --- a/rpython/jit/backend/x86/test/test_rx86.py +++ b/rpython/jit/backend/x86/test/test_rx86.py @@ -261,3 +261,8 @@ s = CodeBuilder64() s.MOV8_jr((SEGMENT_GS, 51), ebx | BYTE_REG_FLAG) assert s.getvalue() == '\x65\x88\x1C\x25\x33\x00\x00\x00' + +def test_pop_j(): + s = CodeBuilder64() + s.POP_j((SEGMENT_GS, -440)) + assert s.getvalue() == '\x65\x8f\x04\x25\x48\xfe\xff\xff' From noreply at buildbot.pypy.org Wed Apr 30 15:11:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 15:11:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: merge heads Message-ID: <20140430131143.1F8281C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71092:c7ff109cfc93 Date: 2014-04-30 15:10 +0200 http://bitbucket.org/pypy/pypy/changeset/c7ff109cfc93/ Log: merge heads From noreply at buildbot.pypy.org Wed Apr 30 18:58:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 30 Apr 2014 18:58:16 +0200 (CEST) Subject: [pypy-commit] pypy py3k: port some improvements from default Message-ID: <20140430165816.884811C33B3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r71093:33694c682b75 Date: 2014-04-30 12:52 -0400 http://bitbucket.org/pypy/pypy/changeset/33694c682b75/ Log: port some improvements from default diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -13,9 +13,7 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - try: + with fp: imp.load_module('_testcapi', fp, filename, description) - finally: - fp.close() except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py 
@@ -17,8 +17,8 @@ def compilecode(space, source, filename, cmd='exec'): w = space.wrap - w_code = space.builtin.call('compile', - space.wrapbytes(source), w(filename), w(cmd), w(0), w(0)) + w_code = space.builtin.call( + 'compile', space.wrapbytes(source), w(filename), w(cmd), w(0), w(0)) pycode = space.interp_w(eval.Code, w_code) return pycode diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -828,7 +828,7 @@ __kwdefaults__ = getset_func_kwdefaults, __annotations__ = getset_func_annotations, __globals__ = interp_attrproperty_w('w_func_globals', cls=Function), - __closure__ = GetSetProperty( Function.fget_func_closure ), + __closure__ = GetSetProperty(Function.fget_func_closure), __module__ = getset___module__, __weakref__ = make_weakref_descr(Function), ) diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -27,22 +27,11 @@ """ from pypy.interpreter.pyopcode import source_as_str ec = space.getexecutioncontext() - if flags & ~(ec.compiler.compiler_flags | - consts.PyCF_ONLY_AST | - consts.PyCF_DONT_IMPLY_DEDENT | - consts.PyCF_SOURCE_IS_UTF8): + if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | + consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): raise OperationError(space.w_ValueError, space.wrap("compile() unrecognized flags")) - flags |= consts.PyCF_SOURCE_IS_UTF8 - ast_node = source = None - if space.isinstance_w(w_source, space.gettypeobject(ast.AST.typedef)): - ast_node = space.interp_w(ast.mod, w_source) - ast_node.sync_app_attrs(space) - else: - source, flags = source_as_str(space, w_source, 'compile', - "string, bytes or AST", flags) - if not dont_inherit: caller = ec.gettopframe_nohidden() if caller: @@ -55,11 +44,18 @@ # XXX: optimize flag is not used - if ast_node is not None: + if space.isinstance_w(w_source, 
space.gettypeobject(ast.AST.typedef)): + ast_node = space.interp_w(ast.mod, w_source) + ast_node.sync_app_attrs(space) code = ec.compiler.compile_ast(ast_node, filename, mode, flags) - elif flags & consts.PyCF_ONLY_AST: - ast_node = ec.compiler.compile_to_ast(source, filename, mode, flags) - return space.wrap(ast_node) + return space.wrap(code) + + flags |= consts.PyCF_SOURCE_IS_UTF8 + source, flags = source_as_str(space, w_source, 'compile', + "string, bytes or AST", flags) + + if flags & consts.PyCF_ONLY_AST: + code = ec.compiler.compile_to_ast(source, filename, mode, flags) else: code = ec.compiler.compile(source, filename, mode, flags) return space.wrap(code) From noreply at buildbot.pypy.org Wed Apr 30 18:59:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 30 Apr 2014 18:59:23 +0200 (CEST) Subject: [pypy-commit] pypy default: reduce diff with py3k Message-ID: <20140430165923.4A3201C33B3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71094:62effbe1245d Date: 2014-04-30 12:53 -0400 http://bitbucket.org/pypy/pypy/changeset/62effbe1245d/ Log: reduce diff with py3k diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -27,6 +27,7 @@ consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): raise OperationError(space.w_ValueError, space.wrap("compile() unrecognized flags")) + if not dont_inherit: caller = ec.gettopframe_nohidden() if caller: @@ -37,8 +38,7 @@ space.wrap("compile() arg 3 must be 'exec' " "or 'eval' or 'single'")) - w_ast_type = space.gettypeobject(ast.AST.typedef) - if space.isinstance_w(w_source, w_ast_type): + if space.isinstance_w(w_source, space.gettypeobject(ast.AST.typedef)): ast_node = space.interp_w(ast.mod, w_source) ast_node.sync_app_attrs(space) code = ec.compiler.compile_ast(ast_node, filename, mode, flags) @@ -47,20 +47,20 @@ if space.isinstance_w(w_source, space.w_unicode): 
w_utf_8_source = space.call_method(w_source, "encode", space.wrap("utf-8")) - str_ = space.str_w(w_utf_8_source) + source = space.str_w(w_utf_8_source) # This flag tells the parser to reject any coding cookies it sees. flags |= consts.PyCF_SOURCE_IS_UTF8 else: - str_ = space.readbuf_w(w_source).as_str() + source = space.readbuf_w(w_source).as_str() - if '\x00' in str_: + if '\x00' in source: raise OperationError(space.w_TypeError, space.wrap( "compile() expected string without null bytes")) if flags & consts.PyCF_ONLY_AST: - code = ec.compiler.compile_to_ast(str_, filename, mode, flags) + code = ec.compiler.compile_to_ast(source, filename, mode, flags) else: - code = ec.compiler.compile(str_, filename, mode, flags) + code = ec.compiler.compile(source, filename, mode, flags) return space.wrap(code) From noreply at buildbot.pypy.org Wed Apr 30 19:01:43 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 30 Apr 2014 19:01:43 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into release Message-ID: <20140430170143.D0AC21C33B3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r71095:ffd96bb654d8 Date: 2014-04-30 07:59 +0300 http://bitbucket.org/pypy/pypy/changeset/ffd96bb654d8/ Log: merge default into release diff too long, truncating to 2000 out of 6575 lines diff --git a/pypy/doc/config/objspace.usemodules.oracle.txt b/pypy/doc/config/objspace.usemodules.oracle.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.oracle.txt +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'oracle' module. -This module is off by default, requires oracle client installed. 
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -58,7 +58,6 @@ math mmap operator - oracle parser posix pyexpat diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -92,7 +92,7 @@ * Support for OpenBSD * Code cleanup: we continue to prune out old and unused code, and to refactor - large parts of the codebase. We have sepearated rpython from the PyPy python + large parts of the codebase. We have separated rpython from the PyPy python interpreter, and rpython is seeing use in other dynamic language projects. * Support for precompiled headers in the build process for MSVC diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -321,10 +321,11 @@ limit -= len(self.keyword_names_w) for i in range(len(self.keywords)): if i < limit: - w_key = space.wrap(self.keywords[i]) + key = self.keywords[i] + space.setitem_str(w_kwds, key, self.keywords_w[i]) else: w_key = self.keyword_names_w[i - limit] - space.setitem(w_kwds, w_key, self.keywords_w[i]) + space.setitem(w_kwds, w_key, self.keywords_w[i]) return w_args, w_kwds # JIT helper functions @@ -416,10 +417,10 @@ break else: if i < limit: - w_key = space.wrap(keywords[i]) + space.setitem_str(w_kwds, keywords[i], keywords_w[i]) else: w_key = keyword_names_w[i - limit] - space.setitem(w_kwds, w_key, keywords_w[i]) + space.setitem(w_kwds, w_key, keywords_w[i]) # # ArgErr family of exceptions raised in case of argument mismatch. 
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -93,6 +93,7 @@ def setitem(self, obj, key, value): obj[key] = value + setitem_str = setitem def getitem(self, obj, key): return obj[key] diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -448,6 +448,9 @@ self.descr_delitem(space, space.newslice(w_start, w_stop, space.w_None)) + def descr_iter(self, space): + return space.newseqiter(self) + def descr_add(self, space, w_other): raise NotImplementedError @@ -503,6 +506,7 @@ __setslice__ = interp2app(W_ArrayBase.descr_setslice), __delitem__ = interp2app(W_ArrayBase.descr_delitem), __delslice__ = interp2app(W_ArrayBase.descr_delslice), + __iter__ = interp2app(W_ArrayBase.descr_iter), __add__ = interpindirect2app(W_ArrayBase.descr_add), __iadd__ = interpindirect2app(W_ArrayBase.descr_inplace_add), diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -697,6 +697,8 @@ for i in a: b.append(i) assert repr(b) == "array('i', [1, 2, 3])" + assert hasattr(b, '__iter__') + assert next(b.__iter__()) == 1 def test_lying_iterable(self): class lier(object): diff --git a/pypy/module/cpyext/patches/cx_Oracle.patch b/pypy/module/cpyext/patches/cx_Oracle.patch deleted file mode 100644 --- a/pypy/module/cpyext/patches/cx_Oracle.patch +++ /dev/null @@ -1,60 +0,0 @@ -Index: cx_Oracle.c -=================================================================== ---- cx_Oracle.c (r�vision 333) -+++ cx_Oracle.c (copie de travail) -@@ -65,6 +65,13 @@ - #define CXORA_BASE_EXCEPTION PyExc_StandardError - #endif - -+// define missing PyDateTime_DELTA macros -+#ifndef PYPY_VERSION -+PyDateTime_DELTA_GET_DAYS(o) 
(((PyDateTime_Delta*)o)->days) -+PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta*)o)->seconds) -+PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds) -+#endif -+ - // define simple construct for determining endianness of the platform - // Oracle uses native encoding with OCI_UTF16 but bails when a BOM is written - #define IS_LITTLE_ENDIAN (int)*(unsigned char*) &one -@@ -138,6 +145,7 @@ - *exception = PyErr_NewException(buffer, baseException, NULL); - if (!*exception) - return -1; -+ Py_INCREF(*exception); - return PyModule_AddObject(module, name, *exception); - } - -Index: IntervalVar.c -=================================================================== ---- IntervalVar.c (r�vision 333) -+++ IntervalVar.c (copie de travail) -@@ -121,7 +121,7 @@ - unsigned pos, // array position to set - PyObject *value) // value to set - { -- sb4 hours, minutes, seconds; -+ sb4 days, hours, minutes, seconds, microseconds; - PyDateTime_Delta *delta; - sword status; - -@@ -131,13 +131,16 @@ - } - - delta = (PyDateTime_Delta*) value; -- hours = (sb4) delta->seconds / 3600; -- seconds = delta->seconds - hours * 3600; -+ days = PyDateTime_DELTA_GET_DAYS(delta); -+ seconds = PyDateTime_DELTA_GET_SECONDS(delta); -+ hours = (sb4) seconds / 3600; -+ seconds -= hours * 3600; - minutes = (sb4) seconds / 60; - seconds -= minutes * 60; -+ microseconds = PyDateTime_DELTA_GET_MICROSECONDS(delta); - status = OCIIntervalSetDaySecond(var->environment->handle, -- var->environment->errorHandle, delta->days, hours, minutes, -- seconds, delta->microseconds, var->data[pos]); -+ var->environment->errorHandle, days, hours, minutes, -+ seconds, microseconds, var->data[pos]); - if (Environment_CheckForError(var->environment, status, - "IntervalVar_SetValue()") < 0) - return -1; diff --git a/pypy/module/oracle/__init__.py b/pypy/module/oracle/__init__.py deleted file mode 100644 --- a/pypy/module/oracle/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -from pypy.interpreter.mixedmodule 
import MixedModule - -class Module(MixedModule): - applevel_name = 'cx_Oracle' - - interpleveldefs = { - 'connect': 'interp_connect.W_Connection', - 'Connection': 'interp_connect.W_Connection', - 'NUMBER': 'interp_variable.VT_Float', - 'STRING': 'interp_variable.VT_String', - 'UNICODE': 'interp_variable.VT_NationalCharString', - 'DATETIME': 'interp_variable.VT_DateTime', - 'DATE': 'interp_variable.VT_Date', - 'TIMESTAMP': 'interp_variable.VT_Timestamp', - 'INTERVAL': 'interp_variable.VT_Interval', - 'BINARY': 'interp_variable.VT_Binary', - 'LONG_STRING': 'interp_variable.VT_LongString', - 'LONG_BINARY': 'interp_variable.VT_LongBinary', - 'FIXED_CHAR': 'interp_variable.VT_FixedChar', - 'FIXED_UNICODE': 'interp_variable.VT_FixedNationalChar', - 'CURSOR': 'interp_variable.VT_Cursor', - 'BLOB': 'interp_variable.VT_BLOB', - 'CLOB': 'interp_variable.VT_CLOB', - 'OBJECT': 'interp_variable.VT_Object', - 'Variable': 'interp_variable.W_Variable', - 'SessionPool': 'interp_pool.W_SessionPool', - } - - appleveldefs = { - 'version': 'app_oracle.version', - 'paramstyle': 'app_oracle.paramstyle', - 'makedsn': 'app_oracle.makedsn', - 'TimestampFromTicks': 'app_oracle.TimestampFromTicks', - } - for name in """DataError DatabaseError Error IntegrityError InterfaceError - InternalError NotSupportedError OperationalError - ProgrammingError Warning""".split(): - appleveldefs[name] = "app_oracle.%s" % (name,) - - def startup(self, space): - from pypy.module.oracle.interp_error import get - state = get(space) - state.startup(space) - (state.w_DecimalType, - state.w_DateTimeType, state.w_DateType, state.w_TimedeltaType, - ) = space.fixedview(space.appexec([], """(): - import decimal, datetime - return (decimal.Decimal, - datetime.datetime, datetime.date, datetime.timedelta) - """)) - space.setattr(space.wrap(self), - space.wrap("Timestamp"), state.w_DateTimeType) - space.setattr(space.wrap(self), - space.wrap("Date"), state.w_DateType) diff --git a/pypy/module/oracle/app_oracle.py 
b/pypy/module/oracle/app_oracle.py deleted file mode 100644 --- a/pypy/module/oracle/app_oracle.py +++ /dev/null @@ -1,42 +0,0 @@ -version = '5.0.0' -paramstyle = 'named' - -class Warning(StandardError): - pass - -class Error(StandardError): - pass - -class InterfaceError(Error): - pass - -class DatabaseError(Error): - pass - -class DataError(DatabaseError): - pass - -class OperationalError(DatabaseError): - pass - -class IntegrityError(DatabaseError): - pass - -class InternalError(DatabaseError): - pass - -class ProgrammingError(DatabaseError): - pass - -class NotSupportedError(DatabaseError): - pass - - -def makedsn(host, port, sid): - return ("(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=" - "(PROTOCOL=TCP)(HOST=%s)(PORT=%s)))" - "(CONNECT_DATA=(SID=%s)))" % (host, port, sid)) - -def TimestampFromTicks(*args): - import datetime - return datetime.datetime.fromtimestamp(*args) diff --git a/pypy/module/oracle/config.py b/pypy/module/oracle/config.py deleted file mode 100644 --- a/pypy/module/oracle/config.py +++ /dev/null @@ -1,54 +0,0 @@ -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.oracle import roci - -WITH_UNICODE = False - -MAX_STRING_CHARS = 4000 -MAX_BINARY_BYTES = 4000 - -if WITH_UNICODE: - CHARSETID = roci.OCI_UTF16ID - BYTES_PER_CHAR = 2 - def string_w(space, w_obj): - return space.unicode_w(w_obj) -else: - def string_w(space, w_obj): - return space.str_w(w_obj) - - def w_string(space, buf, len=-1): - #assert type(len) is int - if len < 0: - return space.wrap(rffi.charp2str(buf)) - else: - return space.wrap(rffi.charpsize2str(buf, len)) - CHARSETID = 0 - BYTES_PER_CHAR = 1 - - class StringBuffer: - "Fill a char* buffer with data, suitable to pass to Oracle functions" - def __init__(self): - self.ptr = lltype.nullptr(roci.oratext.TO) - self.size = 0 - - def fill(self, space, w_value): - if w_value is None or space.is_w(w_value, space.w_None): - self.clear() - else: - strvalue = space.str_w(w_value) - self.ptr = rffi.str2charp(strvalue) - 
self.size = len(strvalue) - - def fill_with_unicode(self, space, w_value): - if w_value is None or space.is_w(w_value, space.w_None): - self.clear() - else: - # XXX ucs2 only probably - univalue = space.unicode_w(w_value) - self.ptr = rffi.cast(roci.oratext, rffi.unicode2wcharp(univalue)) - self.size = len(univalue) * 2 - - def clear(self): - if self.ptr: - rffi.free_charp(self.ptr) - self.ptr = lltype.nullptr(roci.oratext.TO) - self.size = 0 diff --git a/pypy/module/oracle/conftest.py b/pypy/module/oracle/conftest.py deleted file mode 100644 --- a/pypy/module/oracle/conftest.py +++ /dev/null @@ -1,9 +0,0 @@ -import os - -def pytest_addoption(parser): - group = parser.getgroup("Oracle module options") - group.addoption('--oracle-home', dest="oracle_home", - help="Home directory of Oracle client installation", - default=os.environ.get("ORACLE_HOME")) - group.addoption('--oracle-connect', dest="oracle_connect", - help="connect string (user/pwd at db) used for tests") diff --git a/pypy/module/oracle/interp_connect.py b/pypy/module/oracle/interp_connect.py deleted file mode 100644 --- a/pypy/module/oracle/interp_connect.py +++ /dev/null @@ -1,551 +0,0 @@ -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.typedef import (TypeDef, interp_attrproperty_w, - GetSetProperty) -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError -from rpython.rtyper.lltypesystem import rffi, lltype - -from pypy.module.oracle import roci, interp_error -from pypy.module.oracle.config import string_w, StringBuffer, MAX_STRING_CHARS -from pypy.module.oracle.interp_environ import Environment -from pypy.module.oracle.interp_cursor import W_Cursor -from pypy.module.oracle.interp_pool import W_SessionPool -from pypy.module.oracle.interp_variable import VT_String - - -class W_Connection(W_Root): - def __init__(self): - self.commitMode = roci.OCI_DEFAULT - self.environment = None - 
self.autocommit = False - - self.sessionHandle = lltype.nullptr(roci.OCISession.TO) - self.serverHandle = lltype.nullptr(roci.OCIServer.TO) - - self.w_inputTypeHandler = None - self.w_outputTypeHandler = None - - self.w_version = None - self.release = False - - - @unwrap_spec(mode=int, handle=int, - threaded=bool, twophase=bool, events=bool, - purity=bool) - def descr_new(space, w_subtype, - w_user=None, - w_password=None, - w_dsn=None, - mode=roci.OCI_DEFAULT, - handle=0, # XXX should be a ptr type - w_pool=None, - threaded=False, - twophase=False, - events=False, - w_cclass=None, - purity=0, - w_newpassword=None): - self = space.allocate_instance(W_Connection, w_subtype) - W_Connection.__init__(self) - - # set up the environment - if w_pool: - pool = space.interp_w(W_SessionPool, w_pool) - self.environment = pool.environment.clone() - else: - pool = None - self.environment = Environment.create(space, threaded, events) - - self.w_username = w_user - self.w_password = w_password - self.w_tnsentry = w_dsn - - # perform some parsing, if necessary - if (self.w_username and not self.w_password and - space.is_true(space.contains(self.w_username, space.wrap('/')))): - (self.w_username, self.w_password) = space.listview( - space.call_method(self.w_username, 'split', - space.wrap('/'), space.wrap(1))) - - if (self.w_password and not self.w_tnsentry and - space.is_true(space.contains(self.w_password, space.wrap('@')))): - (self.w_password, self.w_tnsentry) = space.listview( - space.call_method(self.w_password, 'split', - space.wrap('@'), space.wrap(1))) - - if pool or w_cclass is not None: - self.getConnection(space, pool, w_cclass, purity) - else: - self.connect(space, mode, twophase) - return space.wrap(self) - - def __del__(self): - self.enqueue_for_destruction(self.environment.space, - W_Connection.destructor, - '__del__ method of ') - - def destructor(self): - assert isinstance(self, W_Connection) - if self.release: - roci.OCITransRollback( - self.handle, 
self.environment.errorHandle, - roci.OCI_DEFAULT) - roci.OCISessionRelease( - self.handle, self.environment.errorHandle, - None, 0, roci.OCI_DEFAULT) - else: - if self.sessionHandle: - roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - roci.OCISessionEnd( - self.handle, self.environment.errorHandle, - self.sessionHandle, roci.OCI_DEFAULT) - if self.serverHandle: - roci.OCIServerDetach( - self.serverHandle, self.environment.errorHandle, - roci.OCI_DEFAULT) - - def connect(self, space, mode, twophase): - stringBuffer = StringBuffer() - - # allocate the server handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIServer).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_SERVER, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_Connect(): allocate server handle") - self.serverHandle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - # attach to the server - stringBuffer.fill(space, self.w_tnsentry) - try: - status = roci.OCIServerAttach( - self.serverHandle, - self.environment.errorHandle, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Connect(): server attach") - finally: - stringBuffer.clear() - - # allocate the service context handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCISvcCtx).TO, - 1, flavor='raw') - - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_SVCCTX, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_Connect(): allocate service context handle") - self.handle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - # set attribute for server handle - status = roci.OCIAttrSet( - self.handle, roci.OCI_HTYPE_SVCCTX, - self.serverHandle, 0, - roci.OCI_ATTR_SERVER, - self.environment.errorHandle) - 
self.environment.checkForError( - status, "Connection_Connect(): set server handle") - - # set the internal and external names; these are needed for global - # transactions but are limited in terms of the lengths of the strings - if twophase: - status = roci.OCIAttrSet( - self.serverHandle, roci.OCI_HTYPE_SERVER, - "cx_Oracle", 0, - roci.OCI_ATTR_INTERNAL_NAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set internal name") - status = roci.OCIAttrSet( - self.serverHandle, roci.OCI_HTYPE_SERVER, - "cx_Oracle", 0, - roci.OCI_ATTR_EXTERNAL_NAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set external name") - - # allocate the session handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCISession).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_SESSION, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_Connect(): allocate session handle") - self.sessionHandle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - credentialType = roci.OCI_CRED_EXT - - # set user name in session handle - stringBuffer.fill(space, self.w_username) - try: - if stringBuffer.size > 0: - credentialType = roci.OCI_CRED_RDBMS - status = roci.OCIAttrSet( - self.sessionHandle, - roci.OCI_HTYPE_SESSION, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_USERNAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set user name") - finally: - stringBuffer.clear() - - # set password in session handle - stringBuffer.fill(space, self.w_password) - try: - if stringBuffer.size > 0: - credentialType = roci.OCI_CRED_RDBMS - status = roci.OCIAttrSet( - self.sessionHandle, - roci.OCI_HTYPE_SESSION, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_PASSWORD, - self.environment.errorHandle) - 
self.environment.checkForError( - status, "Connection_Connect(): set password") - finally: - stringBuffer.clear() - - # set the session handle on the service context handle - status = roci.OCIAttrSet( - self.handle, roci.OCI_HTYPE_SVCCTX, - self.sessionHandle, 0, - roci.OCI_ATTR_SESSION, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set session handle") - - # if a new password has been specified, change it which will also - # establish the session - - # begin the session - status = roci.OCISessionBegin( - self.handle, self.environment.errorHandle, - self.sessionHandle, credentialType, mode) - try: - self.environment.checkForError( - status, "Connection_Connect(): begin session") - except: - self.sessionHandle = lltype.nullptr(roci.OCISession.TO) - raise - - def getConnection(self, space, pool, w_cclass, purity): - """Get a connection using the OCISessionGet() interface - rather than using the low level interface for connecting.""" - - proxyCredentials = False - authInfo = lltype.nullptr(roci.OCIAuthInfo.TO) - - if pool: - w_dbname = pool.w_name - mode = roci.OCI_SESSGET_SPOOL - if not pool.homogeneous and pool.w_username and self.w_username: - proxyCredentials = space.is_true(space.ne(pool.w_username, self.w_username)) - mode |= roci.OCI_SESSGET_CREDPROXY - else: - w_dbname = self.w_tnsentry - mode = roci.OCI_SESSGET_STMTCACHE - - stringBuffer = StringBuffer() - - # set up authorization handle, if needed - if not pool or w_cclass or proxyCredentials: - # create authorization handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIAuthInfo).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, - roci.OCI_HTYPE_AUTHINFO, - 0, lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_GetConnection(): allocate handle") - - authInfo = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - externalCredentials = True - 
- # set the user name, if applicable - stringBuffer.fill(space, self.w_username) - try: - if stringBuffer.size > 0: - externalCredentials = False - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_USERNAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetConnection(): set user name") - finally: - stringBuffer.clear() - - # set the password, if applicable - stringBuffer.fill(space, self.w_password) - try: - if stringBuffer.size > 0: - externalCredentials = False - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_PASSWORD, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetConnection(): set password") - finally: - stringBuffer.clear() - - # if no user name or password are set, using external credentials - if not pool and externalCredentials: - mode |= roci.OCI_SESSGET_CREDEXT - - # set the connection class, if applicable - if roci.OCI_ATTR_CONNECTION_CLASS is not None: - stringBuffer.fill(space, w_cclass) - try: - if stringBuffer.size > 0: - externalCredentials = False - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_CONNECTION_CLASS, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Connection_GetConnection(): set connection class") - finally: - stringBuffer.clear() - - # set the purity, if applicable - if (roci.OCI_ATTR_PURITY is not None - and purity != roci.OCI_ATTR_PURITY_DEFAULT): - purityptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, - 1, flavor='raw') - purityptr[0] = rffi.cast(roci.ub4, purity) - try: - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - rffi.cast(roci.dvoidp, purityptr), - rffi.sizeof(roci.ub4), - roci.OCI_ATTR_PURITY, - self.environment.errorHandle) - self.environment.checkForError( - status, 
"Connection_GetConnection(): set purity") - finally: - lltype.free(purityptr, flavor='raw') - - # acquire the new session - stringBuffer.fill(space, w_dbname) - foundptr = lltype.malloc(rffi.CArrayPtr(roci.boolean).TO, - 1, flavor='raw') - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCISvcCtx).TO, - 1, flavor='raw') - try: - status = roci.OCISessionGet( - self.environment.handle, - self.environment.errorHandle, - handleptr, - authInfo, - stringBuffer.ptr, stringBuffer.size, - None, 0, - lltype.nullptr(roci.Ptr(roci.oratext).TO), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - foundptr, - mode) - self.environment.checkForError( - status, "Connection_GetConnection(): get connection") - - self.handle = handleptr[0] - finally: - stringBuffer.clear() - lltype.free(foundptr, flavor='raw') - lltype.free(handleptr, flavor='raw') - - # eliminate the authorization handle immediately, if applicable - if authInfo: - roci.OCIHandleFree(authInfo, roci.OCI_HTYPE_AUTHINFO) - - # copy members in the case where a pool is being used - if pool: - if not proxyCredentials: - self.w_username = pool.w_username - self.w_password = pool.w_password - self.w_tnsentry = pool.w_tnsentry - self.sessionPool = pool - - self.release = True - - def _checkConnected(self, space): - if not self.handle: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap("not connected")) - - def close(self, space): - # make sure we are actually connnected - self._checkConnected(space) - - # perform a rollback - status = roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Close(): rollback") - - # logoff of the server - if self.sessionHandle: - status = roci.OCISessionEnd( - self.handle, self.environment.errorHandle, - self.sessionHandle, roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Close(): end session") - roci.OCIHandleFree(self.handle, roci.OCI_HTYPE_SVCCTX) - - 
self.handle = lltype.nullptr(roci.OCISvcCtx.TO) - - def commit(self, space): - # make sure we are actually connected - self._checkConnected(space) - - status = roci.OCITransCommit( - self.handle, self.environment.errorHandle, - self.commitMode) - self.environment.checkForError( - status, "Connection_Commit()") - - self.commitMode = roci.OCI_DEFAULT - - def rollback(self, space): - # make sure we are actually connected - self._checkConnected(space) - - status = roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Rollback()") - - def newCursor(self, space): - return space.wrap(W_Cursor(space, self)) - - def _getCharacterSetName(self, space, attribute): - # get character set id - charsetIdPtr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, - 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.environment.handle, roci.OCI_HTYPE_ENV, - rffi.cast(roci.dvoidp, charsetIdPtr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - attribute, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetCharacterSetName(): get charset id") - charsetId = charsetIdPtr[0] - finally: - lltype.free(charsetIdPtr, flavor='raw') - - # get character set name - charsetname_buf, charsetname = rffi.alloc_buffer(roci.OCI_NLS_MAXBUFSZ) - try: - status = roci.OCINlsCharSetIdToName( - self.environment.handle, - charsetname_buf, roci.OCI_NLS_MAXBUFSZ, - charsetId) - self.environment.checkForError( - status, - "Connection_GetCharacterSetName(): get Oracle charset name") - - ianacharset_buf, ianacharset = rffi.alloc_buffer( - roci.OCI_NLS_MAXBUFSZ) - - try: - # get IANA character set name - status = roci.OCINlsNameMap( - self.environment.handle, - ianacharset_buf, roci.OCI_NLS_MAXBUFSZ, - charsetname_buf, roci.OCI_NLS_CS_ORA_TO_IANA) - self.environment.checkForError( - status, - "Connection_GetCharacterSetName(): translate NLS charset") - charset = rffi.charp2str(ianacharset_buf) - 
finally: - rffi.keep_buffer_alive_until_here(ianacharset_buf, ianacharset) - finally: - rffi.keep_buffer_alive_until_here(charsetname_buf, charsetname) - return space.wrap(charset) - - def get_encoding(self, space): - return self._getCharacterSetName(space, roci.OCI_ATTR_ENV_CHARSET_ID) - def get_nationalencoding(self, space): - return self._getCharacterSetName(space, roci.OCI_ATTR_ENV_CHARSET_ID) - def get_maxbytespercharacter(self, space): - return space.wrap(self.environment.maxBytesPerCharacter) - - def get_version(self, space): - # if version has already been determined, no need to determine again - if self.w_version: - return self.w_version - - # allocate a cursor to retrieve the version - cursor = W_Cursor(space, self) - - # allocate version and compatibility variables - versionVar = VT_String(cursor, cursor.arraySize, MAX_STRING_CHARS) - compatVar = VT_String(cursor, cursor.arraySize, MAX_STRING_CHARS) - - # call stored procedure - cursor._call(space, "dbms_utility.db_version", - None, space.newlist([space.wrap(versionVar), - space.wrap(compatVar)])) - - # retrieve value - self.w_version = versionVar.getValue(space, 0) - return self.w_version - -W_Connection.typedef = TypeDef( - "Connection", - __new__ = interp2app(W_Connection.descr_new.im_func), - username = interp_attrproperty_w('w_username', W_Connection), - password = interp_attrproperty_w('w_password', W_Connection), - tnsentry = interp_attrproperty_w('w_tnsentry', W_Connection), - - close = interp2app(W_Connection.close), - commit = interp2app(W_Connection.commit), - rollback = interp2app(W_Connection.rollback), - - cursor = interp2app(W_Connection.newCursor), - - encoding = GetSetProperty(W_Connection.get_encoding), - nationalencoding = GetSetProperty(W_Connection.get_nationalencoding), - maxBytesPerCharacter = GetSetProperty(W_Connection.get_maxbytespercharacter), - version = GetSetProperty(W_Connection.get_version), - ) diff --git a/pypy/module/oracle/interp_cursor.py 
b/pypy/module/oracle/interp_cursor.py deleted file mode 100644 --- a/pypy/module/oracle/interp_cursor.py +++ /dev/null @@ -1,1094 +0,0 @@ -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.typedef import interp_attrproperty, interp_attrproperty_w -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError -from rpython.rtyper.lltypesystem import rffi, lltype - -from pypy.module.oracle import roci, interp_error -from pypy.module.oracle.config import w_string, string_w, StringBuffer -from pypy.module.oracle import interp_variable -from pypy.module.oracle.interp_error import get - -# XXX are those "assert isinstance(xxx, interp_variable.W_Variable)" necessary? -# the bindList should annotate to SomeList(SomeInstance(W_Variable)) - -class W_Cursor(W_Root): - def __init__(self, space, connection): - self.connection = connection - self.environment = connection.environment - - self.w_statement = None - self.statementType = -1 - self.handle = lltype.nullptr(roci.OCIStmt.TO) - self.isOpen = True - self.isOwned = False - - self.setInputSizes = False - self.arraySize = 50 - self.fetchArraySize = 50 - self.bindArraySize = 1 - self.bindList = None - self.bindDict = None - self.numbersAsStrings = False - self.outputSize = -1 - self.outputSizeColumn = -1 - - self.w_inputTypeHandler = None - self.w_outputTypeHandler = None - self.w_rowFactory = None - - def execute(self, space, w_stmt, __args__): - args_w, kw_w = __args__.unpack() - - if space.is_w(w_stmt, space.w_None): - w_stmt = None - - if len(args_w) > 1: - raise OperationError( - space.w_TypeError, - space.wrap("Too many arguments")) - elif len(args_w) == 1: - if len(kw_w) > 0: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap( - "expecting argument or keyword arguments, not both")) - w_vars = args_w[0] - elif len(kw_w) > 0: - w_vars = space.newdict() - for 
key, w_value in kw_w.iteritems(): - space.setitem(w_vars, space.wrap(key), w_value) - else: - w_vars = None - - # make sure the cursor is open - self._checkOpen(space) - - return self._execute(space, w_stmt, w_vars) - - def prepare(self, space, w_stmt, w_tag=None): - # make sure the cursor is open - self._checkOpen(space) - - # prepare the statement - self._internalPrepare(space, w_stmt, w_tag) - - def _execute(self, space, w_stmt, w_vars): - - # prepare the statement, if applicable - self._internalPrepare(space, w_stmt, None) - - # perform binds - if w_vars is None: - pass - elif space.isinstance_w(w_vars, space.w_dict): - self._setBindVariablesByName(space, w_vars, 1, 0, 0) - else: - self._setBindVariablesByPos(space, w_vars, 1, 0, 0) - self._performBind(space) - - # execute the statement - isQuery = self.statementType == roci.OCI_STMT_SELECT - if isQuery: - numIters = 0 - else: - numIters = 1 - self._internalExecute(space, numIters=numIters) - - # perform defines, if necessary - if isQuery and self.fetchVariables is None: - self._performDefine() - - # reset the values of setoutputsize() - self.outputSize = -1 - self.outputSizeColumn = -1 - - # for queries, return the cursor for convenience - if isQuery: - return space.wrap(self) - - # for all other statements, simply return None - return space.w_None - - def executemany(self, space, w_stmt, w_list_of_args): - if space.is_w(w_stmt, space.w_None): - w_stmt = None - if not space.isinstance_w(w_list_of_args, space.w_list): - raise OperationError( - space.w_TypeError, - space.wrap("list expected")) - - # make sure the cursor is open - self._checkOpen(space) - - # prepare the statement - self._internalPrepare(space, w_stmt, None) - - # queries are not supported as the result is undefined - if self.statementType == roci.OCI_STMT_SELECT: - raise OperationError( - get(space).w_NotSupportedError, - space.wrap("queries not supported: results undefined")) - - # perform binds - args_w = space.listview(w_list_of_args) - 
numrows = len(args_w) - for i in range(numrows): - w_arguments = args_w[i] - deferred = i < numrows - 1 - if space.isinstance_w(w_arguments, space.w_dict): - self._setBindVariablesByName( - space, w_arguments, numrows, i, deferred) - else: - self._setBindVariablesByPos( - space, w_arguments, numrows, i, deferred) - self._performBind(space) - - # execute the statement, but only if the number of rows is greater than - # zero since Oracle raises an error otherwise - if numrows > 0: - self._internalExecute(space, numIters=numrows) - - def close(self, space): - # make sure we are actually open - self._checkOpen(space) - - # close the cursor - self.freeHandle(space, raiseError=True) - - self.isOpen = False - self.handle = lltype.nullptr(roci.OCIStmt.TO) - - @unwrap_spec(name=str) - def callfunc(self, space, name, w_returnType, w_parameters=None): - retvar = interp_variable.newVariableByType(space, self, w_returnType, 1) - if space.is_none(w_parameters): - w_parameters = None - - self._call(space, name, retvar, w_parameters) - - # determine the results - return retvar.getValue(space, 0) - - @unwrap_spec(name=str) - def callproc(self, space, name, w_parameters=None): - if space.is_none(w_parameters): - w_parameters = None - - self._call(space, name, None, w_parameters) - - # create the return value - ret_w = [] - if self.bindList: - for v in self.bindList: - assert isinstance(v, interp_variable.W_Variable) - ret_w.append(v.getValue(space, 0)) - return space.newlist(ret_w) - - def _call(self, space, name, retvar, w_args): - # determine the number of arguments passed - if w_args: - numArguments = space.len_w(w_args) - else: - numArguments = 0 - - # make sure we are actually open - self._checkOpen(space) - - # add the return value, if applicable - if retvar: - offset = 1 - w_vars = space.newlist([retvar]) - if w_args: - space.call_method(w_vars, "extend", w_args) - else: - offset = 0 - w_vars = w_args - - # build up the statement - args = ', '.join([':%d' % (i + offset + 1,) 
- for i in range(numArguments)]) - if retvar: - stmt = "begin :1 := %s(%s); end;" % (name, args) - else: - stmt = "begin %s(%s); end;" % (name, args) - - self._execute(space, space.wrap(stmt), w_vars) - - def _checkOpen(self, space): - if not self.isOpen: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap("not open")) - - def allocateHandle(self): - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIStmt).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_STMT, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Cursor_New()") - self.handle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - self.isOwned = True - - def freeHandle(self, space, raiseError=True): - if not self.handle: - return - if self.isOwned: - roci.OCIHandleFree(self.handle, roci.OCI_HTYPE_STMT) - elif self.connection.handle: - tagBuffer = StringBuffer() - tagBuffer.fill(space, self.w_statementTag) - try: - status = roci.OCIStmtRelease( - self.handle, self.environment.errorHandle, - tagBuffer.ptr, tagBuffer.size, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Cursor_FreeHandle()") - finally: - tagBuffer.clear() - - def _internalPrepare(self, space, w_stmt, w_tag): - # make sure we don't get a situation where nothing is to be executed - if w_stmt is None and self.w_statement is None: - raise OperationError( - interp_error.get(space).w_ProgrammingError, - space.wrap("no statement specified " - "and no prior statement prepared")) - - # nothing to do if the statement is identical to the one already stored - # but go ahead and prepare anyway for create, alter and drop statments - if w_stmt is None or w_stmt == self.w_statement: - if self.statementType not in (roci.OCI_STMT_CREATE, - roci.OCI_STMT_DROP, - roci.OCI_STMT_ALTER): - return - w_stmt = self.w_statement - else: - self.w_statement = w_stmt - - # release existing statement, if 
necessary - self.w_statementTag = w_tag - self.freeHandle(space) - - # prepare statement - self.isOwned = False - handleptr = lltype.malloc(roci.Ptr(roci.OCIStmt).TO, - 1, flavor='raw') - stmtBuffer = StringBuffer() - tagBuffer = StringBuffer() - stmtBuffer.fill(space, w_stmt) - tagBuffer.fill(space, w_tag) - try: - status = roci.OCIStmtPrepare2( - self.connection.handle, handleptr, - self.environment.errorHandle, - stmtBuffer.ptr, stmtBuffer.size, - tagBuffer.ptr, tagBuffer.size, - roci.OCI_NTV_SYNTAX, roci.OCI_DEFAULT) - - self.environment.checkForError( - status, "Connection_InternalPrepare(): prepare") - self.handle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - stmtBuffer.clear() - tagBuffer.clear() - - # clear bind variables, if applicable - if not self.setInputSizes: - self.bindList = None - self.bindDict = None - - # clear row factory, if applicable - self.rowFactory = None - - # determine if statement is a query - self._getStatementType() - - def _setErrorOffset(self, space, e): - if e.match(space, get(space).w_DatabaseError): - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, 1, flavor='raw') - try: - roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PARSE_ERROR_OFFSET, - self.environment.errorHandle) - e.offset = attrptr[0] - finally: - lltype.free(attrptr, flavor='raw') - - def _internalExecute(self, space, numIters): - if self.connection.autocommit: - mode = roci.OCI_COMMIT_ON_SUCCESS - else: - mode = roci.OCI_DEFAULT - - status = roci.OCIStmtExecute( - self.connection.handle, - self.handle, - self.environment.errorHandle, - numIters, 0, - lltype.nullptr(roci.OCISnapshot.TO), - lltype.nullptr(roci.OCISnapshot.TO), - mode) - try: - self.environment.checkForError( - status, "Cursor_InternalExecute()") - except OperationError, e: - self._setErrorOffset(space, e) - raise - finally: - self._setRowCount() - - def _getStatementType(self): - 
attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_STMT_TYPE, - self.environment.errorHandle) - - self.environment.checkForError( - status, "Cursor_GetStatementType()") - self.statementType = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - - self.fetchVariables = None - - def getDescription(self, space): - "Return a list of 7-tuples consisting of the description of " - "the define variables" - - # make sure the cursor is open - self._checkOpen(space) - - # fixup bound cursor, if necessary - self._fixupBoundCursor() - - # if not a query, return None - if self.statementType != roci.OCI_STMT_SELECT: - return - - # determine number of items in select-list - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub1).TO, 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PARAM_COUNT, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_GetDescription()") - numItems = attrptr[0] - finally: - lltype.free(attrptr, flavor='raw') - - return space.newlist( - [space.newtuple(self._itemDescription(space, i + 1)) - for i in range(numItems)]) - - def _itemDescription(self, space, pos): - "Return a tuple describing the item at the given position" - - # acquire parameter descriptor - paramptr = lltype.malloc(roci.Ptr(roci.OCIParam).TO, - 1, flavor='raw') - try: - status = roci.OCIParamGet( - self.handle, roci.OCI_HTYPE_STMT, - self.environment.errorHandle, - rffi.cast(roci.dvoidpp, paramptr), - pos) - self.environment.checkForError( - status, - "Cursor_GetDescription(): parameter") - param = paramptr[0] - finally: - lltype.free(paramptr, flavor='raw') - - try: - # acquire usable type of item - varType = 
interp_variable.typeByOracleDescriptor( - param, self.environment) - - # acquire internal size of item - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_DATA_SIZE, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): internal size") - internalSize = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - - # acquire name of item - nameptr = lltype.malloc(rffi.CArrayPtr(roci.oratext).TO, 1, - flavor='raw') - lenptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, nameptr), - lenptr, - roci.OCI_ATTR_NAME, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): name") - name = rffi.charpsize2str(nameptr[0], rffi.cast(lltype.Signed, lenptr[0])) - finally: - lltype.free(nameptr, flavor='raw') - lltype.free(lenptr, flavor='raw') - - # lookup precision and scale - if varType is interp_variable.VT_Float: - attrptr = lltype.malloc(rffi.CArrayPtr(roci.sb1).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_SCALE, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): scale") - scale = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PRECISION, - self.environment.errorHandle) - self.environment.checkForError( - 
status, - "Cursor_ItemDescription(): precision") - precision = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - else: - scale = 0 - precision = 0 - - # lookup whether null is permitted for the attribute - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub1).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_IS_NULL, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): nullable") - nullable = rffi.cast(lltype.Signed, attrptr[0]) != 0 - finally: - lltype.free(attrptr, flavor='raw') - - # set display size based on data type - if varType is interp_variable.VT_String: - displaySize = internalSize - elif varType is interp_variable.VT_NationalCharString: - displaySize = internalSize / 2 - elif varType is interp_variable.VT_Binary: - displaySize = internalSize - elif varType is interp_variable.VT_FixedChar: - displaySize = internalSize - elif varType is interp_variable.VT_FixedNationalChar: - displaySize = internalSize / 2 - elif varType is interp_variable.VT_Float: - if precision: - displaySize = precision + 1 - if scale > 0: - displaySize += scale + 1 - else: - displaySize = 127 - elif varType is interp_variable.VT_DateTime: - displaySize = 23 - else: - displaySize = -1 - - # return the tuple - return [space.wrap(name), space.gettypeobject(varType.typedef), - space.wrap(displaySize), space.wrap(internalSize), - space.wrap(precision), space.wrap(scale), - space.wrap(nullable)] - - finally: - roci.OCIDescriptorFree(param, roci.OCI_DTYPE_PARAM) - - def _setBindVariablesByPos(self, space, - w_vars, numElements, arrayPos, defer): - "handle positional binds" - # make sure positional and named binds are not being intermixed - if self.bindDict is not None: - raise OperationError( - get(space).w_ProgrammingError, - space.wrap("positional and named binds cannot be 
intermixed")) - - if self.bindList is None: - self.bindList = [] - - vars_w = space.fixedview(w_vars) - for i in range(len(vars_w)): - w_value = vars_w[i] - if i < len(self.bindList): - origVar = self.bindList[i] - if space.is_w(origVar, space.w_None): - origVar = None - else: - origVar = None - newVar = self._setBindVariableHelper(space, w_value, origVar, - numElements, arrayPos, defer) - if newVar: - if i < len(self.bindList): - self.bindList[i] = newVar - else: - assert i == len(self.bindList) - self.bindList.append(newVar) - - def _setBindVariablesByName(self, space, - w_vars, numElements, arrayPos, defer): - "handle named binds" - # make sure positional and named binds are not being intermixed - if self.bindList is not None: - raise OperationError( - get(space).w_ProgrammingError, - space.wrap("positional and named binds cannot be intermixed")) - - if self.bindDict is None: - self.bindDict = space.newdict() - - items = space.fixedview(space.call_method(w_vars, "iteritems")) - for item in items: - w_key, w_value = space.fixedview(item, 2) - origVar = space.finditem(self.bindDict, w_key) - newVar = self._setBindVariableHelper(space, w_value, origVar, - numElements, arrayPos, defer) - if newVar: - space.setitem(self.bindDict, w_key, newVar) - - def _setBindVariableHelper(self, space, w_value, origVar, - numElements, arrayPos, defer): - - valueIsVariable = space.isinstance_w(w_value, get(space).w_Variable) - newVar = None - - # handle case where variable is already bound - if origVar: - assert isinstance(origVar, interp_variable.W_Variable) - - # if the value is a variable object, rebind it if necessary - if valueIsVariable: - newVar = space.interp_w(interp_variable.W_Variable, w_value) - assert isinstance(newVar, interp_variable.W_Variable) - if newVar == origVar: - newVar = None - - # if the number of elements has changed, create a new variable - # this is only necessary for executemany() since execute() always - # passes a value of 1 for the number of elements 
- elif numElements > origVar.allocatedElements: - newVar = origVar.clone( - self, numElements, origVar.size) - assert isinstance(newVar, interp_variable.W_Variable) - newVar.setValue(space, arrayPos, w_value) - - # otherwise, attempt to set the value - else: - try: - origVar.setValue(space, arrayPos, w_value) - except OperationError, e: - # executemany() should simply fail after the first element - if arrayPos > 0: - raise - # anything other than IndexError or TypeError should fail - if (not e.match(space, space.w_IndexError) and - not e.match(space, space.w_TypeError)): - raise - # catch the exception and try to create a new variable - origVar = None - - if not origVar: - # if the value is a variable object, bind it directly - if valueIsVariable: - newVar = space.interp_w(interp_variable.W_Variable, w_value) - assert isinstance(newVar, interp_variable.W_Variable) - newVar.boundPos = 0 - newVar.boundName = None - - # otherwise, create a new variable, unless the value is None and - # we wish to defer type assignment - elif not space.is_w(w_value, space.w_None) or not defer: - newVar = interp_variable.newVariableByValue(space, self, - w_value, - numElements) - assert isinstance(newVar, interp_variable.W_Variable) - newVar.setValue(space, arrayPos, w_value) - - assert newVar is None or isinstance(newVar, interp_variable.W_Variable) - return newVar - - def _performBind(self, space): - # set values and perform binds for all bind variables - if self.bindList: - for i in range(len(self.bindList)): - var = self.bindList[i] - assert isinstance(var, interp_variable.W_Variable) - var.bind(space, self, None, i + 1) - if self.bindDict: - items_w = space.fixedview( - space.call_method(self.bindDict, "iteritems")) - for w_item in items_w: - w_key, var = space.fixedview(w_item, 2) - assert isinstance(var, interp_variable.W_Variable) - var.bind(space, self, w_key, 0) - - # ensure that input sizes are reset - self.setInputSizes = False - - def _setRowCount(self): - if 
self.statementType == roci.OCI_STMT_SELECT: - self.rowCount = 0 - self.actualRows = -1 - self.rowNum = 0 - elif self.statementType in (roci.OCI_STMT_INSERT, - roci.OCI_STMT_UPDATE, - roci.OCI_STMT_DELETE): - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, - 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_ROW_COUNT, - self.environment.errorHandle) - - self.environment.checkForError( - status, "Cursor_SetRowCount()") - self.rowCount = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - else: - self.rowCount = -1 - - def _performDefine(self): - # determine number of items in select-list - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, - 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PARAM_COUNT, - self.environment.errorHandle) - - self.environment.checkForError( - status, "Cursor_PerformDefine()") - numParams = attrptr[0] - finally: - lltype.free(attrptr, flavor='raw') - - self.fetchVariables = [] - - # define a variable for each select-item - self.fetchArraySize = self.arraySize - for i in range(numParams): - var = interp_variable.define(self, i+1, self.fetchArraySize) - assert isinstance(var, interp_variable.W_Variable) - self.fetchVariables.append(var) - - def _verifyFetch(self, space): - # make sure the cursor is open - self._checkOpen(space) - - # fixup bound cursor, if necessary - self._fixupBoundCursor() - - # make sure the cursor is for a query - if self.statementType != roci.OCI_STMT_SELECT: - raise OperationError( - get(space).w_InterfaceError, - space.wrap("not a query")) - - def _fixupBoundCursor(self): - if self.handle and self.statementType < 0: - self._getStatementType() - if self.statementType == roci.OCI_STMT_SELECT: - self._performDefine() - 
self._setRowCount() - - def fetchone(self, space): - # verify fetch can be performed - self._verifyFetch(space) - - # setup return value - if self._moreRows(space): - return self._createRow(space) - - return space.w_None - - def fetchmany(self, space, w_numRows=None): - if w_numRows is not None: - numRows = space.int_w(w_numRows) - else: - numRows = self.arraySize - - # verify fetch can be performed - self._verifyFetch(space) - - return self._multiFetch(space, limit=numRows) - - def fetchall(self, space): - # verify fetch can be performed - self._verifyFetch(space) - - return self._multiFetch(space, limit=0) - - def descr_iter(self, space): - self._verifyFetch(space) - return space.wrap(self) - - def descr_next(self, space): - # verify fetch can be performed - self._verifyFetch(space) - - # setup return value - if self._moreRows(space): - return self._createRow(space) - - raise OperationError(space.w_StopIteration, space.w_None) - - def _moreRows(self, space): - if self.rowNum < self.actualRows: - return True - if self.actualRows < 0 or self.actualRows == self.fetchArraySize: - self._internalFetch(space, self.fetchArraySize) - if self.rowNum < self.actualRows: - return True - - return False - - def _internalFetch(self, space, numRows): - if not self.fetchVariables: - raise OperationError( - get(space).w_InterfaceError, - space.wrap("query not executed")) - - status = roci.OCIStmtFetch( - self.handle, - self.environment.errorHandle, - numRows, - roci.OCI_FETCH_NEXT, - roci.OCI_DEFAULT) - - if status != roci.OCI_NO_DATA: - self.environment.checkForError( - status, - "Cursor_InternalFetch(): fetch") - - for var in self.fetchVariables: - assert isinstance(var, interp_variable.W_Variable) - var.internalFetchNum += 1 - - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, - 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_ROW_COUNT, - 
self.environment.errorHandle) - - self.environment.checkForError( - status, "Cursor_InternalFetch(): row count") - - self.actualRows = (rffi.cast(lltype.Signed, attrptr[0]) - - self.rowCount) - self.rowNum = 0 - finally: - lltype.free(attrptr, flavor='raw') - - def _multiFetch(self, space, limit=0): - results_w = [] - rowNum = 0 - - # fetch as many rows as possible - while limit == 0 or rowNum < limit: - rowNum += 1 - if not self._moreRows(space): - break - w_row = self._createRow(space) - results_w.append(w_row) - return space.newlist(results_w) - - def _createRow(self, space): - items_w = [] - # acquire the value for each item - for var in self.fetchVariables: - assert isinstance(var, interp_variable.W_Variable) - w_item = var.getValue(space, self.rowNum) - items_w.append(w_item) - - # increment row counters - self.rowNum += 1 - self.rowCount += 1 - - w_row = space.newtuple(items_w) - - # if a row factory is defined, call it - if self.w_rowFactory: - w_row = space.call(self.w_rowFactory, w_row) - - return w_row - - def _get_bind_info(self, space, numElements): - # avoid bus errors on 64bit platforms - numElements = numElements + (rffi.sizeof(roci.dvoidp) - - numElements % rffi.sizeof(roci.dvoidp)) - # initialize the buffers - bindNames = lltype.malloc(roci.Ptr(roci.oratext).TO, - numElements, flavor='raw') - bindNameLengths = lltype.malloc(roci.Ptr(roci.ub1).TO, - numElements, flavor='raw') - indicatorNames = lltype.malloc(roci.Ptr(roci.oratext).TO, - numElements, flavor='raw') - indicatorNameLengths = lltype.malloc(roci.Ptr(roci.ub1).TO, - numElements, flavor='raw') - duplicate = lltype.malloc(roci.Ptr(roci.ub1).TO, - numElements, flavor='raw') - bindHandles = lltype.malloc(roci.Ptr(roci.OCIBind).TO, - numElements, flavor='raw') - - foundElementsPtr = lltype.malloc(roci.Ptr(roci.sb4).TO, 1, - flavor='raw') - - try: - status = roci.OCIStmtGetBindInfo( - self.handle, - self.environment.errorHandle, - numElements, - 1, - foundElementsPtr, - bindNames, 
bindNameLengths, - indicatorNames, indicatorNameLengths, - duplicate, bindHandles) - if status != roci.OCI_NO_DATA: - self.environment.checkForError( - status, "Cursor_GetBindNames()") - - # Too few elements allocated - foundElements = rffi.cast(lltype.Signed, foundElementsPtr[0]) - if foundElements < 0: - return -foundElements, None - - names_w = [] - # process the bind information returned - for i in range(foundElements): - if rffi.cast(lltype.Signed, duplicate[i]): - continue - names_w.append( - w_string(space, - bindNames[i], - rffi.cast(lltype.Signed, bindNameLengths[i]))) - - return 0, names_w - finally: - lltype.free(bindNames, flavor='raw') - lltype.free(bindNameLengths, flavor='raw') - lltype.free(indicatorNames, flavor='raw') - lltype.free(indicatorNameLengths, flavor='raw') - lltype.free(duplicate, flavor='raw') - lltype.free(bindHandles, flavor='raw') - lltype.free(foundElementsPtr, flavor='raw') - - def bindnames(self, space): - # make sure the cursor is open - self._checkOpen(space) - - # ensure that a statement has already been prepared - if not self.w_statement: - raise OperationError(get(space).w_ProgrammingError, - space.wrap("statement must be prepared first")) - - nbElements, names = self._get_bind_info(space, 8) - if nbElements: - _, names = self._get_bind_info(space, nbElements) - return space.newlist(names) - - @unwrap_spec(size=int) - def var(self, space, w_type, size=0, w_arraysize=None, - w_inconverter=None, w_outconverter=None): - if space.is_none(w_arraysize): - arraySize = self.bindArraySize - else: - arraySize = space.int_w(w_arraysize) - - # determine the type of variable - varType = interp_variable.typeByPythonType(space, self, w_type) - if varType.isVariableLength and size == 0: - size = varType.size - - # create the variable - var = varType(self, arraySize, size) - var.w_inconverter = w_inconverter - var.w_outconverter = w_outconverter - - return space.wrap(var) - - @unwrap_spec(size=int) - def arrayvar(self, space, w_type, 
w_value, size=0): - # determine the type of variable - varType = interp_variable.typeByPythonType(space, self, w_type) - if varType.isVariableLength and size == 0: - size = varType.size - - # determine the number of elements to create - if space.isinstance_w(w_value, space.w_list): - numElements = space.len_w(w_value) - elif space.isinstance_w(w_value, space.w_int): - numElements = space.int_w(w_value) - else: - raise OperationError( - get(space).w_NotSupportedError, - space.wrap("expecting integer or list of values")) - - # create the variable - var = varType(self, numElements, size) - var.makeArray(space) - - # set the value, if applicable - if space.isinstance_w(w_value, space.w_list): - var.setArrayValue(space, w_value) - - return var - - def setinputsizes(self, space, __args__): - args_w, kw_w = __args__.unpack() - - # only expect keyword arguments or positional arguments, not both - if args_w and kw_w: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap( - "expecting argument or keyword arguments, not both")) - - # make sure the cursor is open - self._checkOpen(space) - - # eliminate existing bind variables - self.bindList = None - self.bindDict = None - - self.setInputSizes = True - - # process each input - if kw_w: - self.bindDict = space.newdict() - for key, w_value in kw_w.iteritems(): - var = interp_variable.newVariableByType( - space, self, w_value, self.bindArraySize) - space.setitem(self.bindDict, space.wrap(key), var) - return self.bindDict - else: - self.bindList = [None] * len(args_w) - for i in range(len(args_w)): - w_value = args_w[i] - if space.is_w(w_value, space.w_None): - var = None - else: - var = interp_variable.newVariableByType( - space, self, w_value, self.bindArraySize) - self.bindList[i] = var - return space.newlist(self.bindList) - - @unwrap_spec(outputSize=int, outputSizeColumn=int) - def setoutputsize(self, space, outputSize, outputSizeColumn=-1): - self.outputSize = outputSize - self.outputSizeColumn = 
outputSizeColumn - - - def arraysize_get(self, space): - return space.wrap(self.arraySize) - def arraysize_set(self, space, w_value): - self.arraySize = space.int_w(w_value) - - def bindarraysize_get(self, space): - return space.wrap(self.bindArraySize) - def bindarraysize_set(self, space, w_value): - self.bindArraySize = space.int_w(w_value) - - def bindvars_get(self, space): - if self.bindList: - return space.newlist(self.bindList) - if self.bindDict: - return self.bindDict - - def fetchvars_get(self, space): - return space.newlist(self.fetchVariables) - -W_Cursor.typedef = TypeDef( - 'Cursor', - execute = interp2app(W_Cursor.execute), - executemany = interp2app(W_Cursor.executemany), - prepare = interp2app(W_Cursor.prepare), - fetchone = interp2app(W_Cursor.fetchone), - fetchmany = interp2app(W_Cursor.fetchmany), - fetchall = interp2app(W_Cursor.fetchall), - close = interp2app(W_Cursor.close), - bindnames = interp2app(W_Cursor.bindnames), - callfunc = interp2app(W_Cursor.callfunc), - callproc = interp2app(W_Cursor.callproc), - var = interp2app(W_Cursor.var), - arrayvar = interp2app(W_Cursor.arrayvar), - setinputsizes = interp2app(W_Cursor.setinputsizes), - setoutputsize = interp2app(W_Cursor.setoutputsize), - - __iter__ = interp2app(W_Cursor.descr_iter), - next = interp2app(W_Cursor.descr_next), - - arraysize = GetSetProperty(W_Cursor.arraysize_get, - W_Cursor.arraysize_set), - bindarraysize = GetSetProperty(W_Cursor.bindarraysize_get, - W_Cursor.bindarraysize_set), - rowcount = interp_attrproperty('rowCount', W_Cursor), - statement = interp_attrproperty_w('w_statement', W_Cursor), - bindvars = GetSetProperty(W_Cursor.bindvars_get), From noreply at buildbot.pypy.org Wed Apr 30 19:01:45 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 30 Apr 2014 19:01:45 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into branch Message-ID: <20140430170145.0A6D41C33B3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x 
Changeset: r71096:99d54aa2a1e4 Date: 2014-04-30 20:01 +0300 http://bitbucket.org/pypy/pypy/changeset/99d54aa2a1e4/ Log: merge default into branch diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -41,10 +41,9 @@ import pwd return pwd.getpwuid(os.getuid())[0] -def main(): +def main(branch, server): #XXX: handle release tags #XXX: handle validity checks - branch = sys.argv[1] lock = defer.DeferredLock() requests = [] def ebList(err): @@ -54,10 +53,11 @@ for builder in BUILDERS: print 'Forcing', builder, '...' - url = "http://buildbot.pypy.org/builders/" + builder + "/force" + url = "http://" + server + "/builders/" + builder + "/force" args = [ ('username', get_user()), ('revision', ''), + ('forcescheduler', 'Force Scheduler'), ('submit', 'Force Build'), ('branch', branch), ('comments', "Forced by command line script")] @@ -73,4 +73,11 @@ if __name__ == '__main__': log.startLogging(sys.stdout) - main() + import optparse + parser = optparse.OptionParser() + parser.add_option("-b", "--branch", help="branch to build", default='') + parser.add_option("-s", "--server", help="buildbot server", default="buildbot.pypy.org") + (options, args) = parser.parse_args() + if not options.branch: + parser.error("branch option required") + main(options.branch, options.server) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4347,3 +4347,10 @@ 'void') assert foo[0] == 1789201 lltype.free(foo, flavor='raw') + + def test_cast_float_to_singlefloat(self): + if not self.cpu.supports_singlefloats: + py.test.skip("requires singlefloats") + res = self.execute_operation(rop.CAST_FLOAT_TO_SINGLEFLOAT, + [BoxFloat(12.5)], 'int') + assert res.getint() == struct.unpack("I", struct.pack("f", 12.5))[0] diff --git 
a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1173,13 +1173,13 @@ self.mc.CVTSD2SS(loctmp, loc0) assert isinstance(resloc, RegLoc) assert isinstance(loctmp, RegLoc) - self.mc.MOVD_rx(resloc.value, loctmp.value) + self.mc.MOVD32_rx(resloc.value, loctmp.value) def genop_cast_singlefloat_to_float(self, op, arglocs, resloc): loc0, = arglocs assert isinstance(resloc, RegLoc) assert isinstance(loc0, RegLoc) - self.mc.MOVD_xr(resloc.value, loc0.value) + self.mc.MOVD32_xr(resloc.value, loc0.value) self.mc.CVTSS2SD_xx(resloc.value, resloc.value) def genop_convert_float_bytes_to_longlong(self, op, arglocs, resloc): @@ -1187,7 +1187,7 @@ if longlong.is_64_bit: assert isinstance(resloc, RegLoc) assert isinstance(loc0, RegLoc) - self.mc.MOVD(resloc, loc0) + self.mc.MOVDQ(resloc, loc0) else: self.mov(loc0, resloc) @@ -1196,7 +1196,7 @@ if longlong.is_64_bit: assert isinstance(resloc, RegLoc) assert isinstance(loc0, RegLoc) - self.mc.MOVD(resloc, loc0) + self.mc.MOVDQ(resloc, loc0) else: self.mov(loc0, resloc) @@ -1262,7 +1262,7 @@ loc = arglocs[0] assert isinstance(resloc, RegLoc) if isinstance(loc, RegLoc): - self.mc.MOVD_rx(resloc.value, loc.value) + self.mc.MOVD32_rx(resloc.value, loc.value) elif isinstance(loc, FrameLoc): self.mc.MOV_rb(resloc.value, loc.value) else: @@ -1277,16 +1277,16 @@ assert isinstance(loc1, RegLoc) assert isinstance(loc2, RegLoc) assert isinstance(resloc, RegLoc) - self.mc.MOVD_xr(loc2.value, loc1.value) + self.mc.MOVD32_xr(loc2.value, loc1.value) self.mc.PSRAD_xi(loc2.value, 31) # -> 0 or -1 - self.mc.MOVD_xr(resloc.value, loc1.value) + self.mc.MOVD32_xr(resloc.value, loc1.value) self.mc.PUNPCKLDQ_xx(resloc.value, loc2.value) def genop_llong_from_uint(self, op, arglocs, resloc): loc1, = arglocs assert isinstance(resloc, RegLoc) assert isinstance(loc1, RegLoc) - self.mc.MOVD_xr(resloc.value, loc1.value) + 
self.mc.MOVD32_xr(resloc.value, loc1.value) def genop_llong_eq(self, op, arglocs, resloc): loc1, loc2, locxtmp = arglocs @@ -1571,8 +1571,8 @@ self.mc.OR_rr(edx.value, eax.value) else: loc1, = arglocs - self.mc.MOVD_xr(loc1.value, edx.value) - self.mc.MOVD_xr(resloc.value, eax.value) + self.mc.MOVD32_xr(loc1.value, edx.value) + self.mc.MOVD32_xr(resloc.value, eax.value) self.mc.PUNPCKLDQ_xx(resloc.value, loc1.value) def genop_guard_guard_true(self, ign_1, guard_op, guard_token, locs, ign_2): diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -242,8 +242,8 @@ if self.tmpresloc is None: if self.restype == 'L': # long long # move eax/edx -> xmm0 - self.mc.MOVD_xr(resloc.value^1, edx.value) - self.mc.MOVD_xr(resloc.value, eax.value) + self.mc.MOVD32_xr(resloc.value^1, edx.value) + self.mc.MOVD32_xr(resloc.value, eax.value) self.mc.PUNPCKLDQ_xx(resloc.value, resloc.value^1) else: # float: we have to go via the stack @@ -435,7 +435,7 @@ if isinstance(src, ImmedLoc): self.mc.MOV(X86_64_SCRATCH_REG, src) src = X86_64_SCRATCH_REG - self.mc.MOVD(dst, src) + self.mc.MOVD32(dst, src) # Finally remap the arguments in the main regs remap_frame_layout(self.asm, src_locs, dst_locs, X86_64_SCRATCH_REG) @@ -447,7 +447,7 @@ if self.restype == 'S' and self.tmpresloc is None: # singlefloat return: use MOVD to load the target register # from the lower 32 bits of XMM0 - self.mc.MOVD(self.resloc, xmm0) + self.mc.MOVD32(self.resloc, xmm0) else: CallBuilderX86.load_result(self) @@ -469,7 +469,7 @@ if self.restype == 'S': # singlefloat return: use MOVD to store the lower 32 bits # of XMM0 into the tmpresloc (register or [ESP]) - self.mc.MOVD(self.tmpresloc, xmm0) + self.mc.MOVD32(self.tmpresloc, xmm0) else: assert self.restype == INT self.mc.MOV(self.tmpresloc, eax) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- 
a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -662,7 +662,8 @@ PXOR = _binaryop('PXOR') PCMPEQD = _binaryop('PCMPEQD') - MOVD = _binaryop('MOVD') + MOVDQ = _binaryop('MOVDQ') + MOVD32 = _binaryop('MOVD32') CALL = _relative_unaryop('CALL') JMP = _relative_unaryop('JMP') diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -617,12 +617,17 @@ CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A', register(1, 8), stack_bp(2)) - # These work on machine sized registers, so MOVD is actually MOVQ - # when running on 64 bits. Note a bug in the Intel documentation: + # These work on machine sized registers, so "MOVDQ" is MOVD when running + # on 32 bits and MOVQ when running on 64 bits. "MOVD32" is always 32-bit. + # Note a bug in the Intel documentation: # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html - MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') - MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') - MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) + MOVDQ_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVDQ_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVDQ_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) + + MOVD32_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0') + MOVD32_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0') + MOVD32_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) diff --git a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ 
b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -183,8 +183,11 @@ g = open(inputname, 'w') g.write('\x09.string "%s"\n' % BEGIN_TAG) # - if instrname == 'MOVD' and self.WORD == 8: - instrname = 'MOVQ' + if instrname == 'MOVDQ': + if self.WORD == 8: + instrname = 'MOVQ' + else: + instrname = 'MOVD' if argmodes == 'xb': py.test.skip('"as" uses an undocumented alternate encoding??') # From noreply at buildbot.pypy.org Wed Apr 30 20:29:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 30 Apr 2014 20:29:26 +0200 (CEST) Subject: [pypy-commit] pypy default: fix numpy.maximum/minimum on nans Message-ID: <20140430182926.5DAA71C0EE9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71097:c691b2acd560 Date: 2014-04-30 14:24 -0400 http://bitbucket.org/pypy/pypy/changeset/c691b2acd560/ Log: fix numpy.maximum/minimum on nans diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -310,7 +310,7 @@ assert math.isnan(fmod(v, 2)) def test_minimum(self): - from numpypy import array, minimum + from numpypy import array, minimum, nan, isnan a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -318,8 +318,12 @@ for i in range(3): assert c[i] == min(a[i], b[i]) + arg1 = array([0, nan, nan]) + arg2 = array([nan, 0, nan]) + assert isnan(minimum(arg1, arg2)).all() + def test_maximum(self): - from numpypy import array, maximum + from numpypy import array, maximum, nan, isnan a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -327,6 +331,10 @@ for i in range(3): assert c[i] == max(a[i], b[i]) + arg1 = array([0, nan, nan]) + arg2 = array([nan, 0, nan]) + assert isnan(maximum(arg1, arg2)).all() + x = maximum(2, 3) assert x == 3 assert isinstance(x, (int, long)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ 
b/pypy/module/micronumpy/types.py @@ -705,20 +705,20 @@ return math.fabs(v) @simple_binary_op + def max(self, v1, v2): + return v1 if v1 >= v2 or rfloat.isnan(v1) else v2 + + @simple_binary_op + def min(self, v1, v2): + return v1 if v1 <= v2 or rfloat.isnan(v1) else v2 + + @simple_binary_op def fmax(self, v1, v2): - if rfloat.isnan(v2): - return v1 - elif rfloat.isnan(v1): - return v2 - return max(v1, v2) + return v1 if v1 >= v2 or rfloat.isnan(v2) else v2 @simple_binary_op def fmin(self, v1, v2): - if rfloat.isnan(v2): - return v1 - elif rfloat.isnan(v1): - return v2 - return min(v1, v2) + return v1 if v1 <= v2 or rfloat.isnan(v2) else v2 @simple_binary_op def fmod(self, v1, v2): From noreply at buildbot.pypy.org Wed Apr 30 20:40:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 20:40:42 +0200 (CEST) Subject: [pypy-commit] pypy default: Hack cProfile.py to avoid creating explicitly the main module's Message-ID: <20140430184042.AB15A1C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71098:a1486cf0b55f Date: 2014-04-30 20:39 +0200 http://bitbucket.org/pypy/pypy/changeset/a1486cf0b55f/ Log: Hack cProfile.py to avoid creating explicitly the main module's dictionary. Instead, use new.module() and implicitly get it by reading the __dict__. This should fix one source of slow-down for cProfile on PyPy. diff --git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py --- a/lib-python/2.7/cProfile.py +++ b/lib-python/2.7/cProfile.py @@ -161,7 +161,7 @@ # ____________________________________________________________ def main(): - import os, sys + import os, sys, new from optparse import OptionParser usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." 
parser = OptionParser(usage=usage) @@ -184,12 +184,10 @@ sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') - globs = { - '__file__': progname, - '__name__': '__main__', - '__package__': None, - } - runctx(code, globs, None, options.outfile, options.sort) + mainmod = new.module('__main__') + mainmod.__file__ = progname + mainmod.__package__ = None + runctx(code, mainmod.__dict__, None, options.outfile, options.sort) else: parser.print_usage() return parser From noreply at buildbot.pypy.org Wed Apr 30 20:54:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 20:54:47 +0200 (CEST) Subject: [pypy-commit] pypy default: fix Message-ID: <20140430185447.E96001C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71099:b7515f0e2ef5 Date: 2014-04-30 20:53 +0200 http://bitbucket.org/pypy/pypy/changeset/b7515f0e2ef5/ Log: fix diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4352,5 +4352,5 @@ if not self.cpu.supports_singlefloats: py.test.skip("requires singlefloats") res = self.execute_operation(rop.CAST_FLOAT_TO_SINGLEFLOAT, - [BoxFloat(12.5)], 'int') + [boxfloat(12.5)], 'int') assert res.getint() == struct.unpack("I", struct.pack("f", 12.5))[0] From noreply at buildbot.pypy.org Wed Apr 30 20:54:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 20:54:49 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140430185449.2B8701C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71100:984c2d21a9f5 Date: 2014-04-30 20:54 +0200 http://bitbucket.org/pypy/pypy/changeset/984c2d21a9f5/ Log: merge heads diff --git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py --- a/lib-python/2.7/cProfile.py +++ b/lib-python/2.7/cProfile.py @@ -161,7 +161,7 @@ # 
____________________________________________________________ def main(): - import os, sys + import os, sys, new from optparse import OptionParser usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." parser = OptionParser(usage=usage) @@ -184,12 +184,10 @@ sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') - globs = { - '__file__': progname, - '__name__': '__main__', - '__package__': None, - } - runctx(code, globs, None, options.outfile, options.sort) + mainmod = new.module('__main__') + mainmod.__file__ = progname + mainmod.__package__ = None + runctx(code, mainmod.__dict__, None, options.outfile, options.sort) else: parser.print_usage() return parser diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -310,7 +310,7 @@ assert math.isnan(fmod(v, 2)) def test_minimum(self): - from numpypy import array, minimum + from numpypy import array, minimum, nan, isnan a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -318,8 +318,12 @@ for i in range(3): assert c[i] == min(a[i], b[i]) + arg1 = array([0, nan, nan]) + arg2 = array([nan, 0, nan]) + assert isnan(minimum(arg1, arg2)).all() + def test_maximum(self): - from numpypy import array, maximum + from numpypy import array, maximum, nan, isnan a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -327,6 +331,10 @@ for i in range(3): assert c[i] == max(a[i], b[i]) + arg1 = array([0, nan, nan]) + arg2 = array([nan, 0, nan]) + assert isnan(maximum(arg1, arg2)).all() + x = maximum(2, 3) assert x == 3 assert isinstance(x, (int, long)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -705,20 +705,20 @@ return math.fabs(v) @simple_binary_op + def max(self, v1, v2): + 
return v1 if v1 >= v2 or rfloat.isnan(v1) else v2 + + @simple_binary_op + def min(self, v1, v2): + return v1 if v1 <= v2 or rfloat.isnan(v1) else v2 + + @simple_binary_op def fmax(self, v1, v2): - if rfloat.isnan(v2): - return v1 - elif rfloat.isnan(v1): - return v2 - return max(v1, v2) + return v1 if v1 >= v2 or rfloat.isnan(v2) else v2 @simple_binary_op def fmin(self, v1, v2): - if rfloat.isnan(v2): - return v1 - elif rfloat.isnan(v1): - return v2 - return min(v1, v2) + return v1 if v1 <= v2 or rfloat.isnan(v2) else v2 @simple_binary_op def fmod(self, v1, v2): From noreply at buildbot.pypy.org Wed Apr 30 21:13:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 21:13:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill the jit.elidable_promote() here. One remaining issue left in the jit code. Message-ID: <20140430191310.3376F1C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71101:5084e653283f Date: 2014-04-30 21:03 +0200 http://bitbucket.org/pypy/pypy/changeset/5084e653283f/ Log: Kill the jit.elidable_promote() here. One remaining issue left in the jit code. diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -189,25 +189,24 @@ subentry._stop(tt, it) - at jit.elidable_promote() def create_spec_for_method(space, w_function, w_type): - w_function = w_function + class_name = None if isinstance(w_function, Function): name = w_function.name + # try to get the real class that defines the method, + # which is a superclass of the class of the instance + from pypy.objspace.std.typeobject import W_TypeObject # xxx + if isinstance(w_type, W_TypeObject): + w_realclass, _ = space.lookup_in_type_where(w_type, name) + if isinstance(w_realclass, W_TypeObject): + class_name = w_realclass.get_module_type_name() else: name = '?' 
- # try to get the real class that defines the method, - # which is a superclass of the class of the instance - from pypy.objspace.std.typeobject import W_TypeObject # xxx - class_name = w_type.getname(space) # if the rest doesn't work - if isinstance(w_type, W_TypeObject) and name != '?': - w_realclass, _ = space.lookup_in_type_where(w_type, name) - if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() + if class_name is None: + class_name = w_type.getname(space) # if the rest doesn't work return "{method '%s' of '%s' objects}" % (name, class_name) - at jit.elidable_promote() def create_spec_for_function(space, w_func): if w_func.w_module is None: module = '' @@ -220,7 +219,6 @@ return '{%s%s}' % (module, w_func.name) - at jit.elidable_promote() def create_spec_for_object(space, w_obj): class_name = space.type(w_obj).getname(space) return "{'%s' object}" % (class_name,) From noreply at buildbot.pypy.org Wed Apr 30 21:13:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 21:13:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix this skipped test by moving the logic from rewrite.py to pure.py, Message-ID: <20140430191311.574AA1C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71102:3f8b9a32c444 Date: 2014-04-30 21:12 +0200 http://bitbucket.org/pypy/pypy/changeset/3f8b9a32c444/ Log: Fix this skipped test by moving the logic from rewrite.py to pure.py, which runs after vstring.py. 
diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -57,6 +57,28 @@ self.emit_operation(nextop) def optimize_CALL_PURE(self, op): + # Step 1: check if all arguments are constant + arg_consts = [] + for i in range(op.numargs()): + arg = op.getarg(i) + const = self.get_constant_box(arg) + if const is None: + break + arg_consts.append(const) + else: + # all constant arguments: check if we already know the result + try: + result = self.optimizer.call_pure_results[arg_consts] + except KeyError: + pass + else: + # this removes a CALL_PURE with all constant arguments. + self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED + return + + # Step 2: check if all arguments are the same as a previous + # CALL_PURE. args = self.optimizer.make_args_key(op) oldop = self.pure_operations.get(args, None) if oldop is not None and oldop.getdescr() is op.getdescr(): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -515,30 +515,9 @@ return True # 0-length arraycopy return False - def optimize_CALL_PURE(self, op): - arg_consts = [] - for i in range(op.numargs()): - arg = op.getarg(i) - const = self.get_constant_box(arg) - if const is None: - break - arg_consts.append(const) - else: - # all constant arguments: check if we already know the result - try: - result = self.optimizer.call_pure_results[arg_consts] - except KeyError: - pass - else: - # this removes a CALL_PURE with all constant arguments. 
- self.make_constant(op.result, result) - self.last_emitted_operation = REMOVED - return - self.emit_operation(op) - def optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: - # it was a CALL_PURE or a CALL_LOOPINVARIANT that was killed; + # it was a CALL_LOOPINVARIANT that was killed; # so we also kill the following GUARD_NO_EXCEPTION return self.emit_operation(op) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5164,7 +5164,6 @@ self.optimize_strunicode_loop(ops, expected) def test_call_pure_vstring_const(self): - py.test.skip("implement me") ops = """ [] p0 = newstr(3) From noreply at buildbot.pypy.org Wed Apr 30 21:13:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 30 Apr 2014 21:13:49 +0200 (CEST) Subject: [pypy-commit] pypy default: fix _socket.socket name/module Message-ID: <20140430191349.E21B51C03B3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71103:3f94cefb76d7 Date: 2014-04-30 15:12 -0400 http://bitbucket.org/pypy/pypy/changeset/3f94cefb76d7/ Log: fix _socket.socket name/module diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -600,7 +600,8 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("_socket.socket", +W_RSocket.typedef = TypeDef("socket", + __module__ = "_socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -313,6 +313,11 @@ cls.space 
= space cls.w_udir = space.wrap(str(udir)) + def test_module(self): + import _socket + assert _socket.socket.__name__ == 'socket' + assert _socket.socket.__module__ == '_socket' + def test_ntoa_exception(self): import _socket raises(_socket.error, _socket.inet_ntoa, "ab") From noreply at buildbot.pypy.org Wed Apr 30 21:18:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 30 Apr 2014 21:18:51 +0200 (CEST) Subject: [pypy-commit] pypy default: new module is deprecated, use types Message-ID: <20140430191851.B787A1C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71104:c4c478a2375b Date: 2014-04-30 15:18 -0400 http://bitbucket.org/pypy/pypy/changeset/c4c478a2375b/ Log: new module is deprecated, use types diff --git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py --- a/lib-python/2.7/cProfile.py +++ b/lib-python/2.7/cProfile.py @@ -161,7 +161,7 @@ # ____________________________________________________________ def main(): - import os, sys, new + import os, sys, types from optparse import OptionParser usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." 
parser = OptionParser(usage=usage) @@ -184,7 +184,7 @@ sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') - mainmod = new.module('__main__') + mainmod = types.ModuleType('__main__') mainmod.__file__ = progname mainmod.__package__ = None runctx(code, mainmod.__dict__, None, options.outfile, options.sort) From noreply at buildbot.pypy.org Wed Apr 30 22:33:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 22:33:33 +0200 (CEST) Subject: [pypy-commit] pypy default: Another passing test Message-ID: <20140430203333.E1BBF1C05CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71105:5c15e6bbc796 Date: 2014-04-30 22:32 +0200 http://bitbucket.org/pypy/pypy/changeset/5c15e6bbc796/ Log: Another passing test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5182,6 +5182,25 @@ } self.optimize_loop(ops, expected, call_pure_results) + def test_call_pure_quasiimmut(self): + ops = """ + [] + quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) + guard_not_invalidated() [] + i0 = getfield_gc(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = call_pure(123, i0, descr=nonwritedescr) + finish(i1) + """ + expected = """ + [] + guard_not_invalidated() [] + finish(5) + """ + call_pure_results = { + (ConstInt(123), ConstInt(-4247)): ConstInt(5), + } + self.optimize_loop(ops, expected, call_pure_results) + def test_guard_not_forced_2_virtual(self): ops = """ [i0] From noreply at buildbot.pypy.org Wed Apr 30 22:57:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 30 Apr 2014 22:57:37 +0200 (CEST) Subject: [pypy-commit] pypy default: Trying out with a promote_string() here... 
Message-ID: <20140430205737.7284C1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71106:fc261cbeb029 Date: 2014-04-30 22:56 +0200 http://bitbucket.org/pypy/pypy/changeset/fc261cbeb029/ Log: Trying out with a promote_string() here... diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -343,6 +343,7 @@ def _enter_builtin_call(self, key): self = jit.promote(self) + key = jit.promote_string(key) entry = self._get_or_make_builtin_entry(key) self.current_context = ProfilerContext(self, entry) @@ -351,6 +352,7 @@ if context is None: return self = jit.promote(self) + key = jit.promote_string(key) try: entry = self._get_or_make_builtin_entry(key, False) except KeyError: