[pypy-commit] pypy py3.5: Merge py3.5-raffael_t into py3.5
raffael_t
pypy.commits at gmail.com
Wed May 4 15:12:31 EDT 2016
Author: Raffael Tfirst <raffael.tfirst at gmail.com>
Branch: py3.5
Changeset: r84197:3007d740c2c9
Date: 2016-05-04 21:11 +0200
http://bitbucket.org/pypy/pypy/changeset/3007d740c2c9/
Log: Merge py3.5-raffael_t into py3.5
diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py
--- a/lib-python/3/opcode.py
+++ b/lib-python/3/opcode.py
@@ -85,10 +85,7 @@
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
-def_op('GET_AITER', 50)
-def_op('GET_ANEXT', 51)
-def_op('BEFORE_ASYNC_WITH', 52)
-
+def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
@@ -103,12 +100,11 @@
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
-def_op('GET_YIELD_FROM_ITER', 69)
+def_op('STORE_LOCALS', 69)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
-def_op('GET_AWAITABLE', 73)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
@@ -116,8 +112,7 @@
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
-def_op('WITH_CLEANUP_START', 81)
-def_op('WITH_CLEANUP_FINISH', 82)
+def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
@@ -200,20 +195,9 @@
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
-def_op('LOAD_CLASSDEREF', 148)
-hasfree.append(148)
-
-jrel_op('SETUP_ASYNC_WITH', 154)
-
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
-def_op('BUILD_LIST_UNPACK', 149)
-def_op('BUILD_MAP_UNPACK', 150)
-def_op('BUILD_MAP_UNPACK_WITH_CALL', 151)
-def_op('BUILD_TUPLE_UNPACK', 152)
-def_op('BUILD_SET_UNPACK', 153)
-
# pypy modification, experimental bytecode
def_op('LOOKUP_METHOD', 201) # Index in name list
hasname.append(201)
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -557,6 +557,7 @@
ops.LIST_APPEND: -1,
ops.SET_ADD: -1,
ops.MAP_ADD: -2,
+ # XXX
ops.BINARY_POWER: -1,
ops.BINARY_MULTIPLY: -1,
@@ -566,6 +567,7 @@
ops.BINARY_SUBSCR: -1,
ops.BINARY_FLOOR_DIVIDE: -1,
ops.BINARY_TRUE_DIVIDE: -1,
+ ops.BINARY_MATRIX_MULTIPLY: -1,
ops.BINARY_LSHIFT: -1,
ops.BINARY_RSHIFT: -1,
ops.BINARY_AND: -1,
@@ -579,6 +581,7 @@
ops.INPLACE_MULTIPLY: -1,
ops.INPLACE_MODULO: -1,
ops.INPLACE_POWER: -1,
+ ops.INPLACE_MATRIX_MULTIPLY: -1,
ops.INPLACE_LSHIFT: -1,
ops.INPLACE_RSHIFT: -1,
ops.INPLACE_AND: -1,
@@ -613,6 +616,7 @@
ops.YIELD_FROM: -1,
ops.COMPARE_OP: -1,
+ # TODO
ops.LOOKUP_METHOD: 1,
ops.LOAD_NAME: 1,
@@ -649,8 +653,10 @@
ops.JUMP_IF_FALSE_OR_POP: 0,
ops.POP_JUMP_IF_TRUE: -1,
ops.POP_JUMP_IF_FALSE: -1,
+ # TODO
ops.JUMP_IF_NOT_DEBUG: 0,
+ # TODO
ops.BUILD_LIST_FROM_ARG: 1,
}
diff --git a/pypy/interpreter/astcompiler/assemble.py.orig b/pypy/interpreter/astcompiler/assemble.py.orig
new file mode 100644
--- /dev/null
+++ b/pypy/interpreter/astcompiler/assemble.py.orig
@@ -0,0 +1,765 @@
+"""Python control flow graph generation and bytecode assembly."""
+
+import os
+from rpython.rlib import rfloat
+from rpython.rlib.objectmodel import specialize, we_are_translated
+
+from pypy.interpreter.astcompiler import ast, consts, misc, symtable
+from pypy.interpreter.error import OperationError
+from pypy.interpreter.pycode import PyCode
+from pypy.tool import stdlib_opcode as ops
+
+
+class StackDepthComputationError(Exception):
+ pass
+
+
+class Instruction(object):
+ """Represents a single opcode."""
+
+ def __init__(self, opcode, arg=0):
+ self.opcode = opcode
+ self.arg = arg
+ self.lineno = 0
+ self.has_jump = False
+
+ def size(self):
+ """Return the size of bytes of this instruction when it is
+ encoded.
+ """
+ if self.opcode >= ops.HAVE_ARGUMENT:
+ return (6 if self.arg > 0xFFFF else 3)
+ return 1
+
+ def jump_to(self, target, absolute=False):
+ """Indicate the target this jump instruction.
+
+ The opcode must be a JUMP opcode.
+ """
+ self.jump = (target, absolute)
+ self.has_jump = True
+
+ def __repr__(self):
+ data = [ops.opname[self.opcode]]
+ template = "<%s"
+ if self.opcode >= ops.HAVE_ARGUMENT:
+ data.append(self.arg)
+ template += " %i"
+ if self.has_jump:
+ data.append(self.jump[0])
+ template += " %s"
+ template += ">"
+ return template % tuple(data)
+
+
+class Block(object):
+ """A basic control flow block.
+
+ It has one entry point and several possible exit points. Its
+ instructions may be jumps to other blocks, or if control flow
+ reaches the end of the block, it continues to next_block.
+ """
+
+ marked = False
+ have_return = False
+ auto_inserted_return = False
+
+ def __init__(self):
+ self.instructions = []
+ self.next_block = None
+
+ def _post_order_see(self, stack, nextblock):
+ if nextblock.marked == 0:
+ nextblock.marked = 1
+ stack.append(nextblock)
+
+ def post_order(self):
+ """Return this block and its children in post order. This means
+ that the graph of blocks is first cleaned up to ignore
+ back-edges, thus turning it into a DAG. Then the DAG is
+ linearized. For example:
+
+ A --> B -\ => [A, D, B, C]
+ \-> D ---> C
+ """
+ resultblocks = []
+ stack = [self]
+ self.marked = 1
+ while stack:
+ current = stack[-1]
+ if current.marked == 1:
+ current.marked = 2
+ if current.next_block is not None:
+ self._post_order_see(stack, current.next_block)
+ else:
+ i = current.marked - 2
+ assert i >= 0
+ while i < len(current.instructions):
+ instr = current.instructions[i]
+ i += 1
+ if instr.has_jump:
+ current.marked = i + 2
+ self._post_order_see(stack, instr.jump[0])
+ break
+ else:
+ resultblocks.append(current)
+ stack.pop()
+ resultblocks.reverse()
+ return resultblocks
+
+ def code_size(self):
+ """Return the encoded size of all the instructions in this
+ block.
+ """
+ i = 0
+ for instr in self.instructions:
+ i += instr.size()
+ return i
+
+ def get_code(self):
+ """Encode the instructions in this block into bytecode."""
+ code = []
+ for instr in self.instructions:
+ opcode = instr.opcode
+ if opcode >= ops.HAVE_ARGUMENT:
+ arg = instr.arg
+ if instr.arg > 0xFFFF:
+ ext = arg >> 16
+ code.append(chr(ops.EXTENDED_ARG))
+ code.append(chr(ext & 0xFF))
+ code.append(chr(ext >> 8))
+ arg &= 0xFFFF
+ code.append(chr(opcode))
+ code.append(chr(arg & 0xFF))
+ code.append(chr(arg >> 8))
+ else:
+ code.append(chr(opcode))
+ return ''.join(code)
+
+
+def _make_index_dict_filter(syms, flag):
+ i = 0
+ result = {}
+ for name, scope in syms.iteritems():
+ if scope == flag:
+ result[name] = i
+ i += 1
+ return result
+
+
+ at specialize.argtype(0)
+def _iter_to_dict(iterable, offset=0):
+ result = {}
+ index = offset
+ for item in iterable:
+ result[item] = index
+ index += 1
+ return result
+
+
+class PythonCodeMaker(ast.ASTVisitor):
+ """Knows how to assemble a PyCode object."""
+
+ def __init__(self, space, name, first_lineno, scope, compile_info):
+ self.space = space
+ self.name = name
+ self.first_lineno = first_lineno
+ self.compile_info = compile_info
+ self.first_block = self.new_block()
+ self.use_block(self.first_block)
+ self.names = {}
+ self.var_names = _iter_to_dict(scope.varnames)
+ self.cell_vars = _make_index_dict_filter(scope.symbols,
+ symtable.SCOPE_CELL)
+ self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars))
+ self.w_consts = space.newdict()
+ self.argcount = 0
+ self.kwonlyargcount = 0
+ self.lineno_set = False
+ self.lineno = 0
+ self.add_none_to_final_return = True
+
+ def new_block(self):
+ return Block()
+
+ def use_block(self, block):
+ """Start emitting bytecode into block."""
+ self.current_block = block
+ self.instrs = block.instructions
+
+ def use_next_block(self, block=None):
+ """Set this block as the next_block for the last and use it."""
+ if block is None:
+ block = self.new_block()
+ self.current_block.next_block = block
+ self.use_block(block)
+ return block
+
+ def is_dead_code(self):
+ """Return False if any code can be meaningfully added to the
+ current block, or True if it would be dead code."""
+ # currently only True after a RETURN_VALUE.
+ return self.current_block.have_return
+
+ def emit_op(self, op):
+ """Emit an opcode without an argument."""
+ instr = Instruction(op)
+ if not self.lineno_set:
+ instr.lineno = self.lineno
+ self.lineno_set = True
+ if not self.is_dead_code():
+ self.instrs.append(instr)
+ if op == ops.RETURN_VALUE:
+ self.current_block.have_return = True
+ return instr
+
+ def emit_op_arg(self, op, arg):
+ """Emit an opcode with an integer argument."""
+ instr = Instruction(op, arg)
+ if not self.lineno_set:
+ instr.lineno = self.lineno
+ self.lineno_set = True
+ if not self.is_dead_code():
+ self.instrs.append(instr)
+
+ def emit_op_name(self, op, container, name):
+ """Emit an opcode referencing a name."""
+ self.emit_op_arg(op, self.add_name(container, name))
+
+ def emit_jump(self, op, block_to, absolute=False):
+ """Emit a jump opcode to another block."""
+ self.emit_op(op).jump_to(block_to, absolute)
+
+ def add_name(self, container, name):
+ """Get the index of a name in container."""
+ name = self.scope.mangle(name)
+ try:
+ index = container[name]
+ except KeyError:
+ index = len(container)
+ container[name] = index
+ return index
+
+ def add_const(self, obj):
+ """Add a W_Root to the constant array and return its location."""
+ space = self.space
+        # To avoid confusing equal but separate types, we also store the type
+ # of the constant in the dictionary. Moreover, we have to keep the
+ # difference between -0.0 and 0.0 floats, and this recursively in
+ # tuples.
+ w_key = self._make_key(obj)
+
+ w_len = space.finditem(self.w_consts, w_key)
+ if w_len is None:
+ w_len = space.len(self.w_consts)
+ space.setitem(self.w_consts, w_key, w_len)
+ if space.int_w(w_len) == 0:
+ self.scope.doc_removable = False
+ return space.int_w(w_len)
+
+ def _make_key(self, obj):
+ # see the tests 'test_zeros_not_mixed*' in ../test/test_compiler.py
+ space = self.space
+ w_type = space.type(obj)
+ if space.is_w(w_type, space.w_float):
+ val = space.float_w(obj)
+ if val == 0.0 and rfloat.copysign(1., val) < 0:
+ w_key = space.newtuple([obj, space.w_float, space.w_None])
+ else:
+ w_key = space.newtuple([obj, space.w_float])
+ elif space.is_w(w_type, space.w_complex):
+ w_real = space.getattr(obj, space.wrap("real"))
+ w_imag = space.getattr(obj, space.wrap("imag"))
+ real = space.float_w(w_real)
+ imag = space.float_w(w_imag)
+ real_negzero = (real == 0.0 and
+ rfloat.copysign(1., real) < 0)
+ imag_negzero = (imag == 0.0 and
+ rfloat.copysign(1., imag) < 0)
+ if real_negzero and imag_negzero:
+ tup = [obj, space.w_complex, space.w_None, space.w_None,
+ space.w_None]
+ elif imag_negzero:
+ tup = [obj, space.w_complex, space.w_None, space.w_None]
+ elif real_negzero:
+ tup = [obj, space.w_complex, space.w_None]
+ else:
+ tup = [obj, space.w_complex]
+ w_key = space.newtuple(tup)
+ elif space.is_w(w_type, space.w_tuple):
+ result_w = [obj, w_type]
+ for w_item in space.fixedview(obj):
+ result_w.append(self._make_key(w_item))
+ w_key = space.newtuple(result_w[:])
+ elif isinstance(obj, PyCode):
+ w_key = space.newtuple([obj, w_type, space.id(obj)])
+ else:
+ w_key = space.newtuple([obj, w_type])
+ return w_key
+
+ def load_const(self, obj):
+ index = self.add_const(obj)
+ self.emit_op_arg(ops.LOAD_CONST, index)
+
+ def update_position(self, lineno, force=False):
+ """Possibly change the lineno for the next instructions."""
+ if force or lineno > self.lineno:
+ self.lineno = lineno
+ self.lineno_set = False
+
+ def _resolve_block_targets(self, blocks):
+ """Compute the arguments of jump instructions."""
+ last_extended_arg_count = 0
+ # The reason for this loop is extended jumps. EXTENDED_ARG
+ # extends the bytecode size, so it might invalidate the offsets
+ # we've already given. Thus we have to loop until the number of
+ # extended args is stable. Any extended jump at all is
+ # extremely rare, so performance is not too concerning.
+ while True:
+ extended_arg_count = 0
+ offset = 0
+ force_redo = False
+ # Calculate the code offset of each block.
+ for block in blocks:
+ block.offset = offset
+ offset += block.code_size()
+ for block in blocks:
+ offset = block.offset
+ for instr in block.instructions:
+ offset += instr.size()
+ if instr.has_jump:
+ target, absolute = instr.jump
+ op = instr.opcode
+ # Optimize an unconditional jump going to another
+ # unconditional jump.
+ if op == ops.JUMP_ABSOLUTE or op == ops.JUMP_FORWARD:
+ if target.instructions:
+ target_op = target.instructions[0].opcode
+ if target_op == ops.JUMP_ABSOLUTE:
+ target = target.instructions[0].jump[0]
+ instr.opcode = ops.JUMP_ABSOLUTE
+ absolute = True
+ elif target_op == ops.RETURN_VALUE:
+ # Replace JUMP_* to a RETURN into
+ # just a RETURN
+ instr.opcode = ops.RETURN_VALUE
+ instr.arg = 0
+ instr.has_jump = False
+ # The size of the code changed,
+ # we have to trigger another pass
+ force_redo = True
+ continue
+ if absolute:
+ jump_arg = target.offset
+ else:
+ jump_arg = target.offset - offset
+ instr.arg = jump_arg
+ if jump_arg > 0xFFFF:
+ extended_arg_count += 1
+ if (extended_arg_count == last_extended_arg_count and
+ not force_redo):
+ break
+ else:
+ last_extended_arg_count = extended_arg_count
+
+ def _build_consts_array(self):
+ """Turn the applevel constants dictionary into a list."""
+ w_consts = self.w_consts
+ space = self.space
+ consts_w = [space.w_None] * space.len_w(w_consts)
+ w_iter = space.iter(w_consts)
+ first = space.wrap(0)
+ while True:
+ try:
+ w_key = space.next(w_iter)
+ except OperationError as e:
+ if not e.match(space, space.w_StopIteration):
+ raise
+ break
+ w_index = space.getitem(w_consts, w_key)
+ w_constant = space.getitem(w_key, first)
+ w_constant = misc.intern_if_common_string(space, w_constant)
+ consts_w[space.int_w(w_index)] = w_constant
+ return consts_w
+
+ def _get_code_flags(self):
+ """Get an extra flags that should be attached to the code object."""
+ raise NotImplementedError
+
+ def _stacksize(self, blocks):
+ """Compute co_stacksize."""
+ for block in blocks:
+ block.initial_depth = 0
+ # Assumes that it is sufficient to walk the blocks in 'post-order'.
+ # This means we ignore all back-edges, but apart from that, we only
+ # look into a block when all the previous blocks have been done.
+ self._max_depth = 0
+ for block in blocks:
+ depth = self._do_stack_depth_walk(block)
+ if block.auto_inserted_return and depth != 0:
+ os.write(2, "StackDepthComputationError in %s at %s:%s\n" % (
+ self.compile_info.filename, self.name, self.first_lineno))
+ raise StackDepthComputationError # fatal error
+ return self._max_depth
+
+ def _next_stack_depth_walk(self, nextblock, depth):
+ if depth > nextblock.initial_depth:
+ nextblock.initial_depth = depth
+
+ def _do_stack_depth_walk(self, block):
+ depth = block.initial_depth
+ for instr in block.instructions:
+ depth += _opcode_stack_effect(instr.opcode, instr.arg)
+ if depth >= self._max_depth:
+ self._max_depth = depth
+ jump_op = instr.opcode
+ if instr.has_jump:
+ target_depth = depth
+ if jump_op == ops.FOR_ITER:
+ target_depth -= 2
+ elif (jump_op == ops.SETUP_FINALLY or
+ jump_op == ops.SETUP_EXCEPT or
+ jump_op == ops.SETUP_WITH):
+ if jump_op == ops.SETUP_FINALLY:
+ target_depth += 4
+ elif jump_op == ops.SETUP_EXCEPT:
+ target_depth += 4
+ elif jump_op == ops.SETUP_WITH:
+ target_depth += 3
+ if target_depth > self._max_depth:
+ self._max_depth = target_depth
+ elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or
+ jump_op == ops.JUMP_IF_FALSE_OR_POP):
+ depth -= 1
+ self._next_stack_depth_walk(instr.jump[0], target_depth)
+ if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD:
+ # Nothing more can occur.
+ break
+ elif jump_op == ops.RETURN_VALUE or jump_op == ops.RAISE_VARARGS:
+ # Nothing more can occur.
+ break
+ else:
+ if block.next_block:
+ self._next_stack_depth_walk(block.next_block, depth)
+ return depth
+
+ def _build_lnotab(self, blocks):
+ """Build the line number table for tracebacks and tracing."""
+ current_line = self.first_lineno
+ current_off = 0
+ table = []
+ push = table.append
+ for block in blocks:
+ offset = block.offset
+ for instr in block.instructions:
+ if instr.lineno:
+ # compute deltas
+ line = instr.lineno - current_line
+ if line < 0:
+ continue
+ addr = offset - current_off
+ # Python assumes that lineno always increases with
+ # increasing bytecode address (lnotab is unsigned
+ # char). Depending on when SET_LINENO instructions
+ # are emitted this is not always true. Consider the
+ # code:
+ # a = (1,
+ # b)
+ # In the bytecode stream, the assignment to "a"
+ # occurs after the loading of "b". This works with
+ # the C Python compiler because it only generates a
+ # SET_LINENO instruction for the assignment.
+ if line or addr:
+ while addr > 255:
+ push(chr(255))
+ push(chr(0))
+ addr -= 255
+ while line > 255:
+ push(chr(addr))
+ push(chr(255))
+ line -= 255
+ addr = 0
+ push(chr(addr))
+ push(chr(line))
+ current_line = instr.lineno
+ current_off = offset
+ offset += instr.size()
+ return ''.join(table)
+
+ def assemble(self):
+ """Build a PyCode object."""
+ # Unless it's interactive, every code object must end in a return.
+ if not self.current_block.have_return:
+ self.use_next_block()
+ if self.add_none_to_final_return:
+ self.load_const(self.space.w_None)
+ self.emit_op(ops.RETURN_VALUE)
+ self.current_block.auto_inserted_return = True
+ # Set the first lineno if it is not already explicitly set.
+ if self.first_lineno == -1:
+ if self.first_block.instructions:
+ self.first_lineno = self.first_block.instructions[0].lineno
+ else:
+ self.first_lineno = 1
+ blocks = self.first_block.post_order()
+ self._resolve_block_targets(blocks)
+ lnotab = self._build_lnotab(blocks)
+ stack_depth = self._stacksize(blocks)
+ consts_w = self._build_consts_array()
+ names = _list_from_dict(self.names)
+ var_names = _list_from_dict(self.var_names)
+ cell_names = _list_from_dict(self.cell_vars)
+ free_names = _list_from_dict(self.free_vars, len(cell_names))
+ flags = self._get_code_flags()
+ # (Only) inherit compilerflags in PyCF_MASK
+ flags |= (self.compile_info.flags & consts.PyCF_MASK)
+ bytecode = ''.join([block.get_code() for block in blocks])
+ return PyCode(self.space,
+ self.argcount,
+ self.kwonlyargcount,
+ len(self.var_names),
+ stack_depth,
+ flags,
+ bytecode,
+ list(consts_w),
+ names,
+ var_names,
+ self.compile_info.filename,
+ self.name,
+ self.first_lineno,
+ lnotab,
+ free_names,
+ cell_names,
+ self.compile_info.hidden_applevel)
+
+
+def _list_from_dict(d, offset=0):
+ result = [None] * len(d)
+ for obj, index in d.iteritems():
+ result[index - offset] = obj
+ return result
+
+
+_static_opcode_stack_effects = {
+ ops.NOP: 0,
+
+ ops.POP_TOP: -1,
+ ops.ROT_TWO: 0,
+ ops.ROT_THREE: 0,
+ ops.DUP_TOP: 1,
+ ops.DUP_TOP_TWO: 2,
+
+ ops.UNARY_POSITIVE: 0,
+ ops.UNARY_NEGATIVE: 0,
+ ops.UNARY_NOT: 0,
+ ops.UNARY_INVERT: 0,
+
+ ops.LIST_APPEND: -1,
+ ops.SET_ADD: -1,
+ ops.MAP_ADD: -2,
+<<<<<<< local
+=======
+ # XXX
+ ops.STORE_MAP: -2,
+>>>>>>> other
+
+ ops.BINARY_POWER: -1,
+ ops.BINARY_MULTIPLY: -1,
+ ops.BINARY_MODULO: -1,
+ ops.BINARY_ADD: -1,
+ ops.BINARY_SUBTRACT: -1,
+ ops.BINARY_SUBSCR: -1,
+ ops.BINARY_FLOOR_DIVIDE: -1,
+ ops.BINARY_TRUE_DIVIDE: -1,
+ ops.BINARY_MATRIX_MULTIPLY: -1,
+ ops.BINARY_LSHIFT: -1,
+ ops.BINARY_RSHIFT: -1,
+ ops.BINARY_AND: -1,
+ ops.BINARY_OR: -1,
+ ops.BINARY_XOR: -1,
+
+ ops.INPLACE_FLOOR_DIVIDE: -1,
+ ops.INPLACE_TRUE_DIVIDE: -1,
+ ops.INPLACE_ADD: -1,
+ ops.INPLACE_SUBTRACT: -1,
+ ops.INPLACE_MULTIPLY: -1,
+ ops.INPLACE_MODULO: -1,
+ ops.INPLACE_POWER: -1,
+ ops.INPLACE_MATRIX_MULTIPLY: -1,
+ ops.INPLACE_LSHIFT: -1,
+ ops.INPLACE_RSHIFT: -1,
+ ops.INPLACE_AND: -1,
+ ops.INPLACE_OR: -1,
+ ops.INPLACE_XOR: -1,
+
+ ops.STORE_SUBSCR: -3,
+ ops.DELETE_SUBSCR: -2,
+
+ ops.GET_ITER: 0,
+ ops.FOR_ITER: 1,
+ ops.BREAK_LOOP: 0,
+ ops.CONTINUE_LOOP: 0,
+ ops.SETUP_LOOP: 0,
+
+ ops.PRINT_EXPR: -1,
+
+<<<<<<< local
+ ops.WITH_CLEANUP_START: -1,
+ ops.WITH_CLEANUP_FINISH: -1, # XXX Sometimes more
+=======
+ # TODO
+ ops.WITH_CLEANUP: -1,
+>>>>>>> other
+ ops.LOAD_BUILD_CLASS: 1,
+<<<<<<< local
+=======
+ # TODO
+ ops.STORE_LOCALS: -1,
+>>>>>>> other
+ ops.POP_BLOCK: 0,
+ ops.POP_EXCEPT: -1,
+ ops.END_FINALLY: -4, # assume always 4: we pretend that SETUP_FINALLY
+ # pushes 4. In truth, it would only push 1 and
+ # the corresponding END_FINALLY only pops 1.
+ ops.SETUP_WITH: 1,
+ ops.SETUP_FINALLY: 0,
+ ops.SETUP_EXCEPT: 0,
+
+ ops.RETURN_VALUE: -1,
+ ops.YIELD_VALUE: 0,
+ ops.YIELD_FROM: -1,
+ ops.COMPARE_OP: -1,
+
+ # TODO
+ ops.LOOKUP_METHOD: 1,
+
+ ops.LOAD_NAME: 1,
+ ops.STORE_NAME: -1,
+ ops.DELETE_NAME: 0,
+
+ ops.LOAD_FAST: 1,
+ ops.STORE_FAST: -1,
+ ops.DELETE_FAST: 0,
+
+ ops.LOAD_ATTR: 0,
+ ops.STORE_ATTR: -2,
+ ops.DELETE_ATTR: -1,
+
+ ops.LOAD_GLOBAL: 1,
+ ops.STORE_GLOBAL: -1,
+ ops.DELETE_GLOBAL: 0,
+ ops.DELETE_DEREF: 0,
+
+ ops.LOAD_CLOSURE: 1,
+ ops.LOAD_DEREF: 1,
+ ops.STORE_DEREF: -1,
+ ops.DELETE_DEREF: 0,
+
+ ops.LOAD_CONST: 1,
+
+ ops.IMPORT_STAR: -1,
+ ops.IMPORT_NAME: -1,
+ ops.IMPORT_FROM: 1,
+
+ ops.JUMP_FORWARD: 0,
+ ops.JUMP_ABSOLUTE: 0,
+ ops.JUMP_IF_TRUE_OR_POP: 0,
+ ops.JUMP_IF_FALSE_OR_POP: 0,
+ ops.POP_JUMP_IF_TRUE: -1,
+ ops.POP_JUMP_IF_FALSE: -1,
+ # TODO
+ ops.JUMP_IF_NOT_DEBUG: 0,
+
+ # TODO
+ ops.BUILD_LIST_FROM_ARG: 1,
+}
+
+
+def _compute_UNPACK_SEQUENCE(arg):
+ return arg - 1
+
+def _compute_UNPACK_EX(arg):
+ return (arg & 0xFF) + (arg >> 8)
+
+def _compute_BUILD_TUPLE(arg):
+ return 1 - arg
+
+def _compute_BUILD_LIST(arg):
+ return 1 - arg
+
+def _compute_BUILD_SET(arg):
+ return 1 - arg
+
+def _compute_BUILD_MAP(arg):
+ return 1 - 2 * arg
+
+def _compute_BUILD_MAP_UNPACK(arg):
+ return 1 - arg
+
+def _compute_MAKE_CLOSURE(arg):
+ return -2 - _num_args(arg) - ((arg >> 16) & 0xFFFF)
+
+def _compute_MAKE_FUNCTION(arg):
+ return -1 - _num_args(arg) - ((arg >> 16) & 0xFFFF)
+
+def _compute_BUILD_SLICE(arg):
+ if arg == 3:
+ return -2
+ else:
+ return -1
+
+def _compute_RAISE_VARARGS(arg):
+ return -arg
+
+def _num_args(oparg):
+ return (oparg % 256) + 2 * ((oparg // 256) % 256)
+
+def _compute_CALL_FUNCTION(arg):
+ return -_num_args(arg)
+
+def _compute_CALL_FUNCTION_VAR(arg):
+ return -_num_args(arg) - 1
+
+def _compute_CALL_FUNCTION_KW(arg):
+ return -_num_args(arg) - 1
+
+def _compute_CALL_FUNCTION_VAR_KW(arg):
+ return -_num_args(arg) - 2
+
+def _compute_CALL_METHOD(arg):
+ return -_num_args(arg) - 1
+
+
+_stack_effect_computers = {}
+for name, func in globals().items():
+ if name.startswith("_compute_"):
+ func._always_inline_ = True
+ _stack_effect_computers[getattr(ops, name[9:])] = func
+for op, value in _static_opcode_stack_effects.iteritems():
+ def func(arg, _value=value):
+ return _value
+ func._always_inline_ = True
+ _stack_effect_computers[op] = func
+del name, func, op, value
+
+
+def _opcode_stack_effect(op, arg):
+ """Return the stack effect of a opcode an its argument."""
+ if we_are_translated():
+ for possible_op in ops.unrolling_opcode_descs:
+ # EXTENDED_ARG should never get in here.
+ if possible_op.index == ops.EXTENDED_ARG:
+ continue
+ if op == possible_op.index:
+ return _stack_effect_computers[possible_op.index](arg)
+ else:
+ raise AssertionError("unknown opcode: %s" % (op,))
+ else:
+ try:
+ return _static_opcode_stack_effects[op]
+ except KeyError:
+ try:
+ return _stack_effect_computers[op](arg)
+ except KeyError:
+ raise KeyError("Unknown stack effect for %s (%s)" %
+ (ops.opname[op], op))
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -2970,6 +2970,8 @@
return 11
if space.isinstance_w(w_node, get(space).w_FloorDiv):
return 12
+ if space.isinstance_w(w_node, get(space).w_MatMul):
+ return 13
raise oefmt(space.w_TypeError,
"Expected operator node, got %T", w_node)
State.ast_type('operator', 'AST', None)
@@ -3034,6 +3036,11 @@
return space.call_function(get(space).w_FloorDiv)
State.ast_type('FloorDiv', 'operator', None)
+class _MatMul(operator):
+ def to_object(self, space):
+ return space.call_function(get(space).w_MatMul)
+State.ast_type('MatMul', 'operator', None)
+
Add = 1
Sub = 2
Mult = 3
@@ -3046,6 +3053,7 @@
BitXor = 10
BitAnd = 11
FloorDiv = 12
+MatMul = 13
operator_to_class = [
_Add,
@@ -3060,6 +3068,7 @@
_BitXor,
_BitAnd,
_FloorDiv,
+ _MatMul,
]
class unaryop(AST):
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -17,6 +17,7 @@
'/=' : ast.Div,
'//=' : ast.FloorDiv,
'%=' : ast.Mod,
+ '@=' : ast.MatMul,
'<<=' : ast.LShift,
'>>=' : ast.RShift,
'&=' : ast.BitAnd,
@@ -37,7 +38,8 @@
tokens.STAR : ast.Mult,
tokens.SLASH : ast.Div,
tokens.DOUBLESLASH : ast.FloorDiv,
- tokens.PERCENT : ast.Mod
+ tokens.PERCENT : ast.Mod,
+ tokens.AT : ast.MatMul
})
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -65,7 +65,8 @@
ast.BitOr: ops.BINARY_OR,
ast.BitAnd: ops.BINARY_AND,
ast.BitXor: ops.BINARY_XOR,
- ast.FloorDiv: ops.BINARY_FLOOR_DIVIDE
+ ast.FloorDiv: ops.BINARY_FLOOR_DIVIDE,
+ ast.MatMul: ops.BINARY_MATRIX_MULTIPLY
})
inplace_operations = misc.dict_to_switch({
@@ -80,7 +81,8 @@
ast.BitOr: ops.INPLACE_OR,
ast.BitAnd: ops.INPLACE_AND,
ast.BitXor: ops.INPLACE_XOR,
- ast.FloorDiv: ops.INPLACE_FLOOR_DIVIDE
+ ast.FloorDiv: ops.INPLACE_FLOOR_DIVIDE,
+ ast.MatMul: ops.INPLACE_MATRIX_MULTIPLY
})
compare_operations = misc.dict_to_switch({
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -134,6 +134,7 @@
ast.BitOr : _binary_fold("or_"),
ast.BitXor : _binary_fold("xor"),
ast.BitAnd : _binary_fold("and_"),
+ ast.MatMul : _binary_fold("matmul"),
}
unrolling_binary_folders = unrolling_iterable(binary_folders.items())
diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl
--- a/pypy/interpreter/astcompiler/tools/Python.asdl
+++ b/pypy/interpreter/astcompiler/tools/Python.asdl
@@ -95,7 +95,7 @@
boolop = And | Or
operator = Add | Sub | Mult | Div | Mod | Pow | LShift
- | RShift | BitOr | BitXor | BitAnd | FloorDiv
+ | RShift | BitOr | BitXor | BitAnd | FloorDiv | MatMul
unaryop = Invert | Not | UAdd | USub
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1891,6 +1891,8 @@
('set', 'set', 3, ['__set__']),
('delete', 'delete', 2, ['__delete__']),
('userdel', 'del', 1, ['__del__']),
+ ('matmul', '@', 2, ['__matmul__', '__rmatmul__']),
+ ('inplace_matmul', '@=', 2, ['__imatmul__']),
]
ObjSpace.BuiltinModuleTable = [
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -228,6 +228,8 @@
self.BINARY_AND(oparg, next_instr)
elif opcode == opcodedesc.BINARY_FLOOR_DIVIDE.index:
self.BINARY_FLOOR_DIVIDE(oparg, next_instr)
+ elif opcode == opcodedesc.BINARY_MATRIX_MULTIPLY.index:
+ self.BINARY_MATRIX_MULTIPLY(oparg, next_instr)
elif opcode == opcodedesc.BINARY_LSHIFT.index:
self.BINARY_LSHIFT(oparg, next_instr)
elif opcode == opcodedesc.BINARY_MODULO.index:
@@ -571,6 +573,7 @@
BINARY_DIVIDE = binaryoperation("div")
# XXX BINARY_DIVIDE must fall back to BINARY_TRUE_DIVIDE with -Qnew
BINARY_MODULO = binaryoperation("mod")
+ BINARY_MATRIX_MULTIPLY = binaryoperation("matmul")
BINARY_ADD = binaryoperation("add")
BINARY_SUBTRACT = binaryoperation("sub")
BINARY_SUBSCR = binaryoperation("getitem")
@@ -589,9 +592,10 @@
INPLACE_MULTIPLY = binaryoperation("inplace_mul")
INPLACE_TRUE_DIVIDE = binaryoperation("inplace_truediv")
INPLACE_FLOOR_DIVIDE = binaryoperation("inplace_floordiv")
INPLACE_DIVIDE = binaryoperation("inplace_div")
# XXX INPLACE_DIVIDE must fall back to INPLACE_TRUE_DIVIDE with -Qnew
INPLACE_MODULO = binaryoperation("inplace_mod")
+ INPLACE_MATRIX_MULTIPLY = binaryoperation("inplace_matmul")
INPLACE_ADD = binaryoperation("inplace_add")
INPLACE_SUBTRACT = binaryoperation("inplace_sub")
INPLACE_LSHIFT = binaryoperation("inplace_lshift")
diff --git a/pypy/interpreter/pyparser/data/Grammar3.5 b/pypy/interpreter/pyparser/data/Grammar3.5
new file mode 100644
--- /dev/null
+++ b/pypy/interpreter/pyparser/data/Grammar3.5
@@ -0,0 +1,159 @@
+# Grammar for Python
+
+# Note: Changing the grammar specified in this file will most likely
+# require corresponding changes in the parser module
+# (../Modules/parsermodule.c). If you can't make the changes to
+# that module yourself, please co-ordinate the required changes
+# with someone who can; ask around on python-dev for help. Fred
+# Drake <fdrake at acm.org> will probably be listening there.
+
+# NOTE WELL: You should also follow all the steps listed at
+# https://docs.python.org/devguide/grammar.html
+
+# Start symbols for the grammar:
+# single_input is a single interactive statement;
+# file_input is a module or sequence of commands read from an input file;
+# eval_input is the input for the eval() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+file_input: (NEWLINE | stmt)* ENDMARKER
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef)
+# | async_funcdef)
+
+# async_funcdef: ASYNC funcdef
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+
+parameters: '(' [typedargslist] ')'
+typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [','
+ ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]]
+ | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
+tfpdef: NAME [':' test]
+varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [','
+ ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]]
+ | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
+vfpdef: NAME
+
+stmt: simple_stmt | compound_stmt
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
+ import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
+ ('=' (yield_expr|testlist_star_expr))*)
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+ '<<=' | '>>=' | '**=' | '//=')
+# For normal assignments, additional restrictions enforced by the interpreter
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
+import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
+ 'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: 'global' NAME (',' NAME)*
+nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
+# | async_stmt
+# async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
+if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+ ((except_clause ':' suite)+
+ ['else' ':' suite]
+ ['finally' ':' suite] |
+ 'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)* ':' suite
+with_item: test ['as' expr]
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test ['as' NAME]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+test: or_test ['if' or_test 'else' test] | lambdef
+test_nocond: or_test | lambdef_nocond
+lambdef: 'lambda' [varargslist] ':' test
+lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+# <> isn't actually a valid comparison operator in Python. It's here for the
+# sake of a __future__ import described in PEP 401 (which really works :-)
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+# power: atom_expr ['**' factor]
+power: atom trailer* ['**' factor]
+# atom_expr: [AWAIT] atom trailer*
+atom: ('(' [yield_expr|testlist_comp] ')' |
+ '[' [testlist_comp] ']' |
+ '{' [dictorsetmaker] '}' |
+ NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False')
+testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
+ (test (comp_for | (',' test)* [','])) )
+#dictorsetmaker: ( ((test ':' test | '**' expr)
+# (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+# ((test | star_expr)
+# (comp_for | (',' (test | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: (argument ',')* (argument [',']
+ |'*' test (',' argument)* [',' '**' test]
+ |'**' test)
+#arglist: argument (',' argument)* [',']
+
+# The reason that keywords are test nodes instead of NAME is that using NAME
+# results in an ambiguity. ast.c makes sure it's a NAME.
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
+argument: test [comp_for] | test '=' test # Really [keyword '='] test
+#argument: ( test [comp_for] |
+# test '=' test |
+# '**' test |
+# '*' test )
+
+comp_iter: comp_for | comp_if
+comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_if: 'if' test_nocond [comp_iter]
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist
diff --git a/pypy/interpreter/pyparser/pygram.py b/pypy/interpreter/pyparser/pygram.py
--- a/pypy/interpreter/pyparser/pygram.py
+++ b/pypy/interpreter/pyparser/pygram.py
@@ -9,7 +9,7 @@
def _get_python_grammar():
here = os.path.dirname(__file__)
- fp = open(os.path.join(here, "data", "Grammar3.3"))
+ fp = open(os.path.join(here, "data", "Grammar3.5"))
try:
gram_source = fp.read()
finally:
diff --git a/pypy/interpreter/pyparser/pytoken.py b/pypy/interpreter/pyparser/pytoken.py
--- a/pypy/interpreter/pyparser/pytoken.py
+++ b/pypy/interpreter/pyparser/pytoken.py
@@ -61,6 +61,7 @@
_add_tok('DOUBLESLASH', "//" )
_add_tok('DOUBLESLASHEQUAL',"//=" )
_add_tok('AT', "@" )
+_add_tok('ATEQUAL', "@=" )
_add_tok('RARROW', "->")
_add_tok('ELLIPSIS', "...")
_add_tok('OP')
diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py
--- a/pypy/module/cpyext/number.py
+++ b/pypy/module/cpyext/number.py
@@ -95,6 +95,7 @@
('Xor', 'xor'),
('Or', 'or_'),
('Divmod', 'divmod'),
+ ('MatrixMultiply', 'matmul')
]:
make_numbermethod(name, spacemeth)
if name != 'Divmod':
diff --git a/pypy/module/operator/__init__.py b/pypy/module/operator/__init__.py
--- a/pypy/module/operator/__init__.py
+++ b/pypy/module/operator/__init__.py
@@ -28,7 +28,7 @@
'le', 'lshift', 'lt', 'mod', 'mul',
'ne', 'neg', 'not_', 'or_',
'pos', 'pow', 'rshift', 'setitem',
- 'sub', 'truediv', 'truth', 'xor',
+ 'sub', 'truediv', 'matmul', 'truth', 'xor',
'iadd', 'iand', 'iconcat', 'ifloordiv',
'ilshift', 'imod', 'imul', 'ior', 'ipow',
'irshift', 'isub', 'itruediv', 'ixor', '_length_hint',
@@ -72,6 +72,7 @@
'__sub__' : 'sub',
'__truediv__' : 'truediv',
'__xor__' : 'xor',
+ '__matmul__' : 'matmul',
# in-place
'__iadd__' : 'iadd',
'__iand__' : 'iand',
diff --git a/pypy/module/operator/interp_operator.py b/pypy/module/operator/interp_operator.py
--- a/pypy/module/operator/interp_operator.py
+++ b/pypy/module/operator/interp_operator.py
@@ -143,6 +143,10 @@
'xor(a, b) -- Same as a ^ b.'
return space.xor(w_a, w_b)
+def matmul(space, w_a, w_b):
+ 'matmul(a, b) -- Same as a @ b.'
+ return space.matmul(w_a, w_b)
+
# in-place operations
def iadd(space, w_obj1, w_obj2):
@@ -193,6 +197,10 @@
'ixor(a, b) -- Same as a ^= b.'
return space.inplace_xor(w_a, w_b)
+def imatmul(space, w_a, w_b):
+ 'imatmul(a, b) -- Same as a @= b.'
+ return space.inplace_matmul(w_a, w_b)
+
def iconcat(space, w_obj1, w_obj2):
'iconcat(a, b) -- Same as a += b, for a and b sequences.'
if (space.lookup(w_obj1, '__getitem__') is None or
diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
--- a/pypy/module/sys/version.py
+++ b/pypy/module/sys/version.py
@@ -6,7 +6,7 @@
from pypy.interpreter import gateway
#XXX # the release serial 42 is not in range(16)
-CPYTHON_VERSION = (3, 3, 5, "final", 0)
+CPYTHON_VERSION = (3, 5, 1, "final", 0)
#XXX # sync CPYTHON_VERSION with patchlevel.h, package.py
CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h
diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py
--- a/pypy/objspace/std/intobject.py
+++ b/pypy/objspace/std/intobject.py
@@ -275,6 +275,7 @@
descr_add, descr_radd = _abstract_binop('add')
descr_sub, descr_rsub = _abstract_binop('sub')
descr_mul, descr_rmul = _abstract_binop('mul')
+ descr_matmul, descr_rmatmul = _abstract_binop('matmul')
descr_and, descr_rand = _abstract_binop('and')
descr_or, descr_ror = _abstract_binop('or')
diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py
--- a/pypy/objspace/std/util.py
+++ b/pypy/objspace/std/util.py
@@ -15,7 +15,7 @@
BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>',
'xor': '^'}
BINARY_OPS = dict(add='+', div='/', floordiv='//', mod='%', mul='*', sub='-',
- truediv='/', **BINARY_BITWISE_OPS)
+ truediv='/', matmul='@', **BINARY_BITWISE_OPS)
COMMUTATIVE_OPS = ('add', 'mul', 'and', 'or', 'xor')
diff --git a/pypy/tool/opcode3.py b/pypy/tool/opcode3.py
--- a/pypy/tool/opcode3.py
+++ b/pypy/tool/opcode3.py
@@ -5,6 +5,7 @@
 "Backported" from Python 3 to Python 2 land - an exact copy of lib-python/3/opcode.py
"""
+
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
More information about the pypy-commit
mailing list