[pypy-commit] pypy numpy-indexing-by-arrays: merge in default
justinpeel
noreply at buildbot.pypy.org
Thu Oct 6 06:31:34 CEST 2011
Author: Justin Peel <notmuchtotell at gmail.com>
Branch: numpy-indexing-by-arrays
Changeset: r47841:12c31a30642d
Date: 2011-10-05 22:31 -0600
http://bitbucket.org/pypy/pypy/changeset/12c31a30642d/
Log: merge in default
diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -36,48 +36,45 @@
print >> sys.stderr, "Warning: could not guess file type, using 'dot'"
return 'unknown'
-def dot2plain(content, contenttype, use_codespeak=False):
- if contenttype == 'plain':
- # already a .plain file
- return content
+def dot2plain_graphviz(content, contenttype, use_codespeak=False):
+ if contenttype != 'neato':
+ cmdline = 'dot -Tplain'
+ else:
+ cmdline = 'neato -Tplain'
+ #print >> sys.stderr, '* running:', cmdline
+ close_fds = sys.platform != 'win32'
+ p = subprocess.Popen(cmdline, shell=True, close_fds=close_fds,
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ (child_in, child_out) = (p.stdin, p.stdout)
+ try:
+ import thread
+ except ImportError:
+ bkgndwrite(child_in, content)
+ else:
+ thread.start_new_thread(bkgndwrite, (child_in, content))
+ plaincontent = child_out.read()
+ child_out.close()
+ if not plaincontent: # 'dot' is likely not installed
+ raise PlainParseError("no result from running 'dot'")
+ return plaincontent
- if not use_codespeak:
- if contenttype != 'neato':
- cmdline = 'dot -Tplain'
- else:
- cmdline = 'neato -Tplain'
- #print >> sys.stderr, '* running:', cmdline
- close_fds = sys.platform != 'win32'
- p = subprocess.Popen(cmdline, shell=True, close_fds=close_fds,
- stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- (child_in, child_out) = (p.stdin, p.stdout)
- try:
- import thread
- except ImportError:
- bkgndwrite(child_in, content)
- else:
- thread.start_new_thread(bkgndwrite, (child_in, content))
- plaincontent = child_out.read()
- child_out.close()
- if not plaincontent: # 'dot' is likely not installed
- raise PlainParseError("no result from running 'dot'")
- else:
- import urllib
- request = urllib.urlencode({'dot': content})
- url = 'http://codespeak.net/pypy/convertdot.cgi'
- print >> sys.stderr, '* posting:', url
- g = urllib.urlopen(url, data=request)
- result = []
- while True:
- data = g.read(16384)
- if not data:
- break
- result.append(data)
- g.close()
- plaincontent = ''.join(result)
- # very simple-minded way to give a somewhat better error message
- if plaincontent.startswith('<body'):
- raise Exception("the dot on codespeak has very likely crashed")
+def dot2plain_codespeak(content, contenttype):
+ import urllib
+ request = urllib.urlencode({'dot': content})
+ url = 'http://codespeak.net/pypy/convertdot.cgi'
+ print >> sys.stderr, '* posting:', url
+ g = urllib.urlopen(url, data=request)
+ result = []
+ while True:
+ data = g.read(16384)
+ if not data:
+ break
+ result.append(data)
+ g.close()
+ plaincontent = ''.join(result)
+ # very simple-minded way to give a somewhat better error message
+ if plaincontent.startswith('<body'):
+ raise Exception("the dot on codespeak has very likely crashed")
return plaincontent
def bkgndwrite(f, data):
@@ -148,10 +145,13 @@
def parse_dot(graph_id, content, links={}, fixedfont=False):
contenttype = guess_type(content)
- try:
- plaincontent = dot2plain(content, contenttype, use_codespeak=False)
- return list(parse_plain(graph_id, plaincontent, links, fixedfont))
- except PlainParseError:
- # failed, retry via codespeak
- plaincontent = dot2plain(content, contenttype, use_codespeak=True)
- return list(parse_plain(graph_id, plaincontent, links, fixedfont))
+ if contenttype == 'plain':
+ plaincontent = content
+ else:
+ try:
+ plaincontent = dot2plain_graphviz(content, contenttype)
+ except PlainParseError, e:
+ print e
+ # failed, retry via codespeak
+ plaincontent = dot2plain_codespeak(content, contenttype)
+ return list(parse_plain(graph_id, plaincontent, links, fixedfont))
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -317,7 +317,7 @@
RegrTest('test_multibytecodec.py', usemodules='_multibytecodec'),
RegrTest('test_multibytecodec_support.py', skip="not a test"),
RegrTest('test_multifile.py'),
- RegrTest('test_multiprocessing.py', skip='FIXME leaves subprocesses'),
+ RegrTest('test_multiprocessing.py', skip="FIXME leaves subprocesses"),
RegrTest('test_mutants.py', core="possibly"),
RegrTest('test_mutex.py'),
RegrTest('test_netrc.py'),
diff --git a/lib-python/modified-2.7/gzip.py b/lib-python/modified-2.7/gzip.py
deleted file mode 100644
--- a/lib-python/modified-2.7/gzip.py
+++ /dev/null
@@ -1,514 +0,0 @@
-"""Functions that read and write gzipped files.
-
-The user of the file doesn't have to worry about the compression,
-but random access is not allowed."""
-
-# based on Andrew Kuchling's minigzip.py distributed with the zlib module
-
-import struct, sys, time, os
-import zlib
-import io
-import __builtin__
-
-__all__ = ["GzipFile","open"]
-
-FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
-
-READ, WRITE = 1, 2
-
-def write32u(output, value):
- # The L format writes the bit pattern correctly whether signed
- # or unsigned.
- output.write(struct.pack("<L", value))
-
-def read32(input):
- return struct.unpack("<I", input.read(4))[0]
-
-def open(filename, mode="rb", compresslevel=9):
- """Shorthand for GzipFile(filename, mode, compresslevel).
-
- The filename argument is required; mode defaults to 'rb'
- and compresslevel defaults to 9.
-
- """
- return GzipFile(filename, mode, compresslevel)
-
-class GzipFile(io.BufferedIOBase):
- """The GzipFile class simulates most of the methods of a file object with
- the exception of the readinto() and truncate() methods.
-
- """
-
- myfileobj = None
- max_read_chunk = 10 * 1024 * 1024 # 10Mb
-
- def __init__(self, filename=None, mode=None,
- compresslevel=9, fileobj=None, mtime=None):
- """Constructor for the GzipFile class.
-
- At least one of fileobj and filename must be given a
- non-trivial value.
-
- The new class instance is based on fileobj, which can be a regular
- file, a StringIO object, or any other object which simulates a file.
- It defaults to None, in which case filename is opened to provide
- a file object.
-
- When fileobj is not None, the filename argument is only used to be
- included in the gzip file header, which may includes the original
- filename of the uncompressed file. It defaults to the filename of
- fileobj, if discernible; otherwise, it defaults to the empty string,
- and in this case the original filename is not included in the header.
-
- The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
- depending on whether the file will be read or written. The default
- is the mode of fileobj if discernible; otherwise, the default is 'rb'.
- Be aware that only the 'rb', 'ab', and 'wb' values should be used
- for cross-platform portability.
-
- The compresslevel argument is an integer from 1 to 9 controlling the
- level of compression; 1 is fastest and produces the least compression,
- and 9 is slowest and produces the most compression. The default is 9.
-
- The mtime argument is an optional numeric timestamp to be written
- to the stream when compressing. All gzip compressed streams
- are required to contain a timestamp. If omitted or None, the
- current time is used. This module ignores the timestamp when
- decompressing; however, some programs, such as gunzip, make use
- of it. The format of the timestamp is the same as that of the
- return value of time.time() and of the st_mtime member of the
- object returned by os.stat().
-
- """
-
- # guarantee the file is opened in binary mode on platforms
- # that care about that sort of thing
- if mode and 'b' not in mode:
- mode += 'b'
- if fileobj is None:
- fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
- if filename is None:
- if hasattr(fileobj, 'name'): filename = fileobj.name
- else: filename = ''
- if mode is None:
- if hasattr(fileobj, 'mode'): mode = fileobj.mode
- else: mode = 'rb'
-
- if mode[0:1] == 'r':
- self.mode = READ
- # Set flag indicating start of a new member
- self._new_member = True
- # Buffer data read from gzip file. extrastart is offset in
- # stream where buffer starts. extrasize is number of
- # bytes remaining in buffer from current stream position.
- self.extrabuf = ""
- self.extrasize = 0
- self.extrastart = 0
- self.name = filename
- # Starts small, scales exponentially
- self.min_readsize = 100
-
- elif mode[0:1] == 'w' or mode[0:1] == 'a':
- self.mode = WRITE
- self._init_write(filename)
- self.compress = zlib.compressobj(compresslevel,
- zlib.DEFLATED,
- -zlib.MAX_WBITS,
- zlib.DEF_MEM_LEVEL,
- 0)
- else:
- raise IOError, "Mode " + mode + " not supported"
-
- self.fileobj = fileobj
- self.offset = 0
- self.mtime = mtime
-
- if self.mode == WRITE:
- self._write_gzip_header()
-
- @property
- def filename(self):
- import warnings
- warnings.warn("use the name attribute", DeprecationWarning, 2)
- if self.mode == WRITE and self.name[-3:] != ".gz":
- return self.name + ".gz"
- return self.name
-
- def __repr__(self):
- s = repr(self.fileobj)
- return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
-
- def _check_closed(self):
- """Raises a ValueError if the underlying file object has been closed.
-
- """
- if self.closed:
- raise ValueError('I/O operation on closed file.')
-
- def _init_write(self, filename):
- self.name = filename
- self.crc = zlib.crc32("") & 0xffffffffL
- self.size = 0
- self.writebuf = []
- self.bufsize = 0
-
- def _write_gzip_header(self):
- self.fileobj.write('\037\213') # magic header
- self.fileobj.write('\010') # compression method
- fname = os.path.basename(self.name)
- if fname.endswith(".gz"):
- fname = fname[:-3]
- flags = 0
- if fname:
- flags = FNAME
- self.fileobj.write(chr(flags))
- mtime = self.mtime
- if mtime is None:
- mtime = time.time()
- write32u(self.fileobj, long(mtime))
- self.fileobj.write('\002')
- self.fileobj.write('\377')
- if fname:
- self.fileobj.write(fname + '\000')
-
- def _init_read(self):
- self.crc = zlib.crc32("") & 0xffffffffL
- self.size = 0
-
- def _read_gzip_header(self):
- magic = self.fileobj.read(2)
- if magic != '\037\213':
- raise IOError, 'Not a gzipped file'
- method = ord( self.fileobj.read(1) )
- if method != 8:
- raise IOError, 'Unknown compression method'
- flag = ord( self.fileobj.read(1) )
- self.mtime = read32(self.fileobj)
- # extraflag = self.fileobj.read(1)
- # os = self.fileobj.read(1)
- self.fileobj.read(2)
-
- if flag & FEXTRA:
- # Read & discard the extra field, if present
- xlen = ord(self.fileobj.read(1))
- xlen = xlen + 256*ord(self.fileobj.read(1))
- self.fileobj.read(xlen)
- if flag & FNAME:
- # Read and discard a null-terminated string containing the filename
- while True:
- s = self.fileobj.read(1)
- if not s or s=='\000':
- break
- if flag & FCOMMENT:
- # Read and discard a null-terminated string containing a comment
- while True:
- s = self.fileobj.read(1)
- if not s or s=='\000':
- break
- if flag & FHCRC:
- self.fileobj.read(2) # Read & discard the 16-bit header CRC
-
- def write(self,data):
- self._check_closed()
- if self.mode != WRITE:
- import errno
- raise IOError(errno.EBADF, "write() on read-only GzipFile object")
-
- if self.fileobj is None:
- raise ValueError, "write() on closed GzipFile object"
-
- # Convert data type if called by io.BufferedWriter.
- if isinstance(data, memoryview):
- data = data.tobytes()
-
- if len(data) > 0:
- self.size = self.size + len(data)
- self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
- self.fileobj.write( self.compress.compress(data) )
- self.offset += len(data)
-
- return len(data)
-
- def read(self, size=-1):
- self._check_closed()
- if self.mode != READ:
- import errno
- raise IOError(errno.EBADF, "read() on write-only GzipFile object")
-
- if self.extrasize <= 0 and self.fileobj is None:
- return ''
-
- readsize = 1024
- if size < 0: # get the whole thing
- try:
- while True:
- self._read(readsize)
- readsize = min(self.max_read_chunk, readsize * 2)
- except EOFError:
- size = self.extrasize
- elif size == 0:
- return ""
- else: # just get some more of it
- try:
- while size > self.extrasize:
- self._read(readsize)
- readsize = min(self.max_read_chunk, readsize * 2)
- except EOFError:
- if size > self.extrasize:
- size = self.extrasize
-
- offset = self.offset - self.extrastart
- chunk = self.extrabuf[offset: offset + size]
- self.extrasize = self.extrasize - size
-
- self.offset += size
- return chunk
-
- def _unread(self, buf):
- self.extrasize = len(buf) + self.extrasize
- self.offset -= len(buf)
-
- def _read(self, size=1024):
- if self.fileobj is None:
- raise EOFError, "Reached EOF"
-
- if self._new_member:
- # If the _new_member flag is set, we have to
- # jump to the next member, if there is one.
- #
- # First, check if we're at the end of the file;
- # if so, it's time to stop; no more members to read.
- pos = self.fileobj.tell() # Save current position
- self.fileobj.seek(0, 2) # Seek to end of file
- if pos == self.fileobj.tell():
- raise EOFError, "Reached EOF"
- else:
- self.fileobj.seek( pos ) # Return to original position
-
- self._init_read()
- self._read_gzip_header()
- self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
- self._new_member = False
-
- # Read a chunk of data from the file
- buf = self.fileobj.read(size)
-
- # If the EOF has been reached, flush the decompression object
- # and mark this object as finished.
-
- if buf == "":
- uncompress = self.decompress.flush()
- self._read_eof()
- self._add_read_data( uncompress )
- raise EOFError, 'Reached EOF'
-
- uncompress = self.decompress.decompress(buf)
- self._add_read_data( uncompress )
-
- if self.decompress.unused_data != "":
- # Ending case: we've come to the end of a member in the file,
- # so seek back to the start of the unused data, finish up
- # this member, and read a new gzip header.
- # (The number of bytes to seek back is the length of the unused
- # data, minus 8 because _read_eof() will rewind a further 8 bytes)
- self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
-
- # Check the CRC and file size, and set the flag so we read
- # a new member on the next call
- self._read_eof()
- self._new_member = True
-
- def _add_read_data(self, data):
- self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
- offset = self.offset - self.extrastart
- self.extrabuf = self.extrabuf[offset:] + data
- self.extrasize = self.extrasize + len(data)
- self.extrastart = self.offset
- self.size = self.size + len(data)
-
- def _read_eof(self):
- # We've read to the end of the file, so we have to rewind in order
- # to reread the 8 bytes containing the CRC and the file size.
- # We check the that the computed CRC and size of the
- # uncompressed data matches the stored values. Note that the size
- # stored is the true file size mod 2**32.
- self.fileobj.seek(-8, 1)
- crc32 = read32(self.fileobj)
- isize = read32(self.fileobj) # may exceed 2GB
- if crc32 != self.crc:
- raise IOError("CRC check failed %s != %s" % (hex(crc32),
- hex(self.crc)))
- elif isize != (self.size & 0xffffffffL):
- raise IOError, "Incorrect length of data produced"
-
- # Gzip files can be padded with zeroes and still have archives.
- # Consume all zero bytes and set the file position to the first
- # non-zero byte. See http://www.gzip.org/#faq8
- c = "\x00"
- while c == "\x00":
- c = self.fileobj.read(1)
- if c:
- self.fileobj.seek(-1, 1)
-
- @property
- def closed(self):
- return self.fileobj is None
-
- def close(self):
- if self.fileobj is None:
- return
- if self.mode == WRITE:
- self.fileobj.write(self.compress.flush())
- write32u(self.fileobj, self.crc)
- # self.size may exceed 2GB, or even 4GB
- write32u(self.fileobj, self.size & 0xffffffffL)
- self.fileobj = None
- elif self.mode == READ:
- self.fileobj = None
- if self.myfileobj:
- self.myfileobj.close()
- self.myfileobj = None
-
- def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
- self._check_closed()
- if self.mode == WRITE:
- # Ensure the compressor's buffer is flushed
- self.fileobj.write(self.compress.flush(zlib_mode))
- self.fileobj.flush()
-
- def fileno(self):
- """Invoke the underlying file object's fileno() method.
-
- This will raise AttributeError if the underlying file object
- doesn't support fileno().
- """
- return self.fileobj.fileno()
-
- def rewind(self):
- '''Return the uncompressed stream file position indicator to the
- beginning of the file'''
- if self.mode != READ:
- raise IOError("Can't rewind in write mode")
- self.fileobj.seek(0)
- self._new_member = True
- self.extrabuf = ""
- self.extrasize = 0
- self.extrastart = 0
- self.offset = 0
-
- def readable(self):
- return self.mode == READ
-
- def writable(self):
- return self.mode == WRITE
-
- def seekable(self):
- return True
-
- def seek(self, offset, whence=0):
- if whence:
- if whence == 1:
- offset = self.offset + offset
- else:
- raise ValueError('Seek from end not supported')
- if self.mode == WRITE:
- if offset < self.offset:
- raise IOError('Negative seek in write mode')
- count = offset - self.offset
- for i in range(count // 1024):
- self.write(1024 * '\0')
- self.write((count % 1024) * '\0')
- elif self.mode == READ:
- if offset == self.offset:
- self.read(0) # to make sure that this file is open
- return self.offset
- if offset < self.offset:
- # for negative seek, rewind and do positive seek
- self.rewind()
- count = offset - self.offset
- for i in range(count // 1024):
- self.read(1024)
- self.read(count % 1024)
-
- return self.offset
-
- def readline(self, size=-1):
- if size < 0:
- # Shortcut common case - newline found in buffer.
- offset = self.offset - self.extrastart
- i = self.extrabuf.find('\n', offset) + 1
- if i > 0:
- self.extrasize -= i - offset
- self.offset += i - offset
- return self.extrabuf[offset: i]
-
- size = sys.maxint
- readsize = self.min_readsize
- else:
- readsize = size
- bufs = []
- while size != 0:
- c = self.read(readsize)
- i = c.find('\n')
-
- # We set i=size to break out of the loop under two
- # conditions: 1) there's no newline, and the chunk is
- # larger than size, or 2) there is a newline, but the
- # resulting line would be longer than 'size'.
- if (size <= i) or (i == -1 and len(c) > size):
- i = size - 1
-
- if i >= 0 or c == '':
- bufs.append(c[:i + 1]) # Add portion of last chunk
- self._unread(c[i + 1:]) # Push back rest of chunk
- break
-
- # Append chunk to list, decrease 'size',
- bufs.append(c)
- size = size - len(c)
- readsize = min(size, readsize * 2)
- if readsize > self.min_readsize:
- self.min_readsize = min(readsize, self.min_readsize * 2, 512)
- return ''.join(bufs) # Return resulting line
-
-
-def _test():
- # Act like gzip; with -d, act like gunzip.
- # The input file is not deleted, however, nor are any other gzip
- # options or features supported.
- args = sys.argv[1:]
- decompress = args and args[0] == "-d"
- if decompress:
- args = args[1:]
- if not args:
- args = ["-"]
- for arg in args:
- if decompress:
- if arg == "-":
- f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
- g = sys.stdout
- else:
- if arg[-3:] != ".gz":
- print "filename doesn't end in .gz:", repr(arg)
- continue
- f = open(arg, "rb")
- g = __builtin__.open(arg[:-3], "wb")
- else:
- if arg == "-":
- f = sys.stdin
- g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
- else:
- f = __builtin__.open(arg, "rb")
- g = open(arg + ".gz", "wb")
- while True:
- chunk = f.read(1024)
- if not chunk:
- break
- g.write(chunk)
- if g is not sys.stdout:
- g.close()
- if f is not sys.stdin:
- f.close()
-
-if __name__ == '__main__':
- _test()
diff --git a/lib-python/modified-2.7/tarfile.py b/lib-python/modified-2.7/tarfile.py
--- a/lib-python/modified-2.7/tarfile.py
+++ b/lib-python/modified-2.7/tarfile.py
@@ -252,8 +252,8 @@
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
- unsigned_chksum = 256 + sum(struct.unpack("148B8x356B", buf[:512]))
- signed_chksum = 256 + sum(struct.unpack("148b8x356b", buf[:512]))
+ unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
+ signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
@@ -265,6 +265,7 @@
if length is None:
shutil.copyfileobj(src, dst)
return
+
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in xrange(blocks):
@@ -801,19 +802,19 @@
if self.closed:
raise ValueError("I/O operation on closed file")
+ buf = ""
if self.buffer:
if size is None:
- buf = self.buffer + self.fileobj.read()
+ buf = self.buffer
self.buffer = ""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
- buf += self.fileobj.read(size - len(buf))
+
+ if size is None:
+ buf += self.fileobj.read()
else:
- if size is None:
- buf = self.fileobj.read()
- else:
- buf = self.fileobj.read(size)
+ buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
diff --git a/lib-python/modified-2.7/test/test_multiprocessing.py b/lib-python/modified-2.7/test/test_multiprocessing.py
--- a/lib-python/modified-2.7/test/test_multiprocessing.py
+++ b/lib-python/modified-2.7/test/test_multiprocessing.py
@@ -510,7 +510,6 @@
p.join()
- @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
def test_qsize(self):
q = self.Queue()
try:
@@ -532,7 +531,6 @@
time.sleep(DELTA)
q.task_done()
- @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
def test_task_done(self):
queue = self.JoinableQueue()
@@ -1091,7 +1089,6 @@
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
- @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
@@ -1280,7 +1277,6 @@
queue = manager.get_queue()
queue.put('hello world')
- @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
@@ -1297,6 +1293,7 @@
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
+ test_support.gc_collect()
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
@@ -1573,7 +1570,6 @@
ALLOWED_TYPES = ('processes',)
- @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
def test_heap(self):
iterations = 5000
maxblocks = 50
diff --git a/lib-python/modified-2.7/test/test_sys_settrace.py b/lib-python/modified-2.7/test/test_sys_settrace.py
--- a/lib-python/modified-2.7/test/test_sys_settrace.py
+++ b/lib-python/modified-2.7/test/test_sys_settrace.py
@@ -286,11 +286,11 @@
self.compare_events(func.func_code.co_firstlineno,
tracer.events, func.events)
- def set_and_retrieve_none(self):
+ def test_set_and_retrieve_none(self):
sys.settrace(None)
assert sys.gettrace() is None
- def set_and_retrieve_func(self):
+ def test_set_and_retrieve_func(self):
def fn(*args):
pass
diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py
--- a/lib_pypy/_functools.py
+++ b/lib_pypy/_functools.py
@@ -18,5 +18,5 @@
def __call__(self, *fargs, **fkeywords):
if self.keywords is not None:
- fkeywords.update(self.keywords)
+ fkeywords = dict(self.keywords, **fkeywords)
return self.func(*(self.args + fargs), **fkeywords)
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -96,7 +96,16 @@
@property
def gr_frame(self):
- raise NotImplementedError("attribute 'gr_frame' of greenlet objects")
+ # xxx this doesn't work when called on either the current or
+ # the main greenlet of another thread
+ if self is getcurrent():
+ return None
+ if self.__main:
+ self = getcurrent()
+ f = _continulet.__reduce__(self)[2][0]
+ if not f:
+ return None
+ return f.f_back.f_back.f_back # go past start(), __switch(), switch()
# ____________________________________________________________
# Internal stuff
diff --git a/lib_pypy/pypy_test/test_stackless_pickling.py b/lib_pypy/pypy_test/test_stackless_pickling.py
--- a/lib_pypy/pypy_test/test_stackless_pickling.py
+++ b/lib_pypy/pypy_test/test_stackless_pickling.py
@@ -1,7 +1,3 @@
-"""
-this test should probably not run from CPython or py.py.
-I'm not entirely sure, how to do that.
-"""
from __future__ import absolute_import
from py.test import skip
try:
@@ -16,11 +12,15 @@
class Test_StacklessPickling:
+ def test_pickle_main_coroutine(self):
+ import stackless, pickle
+ s = pickle.dumps(stackless.coroutine.getcurrent())
+ print s
+ c = pickle.loads(s)
+ assert c is stackless.coroutine.getcurrent()
+
def test_basic_tasklet_pickling(self):
- try:
- import stackless
- except ImportError:
- skip("can't load stackless and don't know why!!!")
+ import stackless
from stackless import run, schedule, tasklet
import pickle
diff --git a/lib_pypy/pyrepl/completing_reader.py b/lib_pypy/pyrepl/completing_reader.py
--- a/lib_pypy/pyrepl/completing_reader.py
+++ b/lib_pypy/pyrepl/completing_reader.py
@@ -229,7 +229,8 @@
def after_command(self, cmd):
super(CompletingReader, self).after_command(cmd)
- if not isinstance(cmd, complete) and not isinstance(cmd, self_insert):
+ if not isinstance(cmd, self.commands['complete']) \
+ and not isinstance(cmd, self.commands['self_insert']):
self.cmpltn_reset()
def calc_screen(self):
diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py
--- a/lib_pypy/stackless.py
+++ b/lib_pypy/stackless.py
@@ -5,7 +5,6 @@
"""
-import traceback
import _continuation
class TaskletExit(Exception):
@@ -14,33 +13,46 @@
CoroutineExit = TaskletExit
+def _coroutine_getcurrent():
+ "Returns the current coroutine (i.e. the one which called this function)."
+ try:
+ return _tls.current_coroutine
+ except AttributeError:
+ # first call in this thread: current == main
+ return _coroutine_getmain()
+
+def _coroutine_getmain():
+ try:
+ return _tls.main_coroutine
+ except AttributeError:
+ # create the main coroutine for this thread
+ continulet = _continuation.continulet
+ main = coroutine()
+ main._frame = continulet.__new__(continulet)
+ main._is_started = -1
+ _tls.current_coroutine = _tls.main_coroutine = main
+ return _tls.main_coroutine
+
+
class coroutine(object):
- "we can't have continulet as a base, because continulets can't be rebound"
+ _is_started = 0 # 0=no, 1=yes, -1=main
def __init__(self):
self._frame = None
- self.is_zombie = False
-
- def __getattr__(self, attr):
- return getattr(self._frame, attr)
-
- def __del__(self):
- self.is_zombie = True
- del self._frame
- self._frame = None
def bind(self, func, *argl, **argd):
"""coro.bind(f, *argl, **argd) -> None.
binds function f to coro. f will be called with
arguments *argl, **argd
"""
- if self._frame is None or not self._frame.is_pending():
- def run(c):
- _tls.current_coroutine = self
- return func(*argl, **argd)
- self._frame = frame = _continuation.continulet(run)
- else:
+ if self.is_alive:
raise ValueError("cannot bind a bound coroutine")
+ def run(c):
+ _tls.current_coroutine = self
+ self._is_started = 1
+ return func(*argl, **argd)
+ self._is_started = 0
+ self._frame = _continuation.continulet(run)
def switch(self):
"""coro.switch() -> returnvalue
@@ -48,7 +60,7 @@
f finishes, the returnvalue is that of f, otherwise
None is returned
"""
- current = _getcurrent()
+ current = _coroutine_getcurrent()
try:
current._frame.switch(to=self._frame)
finally:
@@ -56,37 +68,30 @@
def kill(self):
"""coro.kill() : kill coroutine coro"""
- current = _getcurrent()
+ current = _coroutine_getcurrent()
try:
current._frame.throw(CoroutineExit, to=self._frame)
finally:
_tls.current_coroutine = current
- def _is_alive(self):
- if self._frame is None:
- return False
- return not self._frame.is_pending()
- is_alive = property(_is_alive)
- del _is_alive
+ @property
+ def is_alive(self):
+ return self._is_started < 0 or (
+ self._frame is not None and self._frame.is_pending())
- def getcurrent():
- """coroutine.getcurrent() -> the currently running coroutine"""
- return _getcurrent()
- getcurrent = staticmethod(getcurrent)
+ @property
+ def is_zombie(self):
+ return self._is_started > 0 and not self._frame.is_pending()
+
+ getcurrent = staticmethod(_coroutine_getcurrent)
def __reduce__(self):
- raise TypeError, 'pickling is not possible based upon continulets'
+ if self._is_started < 0:
+ return _coroutine_getmain, ()
+ else:
+ return type(self), (), self.__dict__
-def _getcurrent():
- "Returns the current coroutine (i.e. the one which called this function)."
- try:
- return _tls.current_coroutine
- except AttributeError:
- # first call in this thread: current == main
- _coroutine_create_main()
- return _tls.current_coroutine
-
try:
from thread import _local
except ImportError:
@@ -95,14 +100,8 @@
_tls = _local()
-def _coroutine_create_main():
- # create the main coroutine for this thread
- _tls.current_coroutine = None
- main_coroutine = coroutine()
- typ = _continuation.continulet
- main_coroutine._frame = typ.__new__(typ)
- _tls.main_coroutine = main_coroutine
- _tls.current_coroutine = main_coroutine
+
+# ____________________________________________________________
from collections import deque
@@ -148,10 +147,7 @@
_last_task = next
assert not next.blocked
if next is not current:
- #try:
- next.switch()
- #except CoroutineExit: --- they are the same anyway
- # raise TaskletExit
+ next.switch()
return current
def set_schedule_callback(callback):
@@ -175,34 +171,6 @@
raise self.type, self.value, self.traceback
#
-# helpers for pickling
-#
-
-_stackless_primitive_registry = {}
-
-def register_stackless_primitive(thang, retval_expr='None'):
- import types
- func = thang
- if isinstance(thang, types.MethodType):
- func = thang.im_func
- code = func.func_code
- _stackless_primitive_registry[code] = retval_expr
- # It is not too nice to attach info via the code object, but
- # I can't think of a better solution without a real transform.
-
-def rewrite_stackless_primitive(coro_state, alive, tempval):
- flags, frame, thunk, parent = coro_state
- while frame is not None:
- retval_expr = _stackless_primitive_registry.get(frame.f_code)
- if retval_expr:
- # this tasklet needs to stop pickling here and return its value.
- tempval = eval(retval_expr, globals(), frame.f_locals)
- coro_state = flags, frame, thunk, parent
- break
- frame = frame.f_back
- return coro_state, alive, tempval
-
-#
#
class channel(object):
@@ -354,8 +322,6 @@
"""
return self._channel_action(None, -1)
- register_stackless_primitive(receive, retval_expr='receiver.tempval')
-
def send_exception(self, exp_type, msg):
self.send(bomb(exp_type, exp_type(msg)))
@@ -372,9 +338,8 @@
the runnables list.
"""
return self._channel_action(msg, 1)
-
- register_stackless_primitive(send)
-
+
+
class tasklet(coroutine):
"""
A tasklet object represents a tiny task in a Python thread.
@@ -456,7 +421,7 @@
self.func = None
coroutine.bind(self, _func)
- back = _getcurrent()
+ back = _coroutine_getcurrent()
coroutine.switch(self)
self.alive = True
_scheduler_append(self)
@@ -480,39 +445,6 @@
raise RuntimeError, "The current tasklet cannot be removed."
# not sure if I will revive this " Use t=tasklet().capture()"
_scheduler_remove(self)
-
- def __reduce__(self):
- one, two, coro_state = coroutine.__reduce__(self)
- assert one is coroutine
- assert two == ()
- # we want to get rid of the parent thing.
- # for now, we just drop it
- a, frame, c, d = coro_state
-
- # Removing all frames related to stackless.py.
- # They point to stuff we don't want to be pickled.
-
- pickleframe = frame
- while frame is not None:
- if frame.f_code == schedule.func_code:
- # Removing everything including and after the
- # call to stackless.schedule()
- pickleframe = frame.f_back
- break
- frame = frame.f_back
- if d:
- assert isinstance(d, coroutine)
- coro_state = a, pickleframe, c, None
- coro_state, alive, tempval = rewrite_stackless_primitive(coro_state, self.alive, self.tempval)
- inst_dict = self.__dict__.copy()
- inst_dict.pop('tempval', None)
- return self.__class__, (), (coro_state, alive, tempval, inst_dict)
-
- def __setstate__(self, (coro_state, alive, tempval, inst_dict)):
- coroutine.__setstate__(self, coro_state)
- self.__dict__.update(inst_dict)
- self.alive = alive
- self.tempval = tempval
def getmain():
"""
@@ -601,30 +533,7 @@
global _last_task
_global_task_id = 0
_main_tasklet = coroutine.getcurrent()
- try:
- _main_tasklet.__class__ = tasklet
- except TypeError: # we are running pypy-c
- class TaskletProxy(object):
- """TaskletProxy is needed to give the _main_coroutine tasklet behaviour"""
- def __init__(self, coro):
- self._coro = coro
-
- def __getattr__(self,attr):
- return getattr(self._coro,attr)
-
- def __str__(self):
- return '<tasklet %s a:%s>' % (self._task_id, self.is_alive)
-
- def __reduce__(self):
- return getmain, ()
-
- __repr__ = __str__
-
-
- global _main_coroutine
- _main_coroutine = _main_tasklet
- _main_tasklet = TaskletProxy(_main_tasklet)
- assert _main_tasklet.is_alive and not _main_tasklet.is_zombie
+ _main_tasklet.__class__ = tasklet # XXX HAAAAAAAAAAAAAAAAAAAAACK
_last_task = _main_tasklet
tasklet._init.im_func(_main_tasklet, label='main')
_squeue = deque()
diff --git a/py/_code/source.py b/py/_code/source.py
--- a/py/_code/source.py
+++ b/py/_code/source.py
@@ -139,7 +139,7 @@
trysource = self[start:end]
if trysource.isparseable():
return start, end
- return start, end
+ return start, len(self)
def getblockend(self, lineno):
# XXX
diff --git a/pypy/annotation/policy.py b/pypy/annotation/policy.py
--- a/pypy/annotation/policy.py
+++ b/pypy/annotation/policy.py
@@ -1,6 +1,6 @@
# base annotation policy for specialization
from pypy.annotation.specialize import default_specialize as default
-from pypy.annotation.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype
+from pypy.annotation.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype, specialize_arg_or_var
from pypy.annotation.specialize import memo, specialize_call_location
# for some reason, model must be imported first,
# or we create a cycle.
@@ -73,6 +73,7 @@
default_specialize = staticmethod(default)
specialize__memo = staticmethod(memo)
specialize__arg = staticmethod(specialize_argvalue) # specialize:arg(N)
+ specialize__arg_or_var = staticmethod(specialize_arg_or_var)
specialize__argtype = staticmethod(specialize_argtype) # specialize:argtype(N)
specialize__arglistitemtype = staticmethod(specialize_arglistitemtype)
specialize__call_location = staticmethod(specialize_call_location)
diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py
--- a/pypy/annotation/specialize.py
+++ b/pypy/annotation/specialize.py
@@ -353,6 +353,16 @@
key = tuple(key)
return maybe_star_args(funcdesc, key, args_s)
+def specialize_arg_or_var(funcdesc, args_s, *argindices):
+ for argno in argindices:
+ if not args_s[argno].is_constant():
+ break
+ else:
+ # all constant
+ return specialize_argvalue(funcdesc, args_s, *argindices)
+ # some not constant
+ return maybe_star_args(funcdesc, None, args_s)
+
def specialize_argtype(funcdesc, args_s, *argindices):
key = tuple([args_s[i].knowntype for i in argindices])
for cls in key:
diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py
--- a/pypy/annotation/test/test_annrpython.py
+++ b/pypy/annotation/test/test_annrpython.py
@@ -1194,6 +1194,20 @@
assert len(executedesc._cache[(0, 'star', 2)].startblock.inputargs) == 4
assert len(executedesc._cache[(1, 'star', 3)].startblock.inputargs) == 5
+ def test_specialize_arg_or_var(self):
+ def f(a):
+ return 1
+ f._annspecialcase_ = 'specialize:arg_or_var(0)'
+
+ def fn(a):
+ return f(3) + f(a)
+
+ a = self.RPythonAnnotator()
+ a.build_types(fn, [int])
+ executedesc = a.bookkeeper.getdesc(f)
+ assert sorted(executedesc._cache.keys()) == [None, (3,)]
+ # we got two different special
+
def test_specialize_call_location(self):
def g(a):
return a
@@ -3190,6 +3204,8 @@
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeList)
assert not s.listdef.listitem.resized
+ assert not s.listdef.listitem.immutable
+ assert s.listdef.listitem.mutated
def test_delslice(self):
def f():
diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py
--- a/pypy/annotation/unaryop.py
+++ b/pypy/annotation/unaryop.py
@@ -352,6 +352,7 @@
check_negative_slice(s_start, s_stop)
if not isinstance(s_iterable, SomeList):
raise Exception("list[start:stop] = x: x must be a list")
+ lst.listdef.mutate()
lst.listdef.agree(s_iterable.listdef)
# note that setslice is not allowed to resize a list in RPython
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -53,6 +53,18 @@
this is an ideal task to get started, because it does not require any deep
knowledge of the internals.
+Optimized Unicode Representation
+--------------------------------
+
+CPython 3.3 will use an `optimized unicode representation`_ which switches between
+different ways to represent a unicode string, depending on whether the string
+fits into ASCII, has only two-byte characters or needs four-byte characters.
+
+The actual details would be rather different in PyPy, but we would like to have
+the same optimization implemented.
+
+.. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/
+
Translation Toolchain
---------------------
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -3,12 +3,12 @@
from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag
from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction
from pypy.interpreter.error import OperationError, operationerrfmt
-from pypy.interpreter.error import new_exception_class
+from pypy.interpreter.error import new_exception_class, typed_unwrap_error_msg
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals
from pypy.tool.cache import Cache
from pypy.tool.uid import HUGEVAL_BYTES
-from pypy.rlib.objectmodel import we_are_translated, newlist
+from pypy.rlib.objectmodel import we_are_translated, newlist, compute_unique_id
from pypy.rlib.debug import make_sure_not_resized
from pypy.rlib.timer import DummyTimer, Timer
from pypy.rlib.rarithmetic import r_uint
@@ -186,6 +186,28 @@
def _set_mapdict_storage_and_map(self, storage, map):
raise NotImplementedError
+ # -------------------------------------------------------------------
+
+ def str_w(self, space):
+ w_msg = typed_unwrap_error_msg(space, "string", self)
+ raise OperationError(space.w_TypeError, w_msg)
+
+ def unicode_w(self, space):
+ raise OperationError(space.w_TypeError,
+ typed_unwrap_error_msg(space, "unicode", self))
+
+ def int_w(self, space):
+ raise OperationError(space.w_TypeError,
+ typed_unwrap_error_msg(space, "integer", self))
+
+ def uint_w(self, space):
+ raise OperationError(space.w_TypeError,
+ typed_unwrap_error_msg(space, "integer", self))
+
+ def bigint_w(self, space):
+ raise OperationError(space.w_TypeError,
+ typed_unwrap_error_msg(space, "integer", self))
+
class Wrappable(W_Root):
"""A subclass of Wrappable is an internal, interpreter-level class
@@ -901,7 +923,7 @@
ec.c_call_trace(frame, w_func, args)
try:
w_res = self.call_args(w_func, args)
- except OperationError, e:
+ except OperationError:
ec.c_exception_trace(frame, w_func)
raise
ec.c_return_trace(frame, w_func, args)
@@ -947,6 +969,9 @@
def isinstance_w(self, w_obj, w_type):
return self.is_true(self.isinstance(w_obj, w_type))
+ def id(self, w_obj):
+ return self.wrap(compute_unique_id(w_obj))
+
# The code below only works
# for the simple case (new-style instance).
# These methods are patched with the full logic by the __builtin__
@@ -999,8 +1024,6 @@
def eval(self, expression, w_globals, w_locals, hidden_applevel=False):
"NOT_RPYTHON: For internal debugging."
- import types
- from pypy.interpreter.pycode import PyCode
if isinstance(expression, str):
compiler = self.createcompiler()
expression = compiler.compile(expression, '?', 'eval', 0,
@@ -1012,7 +1035,6 @@
def exec_(self, statement, w_globals, w_locals, hidden_applevel=False,
filename=None):
"NOT_RPYTHON: For internal debugging."
- import types
if filename is None:
filename = '?'
from pypy.interpreter.pycode import PyCode
@@ -1210,6 +1232,18 @@
return None
return self.str_w(w_obj)
+ def str_w(self, w_obj):
+ return w_obj.str_w(self)
+
+ def int_w(self, w_obj):
+ return w_obj.int_w(self)
+
+ def uint_w(self, w_obj):
+ return w_obj.uint_w(self)
+
+ def bigint_w(self, w_obj):
+ return w_obj.bigint_w(self)
+
def realstr_w(self, w_obj):
# Like str_w, but only works if w_obj is really of type 'str'.
if not self.is_true(self.isinstance(w_obj, self.w_str)):
@@ -1217,6 +1251,9 @@
self.wrap('argument must be a string'))
return self.str_w(w_obj)
+ def unicode_w(self, w_obj):
+ return w_obj.unicode_w(self)
+
def realunicode_w(self, w_obj):
# Like unicode_w, but only works if w_obj is really of type
# 'unicode'.
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -458,3 +458,7 @@
if module:
space.setattr(w_exc, space.wrap("__module__"), space.wrap(module))
return w_exc
+
+def typed_unwrap_error_msg(space, expected, w_obj):
+ type_name = space.type(w_obj).getname(space)
+ return space.wrap("expected %s, got %s object" % (expected, type_name))
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -175,6 +175,9 @@
self.w_tracefunc = w_func
self.space.frame_trace_action.fire()
+ def gettrace(self):
+ return self.w_tracefunc
+
def setprofile(self, w_func):
"""Set the global trace function."""
if self.space.is_w(w_func, self.space.w_None):
@@ -307,7 +310,11 @@
self._nonperiodic_actions = []
self.has_bytecode_counter = False
self.fired_actions = None
- self.checkinterval_scaled = 100 * TICK_COUNTER_STEP
+ # the default value is not 100, unlike CPython 2.7, but a much
+ # larger value, because we use a technique that not only allows
+ # but actually *forces* another thread to run whenever the counter
+ # reaches zero.
+ self.checkinterval_scaled = 10000 * TICK_COUNTER_STEP
self._rebuild_action_dispatcher()
def fire(self, action):
@@ -346,6 +353,7 @@
elif interval > MAX:
interval = MAX
self.checkinterval_scaled = interval * TICK_COUNTER_STEP
+ self.reset_ticker(-1)
def _rebuild_action_dispatcher(self):
periodic_actions = unrolling_iterable(self._periodic_actions)
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -66,7 +66,7 @@
make_sure_not_resized(self.locals_stack_w)
check_nonneg(self.nlocals)
#
- if space.config.objspace.honor__builtins__ and w_globals is not None:
+ if space.config.objspace.honor__builtins__:
self.builtin = space.builtin.pick_builtin(w_globals)
# regular functions always have CO_OPTIMIZED and CO_NEWLOCALS.
# class bodies only have CO_NEWLOCALS.
diff --git a/pypy/interpreter/test/test_executioncontext.py b/pypy/interpreter/test/test_executioncontext.py
--- a/pypy/interpreter/test/test_executioncontext.py
+++ b/pypy/interpreter/test/test_executioncontext.py
@@ -42,6 +42,7 @@
assert i == 9
def test_periodic_action(self):
+ from pypy.interpreter.executioncontext import ActionFlag
class DemoAction(executioncontext.PeriodicAsyncAction):
counter = 0
@@ -53,17 +54,20 @@
space = self.space
a2 = DemoAction(space)
- space.actionflag.register_periodic_action(a2, True)
try:
- for i in range(500):
- space.appexec([], """():
- n = 5
- return n + 2
- """)
- except Finished:
- pass
- checkinterval = space.actionflag.getcheckinterval()
- assert checkinterval / 10 < i < checkinterval * 1.1
+ space.actionflag.setcheckinterval(100)
+ space.actionflag.register_periodic_action(a2, True)
+ try:
+ for i in range(500):
+ space.appexec([], """():
+ n = 5
+ return n + 2
+ """)
+ except Finished:
+ pass
+ finally:
+ space.actionflag = ActionFlag() # reset to default
+ assert 10 < i < 110
def test_llprofile(self):
l = []
diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py
--- a/pypy/jit/codewriter/effectinfo.py
+++ b/pypy/jit/codewriter/effectinfo.py
@@ -74,6 +74,7 @@
OS_LLONG_UGE = 91
OS_LLONG_URSHIFT = 92
OS_LLONG_FROM_UINT = 93
+ OS_LLONG_U_TO_FLOAT = 94
#
OS_MATH_SQRT = 100
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py
--- a/pypy/jit/codewriter/jtransform.py
+++ b/pypy/jit/codewriter/jtransform.py
@@ -440,6 +440,7 @@
rewrite_op_ullong_mod_zer = _do_builtin_call
rewrite_op_gc_identityhash = _do_builtin_call
rewrite_op_gc_id = _do_builtin_call
+ rewrite_op_uint_mod = _do_builtin_call
# ----------
# getfield/setfield/mallocs etc.
@@ -883,9 +884,15 @@
v = v_arg
oplist = []
if unsigned1:
- opname = 'cast_uint_to_longlong'
+ if unsigned2:
+ opname = 'cast_uint_to_ulonglong'
+ else:
+ opname = 'cast_uint_to_longlong'
else:
- opname = 'cast_int_to_longlong'
+ if unsigned2:
+ opname = 'cast_int_to_ulonglong'
+ else:
+ opname = 'cast_int_to_longlong'
op2 = self.rewrite_operation(
SpaceOperation(opname, [v], v_result)
)
@@ -995,6 +1002,21 @@
return op2
''' % (_op, _oopspec.lower(), _oopspec, _oopspec)).compile()
+ for _op, _oopspec in [('cast_int_to_ulonglong', 'FROM_INT'),
+ ('cast_uint_to_ulonglong', 'FROM_UINT'),
+ ('cast_float_to_ulonglong', 'FROM_FLOAT'),
+ ('cast_ulonglong_to_float', 'U_TO_FLOAT'),
+ ]:
+ exec py.code.Source('''
+ def rewrite_op_%s(self, op):
+ args = op.args
+ op1 = self.prepare_builtin_call(op, "ullong_%s", args)
+ op2 = self._handle_oopspec_call(op1, args,
+ EffectInfo.OS_LLONG_%s,
+ EffectInfo.EF_ELIDABLE_CANNOT_RAISE)
+ return op2
+ ''' % (_op, _oopspec.lower(), _oopspec)).compile()
+
def _normalize(self, oplist):
if isinstance(oplist, SpaceOperation):
return [oplist]
diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py
--- a/pypy/jit/codewriter/support.py
+++ b/pypy/jit/codewriter/support.py
@@ -315,18 +315,30 @@
def _ll_1_llong_from_int(x):
return r_longlong(intmask(x))
+def _ll_1_ullong_from_int(x):
+ return r_ulonglong(intmask(x))
+
def _ll_1_llong_from_uint(x):
return r_longlong(r_uint(x))
+def _ll_1_ullong_from_uint(x):
+ return r_ulonglong(r_uint(x))
+
def _ll_1_llong_to_int(xll):
return intmask(xll)
def _ll_1_llong_from_float(xf):
return r_longlong(xf)
+def _ll_1_ullong_from_float(xf):
+ return r_ulonglong(xf)
+
def _ll_1_llong_to_float(xll):
return float(rffi.cast(lltype.SignedLongLong, xll))
+def _ll_1_ullong_u_to_float(xull):
+ return float(rffi.cast(lltype.UnsignedLongLong, xull))
+
def _ll_1_llong_abs(xll):
if xll < 0:
@@ -351,20 +363,23 @@
return llop.llong_mod(lltype.SignedLongLong, xll, yll)
def _ll_2_ullong_floordiv(xll, yll):
- return llop.ullong_floordiv(lltype.SignedLongLong, xll, yll)
+ return llop.ullong_floordiv(lltype.UnsignedLongLong, xll, yll)
def _ll_2_ullong_floordiv_zer(xll, yll):
if yll == 0:
raise ZeroDivisionError
- return llop.ullong_floordiv(lltype.SignedLongLong, xll, yll)
+ return llop.ullong_floordiv(lltype.UnsignedLongLong, xll, yll)
def _ll_2_ullong_mod(xll, yll):
- return llop.ullong_mod(lltype.SignedLongLong, xll, yll)
+ return llop.ullong_mod(lltype.UnsignedLongLong, xll, yll)
def _ll_2_ullong_mod_zer(xll, yll):
if yll == 0:
raise ZeroDivisionError
- return llop.ullong_mod(lltype.SignedLongLong, xll, yll)
+ return llop.ullong_mod(lltype.UnsignedLongLong, xll, yll)
+
+def _ll_2_uint_mod(xll, yll):
+ return llop.uint_mod(lltype.Unsigned, xll, yll)
# libffi support
diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py
--- a/pypy/jit/codewriter/test/test_flatten.py
+++ b/pypy/jit/codewriter/test/test_flatten.py
@@ -829,14 +829,15 @@
self.encoding_test(f, [rffi.cast(FROM, 42)], expectedstr,
transform=True)
elif TO in (rffi.LONG, rffi.ULONG):
+ if rffi.cast(FROM, -1) < 0:
+ fnname = "llong_from_int"
+ else:
+ fnname = "llong_from_uint"
if TO == rffi.LONG:
TO = rffi.LONGLONG
else:
TO = rffi.ULONGLONG
- if rffi.cast(FROM, -1) < 0:
- fnname = "llong_from_int"
- else:
- fnname = "llong_from_uint"
+ fnname = "u" + fnname
expected.pop() # remove int_return
expected.append(
"residual_call_irf_f $<* fn %s>, <Descr>, I[%s], R[], F[] -> %%f0"
diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py
--- a/pypy/jit/codewriter/test/test_longlong.py
+++ b/pypy/jit/codewriter/test/test_longlong.py
@@ -57,7 +57,8 @@
assert op1.opname == 'residual_call_irf_f'
else:
assert op1.opname == 'residual_call_irf_i'
- gotindex = getattr(EffectInfo, 'OS_' + op1.args[0].value.upper())
+ gotindex = getattr(EffectInfo,
+ 'OS_' + op1.args[0].value.upper().lstrip('U'))
assert gotindex == oopspecindex
assert op1.args[1] == 'calldescr-%d' % oopspecindex
assert list(op1.args[2]) == [v for v in vlist
@@ -192,8 +193,12 @@
[lltype.SignedLongLong], lltype.Signed)
self.do_check('cast_float_to_longlong', EffectInfo.OS_LLONG_FROM_FLOAT,
[lltype.Float], lltype.SignedLongLong)
+ self.do_check('cast_float_to_ulonglong', EffectInfo.OS_LLONG_FROM_FLOAT,
+ [lltype.Float], lltype.UnsignedLongLong)
self.do_check('cast_longlong_to_float', EffectInfo.OS_LLONG_TO_FLOAT,
[lltype.SignedLongLong], lltype.Float)
+ self.do_check('cast_ulonglong_to_float', EffectInfo.OS_LLONG_U_TO_FLOAT,
+ [lltype.UnsignedLongLong], lltype.Float)
for T1 in [lltype.SignedLongLong, lltype.UnsignedLongLong]:
for T2 in [lltype.Signed, lltype.Unsigned]:
self.do_check('cast_primitive', EffectInfo.OS_LLONG_TO_INT,
diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py
--- a/pypy/jit/metainterp/optimizeopt/__init__.py
+++ b/pypy/jit/metainterp/optimizeopt/__init__.py
@@ -7,6 +7,8 @@
from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble
from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall
from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify
+from pypy.jit.metainterp.optimizeopt.pure import OptPure
+from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce
from pypy.rlib.jit import PARAMETERS
from pypy.rlib.unroll import unrolling_iterable
@@ -14,6 +16,8 @@
('rewrite', OptRewrite),
('virtualize', OptVirtualize),
('string', OptString),
+ ('earlyforce', OptEarlyForce),
+ ('pure', OptPure),
('heap', OptHeap),
('ffi', None),
('unroll', None)]
diff --git a/pypy/jit/metainterp/optimizeopt/earlyforce.py b/pypy/jit/metainterp/optimizeopt/earlyforce.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/metainterp/optimizeopt/earlyforce.py
@@ -0,0 +1,24 @@
+from pypy.jit.metainterp.optimizeopt.optimizer import Optimization
+from pypy.jit.metainterp.optimizeopt.vstring import VAbstractStringValue
+from pypy.jit.metainterp.resoperation import rop, ResOperation
+
+class OptEarlyForce(Optimization):
+ def propagate_forward(self, op):
+ opnum = op.getopnum()
+ if (opnum != rop.SETFIELD_GC and
+ opnum != rop.SETARRAYITEM_GC and
+ opnum != rop.QUASIIMMUT_FIELD):
+
+ for arg in op.getarglist():
+ if arg in self.optimizer.values:
+ value = self.getvalue(arg)
+ value.force_box(self)
+ self.emit_operation(op)
+
+ def new(self):
+ return OptEarlyForce()
+
+ def setup(self):
+ self.optimizer.optearlyforce = self
+
+
diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py
--- a/pypy/jit/metainterp/optimizeopt/fficall.py
+++ b/pypy/jit/metainterp/optimizeopt/fficall.py
@@ -177,10 +177,10 @@
funcinfo.descr is None):
return [op] # cannot optimize
funcsymval = self.getvalue(op.getarg(2))
- arglist = [funcsymval.force_box()]
+ arglist = [funcsymval.get_key_box()]
for push_op in funcinfo.opargs:
argval = self.getvalue(push_op.getarg(2))
- arglist.append(argval.force_box())
+ arglist.append(argval.get_key_box())
newop = ResOperation(rop.CALL_RELEASE_GIL, arglist, op.result,
descr=funcinfo.descr)
self.commit_optimization()
diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
--- a/pypy/jit/metainterp/optimizeopt/heap.py
+++ b/pypy/jit/metainterp/optimizeopt/heap.py
@@ -50,6 +50,7 @@
if not self._lazy_setfield_registered:
optheap._lazy_setfields_and_arrayitems.append(self)
self._lazy_setfield_registered = True
+
else:
# this is the case where the pending setfield ends up
# storing precisely the value that is already there,
@@ -158,12 +159,17 @@
self._lazy_setfields_and_arrayitems = []
self._remove_guard_not_invalidated = False
self._seen_guard_not_invalidated = False
+ self.posponedop = None
def force_at_end_of_preamble(self):
self.force_all_lazy_setfields_and_arrayitems()
def flush(self):
self.force_all_lazy_setfields_and_arrayitems()
+ if self.posponedop:
+ posponedop = self.posponedop
+ self.posponedop = None
+ self.next_optimization.propagate_forward(posponedop)
def new(self):
return OptHeap()
@@ -211,7 +217,15 @@
def emit_operation(self, op):
self.emitting_operation(op)
- self.next_optimization.propagate_forward(op)
+ if self.posponedop:
+ posponedop = self.posponedop
+ self.posponedop = None
+ self.next_optimization.propagate_forward(posponedop)
+ if (op.is_comparison() or op.getopnum() == rop.CALL_MAY_FORCE
+ or op.is_ovf()):
+ self.posponedop = op
+ else:
+ self.next_optimization.propagate_forward(op)
def emitting_operation(self, op):
if op.has_no_side_effect():
@@ -293,30 +307,6 @@
if indexvalue is None or indexvalue.intbound.contains(idx):
cf.force_lazy_setfield(self, can_cache)
- def fixup_guard_situation(self):
- # hackish: reverse the order of the last two operations if it makes
- # sense to avoid a situation like "int_eq/setfield_gc/guard_true",
- # which the backend (at least the x86 backend) does not handle well.
- newoperations = self.optimizer.newoperations
- if len(newoperations) < 2:
- return
- lastop = newoperations[-1]
- if (lastop.getopnum() != rop.SETFIELD_GC and
- lastop.getopnum() != rop.SETARRAYITEM_GC):
- return
- # - is_comparison() for cases like "int_eq/setfield_gc/guard_true"
- # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced"
- # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow"
- prevop = newoperations[-2]
- opnum = prevop.getopnum()
- if not (prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE
- or prevop.is_ovf()):
- return
- if prevop.result in lastop.getarglist():
- return
- newoperations[-2] = lastop
- newoperations[-1] = prevop
-
def _assert_valid_cf(self, cf):
# check that 'cf' is in cached_fields or cached_arrayitems
if not we_are_translated():
@@ -362,7 +352,6 @@
fieldvalue.get_key_box(), itemindex))
else:
cf.force_lazy_setfield(self)
- self.fixup_guard_situation()
return pendingfields
def optimize_GETFIELD_GC(self, op):
@@ -374,12 +363,22 @@
return
# default case: produce the operation
structvalue.ensure_nonnull()
- ###self.optimizer.optimize_default(op)
self.emit_operation(op)
# then remember the result of reading the field
fieldvalue = self.getvalue(op.result)
cf.remember_field_value(structvalue, fieldvalue, op)
+ def optimize_GETFIELD_GC_PURE(self, op):
+ structvalue = self.getvalue(op.getarg(0))
+ cf = self.field_cache(op.getdescr())
+ fieldvalue = cf.getfield_from_cache(self, structvalue)
+ if fieldvalue is not None:
+ self.make_equal_to(op.result, fieldvalue)
+ return
+ # default case: produce the operation
+ structvalue.ensure_nonnull()
+ self.emit_operation(op)
+
def optimize_SETFIELD_GC(self, op):
if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)],
op.getdescr()):
@@ -389,6 +388,7 @@
#
cf = self.field_cache(op.getdescr())
cf.do_setfield(self, op)
+
def optimize_GETARRAYITEM_GC(self, op):
arrayvalue = self.getvalue(op.getarg(0))
@@ -413,6 +413,25 @@
fieldvalue = self.getvalue(op.result)
cf.remember_field_value(arrayvalue, fieldvalue, op)
+ def optimize_GETARRAYITEM_GC_PURE(self, op):
+ arrayvalue = self.getvalue(op.getarg(0))
+ indexvalue = self.getvalue(op.getarg(1))
+ cf = None
+ if indexvalue.is_constant():
+ arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint())
+ # use the cache on (arraydescr, index), which is a constant
+ cf = self.arrayitem_cache(op.getdescr(), indexvalue.box.getint())
+ fieldvalue = cf.getfield_from_cache(self, arrayvalue)
+ if fieldvalue is not None:
+ self.make_equal_to(op.result, fieldvalue)
+ return
+ else:
+ # variable index, so make sure the lazy setarrayitems are done
+ self.force_lazy_setarrayitem(op.getdescr(), indexvalue=indexvalue)
+ # default case: produce the operation
+ arrayvalue.ensure_nonnull()
+ self.emit_operation(op)
+
def optimize_SETARRAYITEM_GC(self, op):
if self.has_pure_result(rop.GETARRAYITEM_GC_PURE, [op.getarg(0),
op.getarg(1)],
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py
+++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
@@ -31,8 +31,8 @@
class OptValue(object):
__metaclass__ = extendabletype
- _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound', 'lenbound')
- last_guard_index = -1
+ _attrs_ = ('box', 'known_class', 'last_guard', 'level', 'intbound', 'lenbound')
+ last_guard = None
level = LEVEL_UNKNOWN
known_class = None
@@ -100,7 +100,7 @@
self.make_constant(other.get_key_box())
optimizer.turned_constant(self)
elif other.level == LEVEL_KNOWNCLASS:
- self.make_constant_class(other.known_class, -1)
+ self.make_constant_class(other.known_class, None)
else:
if other.level == LEVEL_NONNULL:
self.ensure_nonnull()
@@ -114,13 +114,13 @@
self.lenbound = other.lenbound.clone()
- def force_box(self):
+ def force_box(self, optforce):
return self.box
def get_key_box(self):
return self.box
- def force_at_end_of_preamble(self, already_forced):
+ def force_at_end_of_preamble(self, already_forced, optforce):
return self
def get_args_for_fail(self, modifier):
@@ -162,16 +162,16 @@
else:
return None
- def make_constant_class(self, classbox, opindex):
+ def make_constant_class(self, classbox, guardop):
assert self.level < LEVEL_KNOWNCLASS
self.known_class = classbox
self.level = LEVEL_KNOWNCLASS
- self.last_guard_index = opindex
+ self.last_guard = guardop
- def make_nonnull(self, opindex):
+ def make_nonnull(self, guardop):
assert self.level < LEVEL_NONNULL
self.level = LEVEL_NONNULL
- self.last_guard_index = opindex
+ self.last_guard = guardop
def is_nonnull(self):
level = self.level
@@ -223,6 +223,9 @@
def __init__(self, box):
self.make_constant(box)
+ def __repr__(self):
+ return 'Constant(%r)' % (self.box,)
+
CONST_0 = ConstInt(0)
CONST_1 = ConstInt(1)
CVAL_ZERO = ConstantValue(CONST_0)
@@ -237,26 +240,12 @@
def __init__(self):
pass # make rpython happy
- def propagate_begin_forward(self):
- if self.next_optimization:
- self.next_optimization.propagate_begin_forward()
-
- def propagate_end_forward(self):
- if self.next_optimization:
- self.next_optimization.propagate_end_forward()
-
def propagate_forward(self, op):
raise NotImplementedError
def emit_operation(self, op):
self.next_optimization.propagate_forward(op)
- def test_emittable(self, op):
- return self.is_emittable(op)
-
- def is_emittable(self, op):
- return self.next_optimization.test_emittable(op)
-
# FIXME: Move some of these here?
def getvalue(self, box):
return self.optimizer.getvalue(box)
@@ -286,19 +275,19 @@
return self.optimizer.new_const_item(arraydescr)
def pure(self, opnum, args, result):
- op = ResOperation(opnum, args, result)
- key = self.optimizer.make_args_key(op)
- if key not in self.optimizer.pure_operations:
- self.optimizer.pure_operations[key] = op
+ if self.optimizer.optpure:
+ self.optimizer.optpure.pure(opnum, args, result)
def has_pure_result(self, opnum, args, descr):
- op = ResOperation(opnum, args, None, descr)
- key = self.optimizer.make_args_key(op)
- op = self.optimizer.pure_operations.get(key, None)
- if op is None:
- return False
- return op.getdescr() is descr
+ if self.optimizer.optpure:
+ return self.optimizer.optpure.has_pure_result(opnum, args, descr)
+ return False
+ def get_pure_result(self, key):
+ if self.optimizer.optpure:
+ return self.optimizer.optpure.get_pure_result(key)
+ return None
+
def setup(self):
pass
@@ -319,6 +308,9 @@
def produce_potential_short_preamble_ops(self, potential_ops):
pass
+ def forget_numberings(self, box):
+ self.optimizer.forget_numberings(box)
+
class Optimizer(Optimization):
def __init__(self, metainterp_sd, loop, optimizations=None, bridge=False):
@@ -330,14 +322,16 @@
self.interned_refs = self.cpu.ts.new_ref_dict()
self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd)
self.bool_boxes = {}
- self.pure_operations = args_dict()
self.producer = {}
self.pendingfields = []
- self.posponedop = None
self.exception_might_have_happened = False
self.quasi_immutable_deps = None
self.opaque_pointers = {}
- self.newoperations = []
+ self.replaces_guard = {}
+ self._newoperations = []
+ self.optimizer = self
+ self.optpure = None
+ self.optearlyforce = None
if loop is not None:
self.call_pure_results = loop.call_pure_results
@@ -366,21 +360,20 @@
def flush(self):
for o in self.optimizations:
o.flush()
- assert self.posponedop is None
def new(self):
new = Optimizer(self.metainterp_sd, self.loop)
return self._new(new)
def _new(self, new):
- assert self.posponedop is None
optimizations = [o.new() for o in self.optimizations]
new.set_optimizations(optimizations)
new.quasi_immutable_deps = self.quasi_immutable_deps
return new
def produce_potential_short_preamble_ops(self, sb):
- raise NotImplementedError('This is implemented in unroll.UnrollableOptimizer')
+ for opt in self.optimizations:
+ opt.produce_potential_short_preamble_ops(sb)
def turned_constant(self, value):
for o in self.optimizations:
@@ -430,6 +423,13 @@
return constbox
return None
+ def get_newoperations(self):
+ self.flush()
+ return self._newoperations
+
+ def clear_newoperations(self):
+ self._newoperations = []
+
def make_equal_to(self, box, value, replace=False):
assert isinstance(value, OptValue)
assert replace or box not in self.values
@@ -478,15 +478,10 @@
def propagate_all_forward(self):
self.exception_might_have_happened = self.bridge
- self.newoperations = []
- self.first_optimization.propagate_begin_forward()
- self.i = 0
- while self.i < len(self.loop.operations):
- op = self.loop.operations[self.i]
+ self.clear_newoperations()
+ for op in self.loop.operations:
self.first_optimization.propagate_forward(op)
- self.i += 1
- self.first_optimization.propagate_end_forward()
- self.loop.operations = self.newoperations
+ self.loop.operations = self.get_newoperations()
self.loop.quasi_immutable_deps = self.quasi_immutable_deps
# accumulate counters
self.resumedata_memo.update_counters(self.metainterp_sd.profiler)
@@ -498,9 +493,6 @@
self.producer[op.result] = op
dispatch_opt(self, op)
- def test_emittable(self, op):
- return True
-
def emit_operation(self, op):
if op.returns_bool_result():
self.bool_boxes[self.getvalue(op.result)] = None
@@ -516,14 +508,30 @@
pass
else:
self.ensure_imported(value)
- op.setarg(i, value.force_box())
+ op.setarg(i, value.force_box(self))
self.metainterp_sd.profiler.count(jitprof.OPT_OPS)
if op.is_guard():
self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS)
- op = self.store_final_boxes_in_guard(op)
+ if self.replaces_guard and op in self.replaces_guard:
+ self.replace_op(self.replaces_guard[op], op)
+ del self.replaces_guard[op]
+ return
+ else:
+ op = self.store_final_boxes_in_guard(op)
elif op.can_raise():
self.exception_might_have_happened = True
- self.newoperations.append(op)
+ self._newoperations.append(op)
+
+ def replace_op(self, old_op, new_op):
+ # XXX: Do we want to cache indexes to prevent search?
+ i = len(self._newoperations)
+ while i > 0:
+ i -= 1
+ if self._newoperations[i] is old_op:
+ self._newoperations[i] = new_op
+ break
+ else:
+ assert False
def store_final_boxes_in_guard(self, op):
descr = op.getdescr()
@@ -571,51 +579,8 @@
args[n + 1] = op.getdescr()
return args
- @specialize.argtype(0)
def optimize_default(self, op):
- canfold = op.is_always_pure()
- if op.is_ovf():
- self.posponedop = op
- return
- if self.posponedop:
- nextop = op
- op = self.posponedop
- self.posponedop = None
- canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW
- else:
- nextop = None
-
- if canfold:
- for i in range(op.numargs()):
- if self.get_constant_box(op.getarg(i)) is None:
- break
- else:
- # all constant arguments: constant-fold away
- resbox = self.constant_fold(op)
- # note that INT_xxx_OVF is not done from here, and the
- # overflows in the INT_xxx operations are ignored
- self.make_constant(op.result, resbox)
- return
-
- # did we do the exact same operation already?
- args = self.make_args_key(op)
- oldop = self.pure_operations.get(args, None)
- if oldop is not None and oldop.getdescr() is op.getdescr():
- assert oldop.getopnum() == op.getopnum()
- self.make_equal_to(op.result, self.getvalue(oldop.result),
- True)
- return
- else:
- self.pure_operations[args] = op
- self.remember_emitting_pure(op)
-
- # otherwise, the operation remains
self.emit_operation(op)
- if nextop:
- self.emit_operation(nextop)
-
- def remember_emitting_pure(self, op):
- pass
def constant_fold(self, op):
argboxes = [self.get_constant_box(op.getarg(i))
@@ -633,11 +598,6 @@
def optimize_DEBUG_MERGE_POINT(self, op):
self.emit_operation(op)
- def optimize_CAST_OPAQUE_PTR(self, op):
- value = self.getvalue(op.getarg(0))
- self.opaque_pointers[value] = True
- self.make_equal_to(op.result, value)
-
def optimize_GETARRAYITEM_GC_PURE(self, op):
indexvalue = self.getvalue(op.getarg(1))
if indexvalue.is_constant():
diff --git a/pypy/jit/metainterp/optimizeopt/pure.py b/pypy/jit/metainterp/optimizeopt/pure.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/metainterp/optimizeopt/pure.py
@@ -0,0 +1,115 @@
+from pypy.jit.metainterp.optimizeopt.optimizer import Optimization
+from pypy.jit.metainterp.resoperation import rop, ResOperation
+from pypy.jit.metainterp.optimizeopt.util import (make_dispatcher_method,
+ args_dict)
+
+class OptPure(Optimization):
+ def __init__(self):
+ self.posponedop = None
+ self.pure_operations = args_dict()
+ self.emitted_pure_operations = {}
+
+ def propagate_forward(self, op):
+ dispatch_opt(self, op)
+
+ def optimize_default(self, op):
+ canfold = op.is_always_pure()
+ if op.is_ovf():
+ self.posponedop = op
+ return
+ if self.posponedop:
+ nextop = op
+ op = self.posponedop
+ self.posponedop = None
+ canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW
+ else:
+ nextop = None
+
+ if canfold:
+ for i in range(op.numargs()):
+ if self.get_constant_box(op.getarg(i)) is None:
+ break
+ else:
+ # all constant arguments: constant-fold away
+ resbox = self.optimizer.constant_fold(op)
+ # note that INT_xxx_OVF is not done from here, and the
+ # overflows in the INT_xxx operations are ignored
+ self.optimizer.make_constant(op.result, resbox)
+ return
+
+ # did we do the exact same operation already?
+ args = self.optimizer.make_args_key(op)
+ oldop = self.pure_operations.get(args, None)
+ if oldop is not None and oldop.getdescr() is op.getdescr():
+ assert oldop.getopnum() == op.getopnum()
+ self.optimizer.make_equal_to(op.result, self.getvalue(oldop.result),
+ True)
+ return
+ else:
+ self.pure_operations[args] = op
+ self.remember_emitting_pure(op)
+
+ # otherwise, the operation remains
+ self.emit_operation(op)
+ if op.returns_bool_result():
+ self.optimizer.bool_boxes[self.getvalue(op.result)] = None
+ if nextop:
+ self.emit_operation(nextop)
+
+ def optimize_CALL_PURE(self, op):
+ args = self.optimizer.make_args_key(op)
+ oldop = self.pure_operations.get(args, None)
+ if oldop is not None and oldop.getdescr() is op.getdescr():
+ assert oldop.getopnum() == op.getopnum()
+ self.make_equal_to(op.result, self.getvalue(oldop.result))
+ return
+ else:
+ self.pure_operations[args] = op
+ self.remember_emitting_pure(op)
+
+ # replace CALL_PURE with just CALL
+ args = op.getarglist()
+ self.emit_operation(ResOperation(rop.CALL, args, op.result,
+ op.getdescr()))
+
+ def flush(self):
+ assert self.posponedop is None
+
+ def new(self):
+ assert self.posponedop is None
+ return OptPure()
+
+ def setup(self):
+ self.optimizer.optpure = self
+
+ def pure(self, opnum, args, result):
+ op = ResOperation(opnum, args, result)
+ key = self.optimizer.make_args_key(op)
+ if key not in self.pure_operations:
+ self.pure_operations[key] = op
+
+ def has_pure_result(self, opnum, args, descr):
+ op = ResOperation(opnum, args, None, descr)
+ key = self.optimizer.make_args_key(op)
+ op = self.pure_operations.get(key, None)
+ if op is None:
+ return False
+ return op.getdescr() is descr
+
+ def get_pure_result(self, key):
+ return self.pure_operations.get(key, None)
+
+ def remember_emitting_pure(self, op):
+ self.emitted_pure_operations[op] = True
+
+ def produce_potential_short_preamble_ops(self, sb):
+ for op in self.emitted_pure_operations:
+ if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \
+ op.getopnum() == rop.STRGETITEM or \
+ op.getopnum() == rop.UNICODEGETITEM:
+ if not self.getvalue(op.getarg(1)).is_constant():
+ continue
+ sb.add_potential(op)
+
+dispatch_opt = make_dispatcher_method(OptPure, 'optimize_',
+ default=OptPure.optimize_default)
diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py
--- a/pypy/jit/metainterp/optimizeopt/rewrite.py
+++ b/pypy/jit/metainterp/optimizeopt/rewrite.py
@@ -31,21 +31,8 @@
dispatch_opt(self, op)
- def test_emittable(self, op):
- opnum = op.getopnum()
- for value, cls, func in optimize_guards:
- if opnum == value:
- assert isinstance(op, cls)
- try:
- func(self, op, dryrun=True)
- return self.is_emittable(op)
- except InvalidLoop:
- return False
- return self.is_emittable(op)
-
-
def try_boolinvers(self, op, targs):
- oldop = self.optimizer.pure_operations.get(targs, None)
+ oldop = self.get_pure_result(targs)
if oldop is not None and oldop.getdescr() is op.getdescr():
value = self.getvalue(oldop.result)
if value.is_constant():
@@ -62,32 +49,35 @@
def find_rewritable_bool(self, op, args):
try:
oldopnum = opboolinvers[op.getopnum()]
+ except KeyError:
+ pass
+ else:
targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[0], args[1]],
None))
if self.try_boolinvers(op, targs):
return True
- except KeyError:
- pass
try:
oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL
+ except KeyError:
+ pass
+ else:
targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]],
None))
- oldop = self.optimizer.pure_operations.get(targs, None)
+ oldop = self.get_pure_result(targs)
if oldop is not None and oldop.getdescr() is op.getdescr():
self.make_equal_to(op.result, self.getvalue(oldop.result))
return True
- except KeyError:
- pass
try:
oldopnum = opboolinvers[opboolreflex[op.getopnum()]]
+ except KeyError:
+ pass
+ else:
targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]],
None))
if self.try_boolinvers(op, targs):
return True
- except KeyError:
- pass
return False
@@ -214,40 +204,7 @@
self.emit_operation(op)
self.pure(rop.FLOAT_NEG, [op.result], v1)
- def optimize_CALL_PURE(self, op):
- arg_consts = []
- for i in range(op.numargs()):
- arg = op.getarg(i)
- const = self.get_constant_box(arg)
- if const is None:
- break
- arg_consts.append(const)
- else:
- # all constant arguments: check if we already know the result
- try:
- result = self.optimizer.call_pure_results[arg_consts]
- except KeyError:
- pass
- else:
- self.make_constant(op.result, result)
- return
-
- args = self.optimizer.make_args_key(op)
- oldop = self.optimizer.pure_operations.get(args, None)
- if oldop is not None and oldop.getdescr() is op.getdescr():
- assert oldop.getopnum() == op.getopnum()
- self.make_equal_to(op.result, self.getvalue(oldop.result))
- return
- else:
- self.optimizer.pure_operations[args] = op
- self.optimizer.remember_emitting_pure(op)
-
- # replace CALL_PURE with just CALL
- args = op.getarglist()
- self.emit_operation(ResOperation(rop.CALL, args, op.result,
- op.getdescr()))
-
- def optimize_guard(self, op, constbox, emit_operation=True, dryrun=False):
+ def optimize_guard(self, op, constbox, emit_operation=True):
value = self.getvalue(op.getarg(0))
if value.is_constant():
box = value.box
@@ -255,62 +212,57 @@
if not box.same_constant(constbox):
raise InvalidLoop
return
- if dryrun: return
if emit_operation:
self.emit_operation(op)
value.make_constant(constbox)
self.optimizer.turned_constant(value)
- def optimize_GUARD_ISNULL(self, op, dryrun=False):
+ def optimize_GUARD_ISNULL(self, op):
value = self.getvalue(op.getarg(0))
if value.is_null():
return
elif value.is_nonnull():
raise InvalidLoop
- if dryrun: return
self.emit_operation(op)
value.make_constant(self.optimizer.cpu.ts.CONST_NULL)
- def optimize_GUARD_NONNULL(self, op, dryrun=False):
+ def optimize_GUARD_NONNULL(self, op):
value = self.getvalue(op.getarg(0))
if value.is_nonnull():
return
elif value.is_null():
raise InvalidLoop
- if dryrun: return
self.emit_operation(op)
- value.make_nonnull(len(self.optimizer.newoperations) - 1)
+ value.make_nonnull(op)
- def optimize_GUARD_VALUE(self, op, dryrun=False):
+ def optimize_GUARD_VALUE(self, op):
value = self.getvalue(op.getarg(0))
- emit_operation = True
- if not dryrun and value.last_guard_index != -1:
+ if value.last_guard:
# there already has been a guard_nonnull or guard_class or
# guard_nonnull_class on this value, which is rather silly.
# replace the original guard with a guard_value
- old_guard_op = self.optimizer.newoperations[value.last_guard_index]
- new_guard_op = old_guard_op.copy_and_change(rop.GUARD_VALUE,
- args = [old_guard_op.getarg(0), op.getarg(1)])
- self.optimizer.newoperations[value.last_guard_index] = new_guard_op
+ old_guard_op = value.last_guard
+ op = old_guard_op.copy_and_change(rop.GUARD_VALUE,
+ args = [old_guard_op.getarg(0), op.getarg(1)])
+ self.optimizer.replaces_guard[op] = old_guard_op
# hack hack hack. Change the guard_opnum on
# new_guard_op.getdescr() so that when resuming,
# the operation is not skipped by pyjitpl.py.
- descr = new_guard_op.getdescr()
+ descr = op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
descr.guard_opnum = rop.GUARD_VALUE
- descr.make_a_counter_per_value(new_guard_op)
- emit_operation = False
+ descr.make_a_counter_per_value(op)
constbox = op.getarg(1)
assert isinstance(constbox, Const)
- self.optimize_guard(op, constbox, emit_operation, dryrun)
+ self.optimize_guard(op, constbox)
- def optimize_GUARD_TRUE(self, op, dryrun=False):
- self.optimize_guard(op, CONST_1, dryrun=dryrun)
+ def optimize_GUARD_TRUE(self, op):
+ self.optimize_guard(op, CONST_1)
- def optimize_GUARD_FALSE(self, op, dryrun=False):
- self.optimize_guard(op, CONST_0, dryrun=dryrun)
+ def optimize_GUARD_FALSE(self, op):
+ self.optimize_guard(op, CONST_0)
- def optimize_GUARD_CLASS(self, op, dryrun=False):
+ def optimize_GUARD_CLASS(self, op):
value = self.getvalue(op.getarg(0))
expectedclassbox = op.getarg(1)
assert isinstance(expectedclassbox, Const)
@@ -319,38 +271,32 @@
if realclassbox.same_constant(expectedclassbox):
return
raise InvalidLoop
- if dryrun: return
- emit_operation = True
- if value.last_guard_index != -1:
+ if value.last_guard:
# there already has been a guard_nonnull or guard_class or
# guard_nonnull_class on this value.
- old_guard_op = self.optimizer.newoperations[value.last_guard_index]
+ old_guard_op = value.last_guard
if old_guard_op.getopnum() == rop.GUARD_NONNULL:
# it was a guard_nonnull, which we replace with a
# guard_nonnull_class.
- new_guard_op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS,
+ op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS,
args = [old_guard_op.getarg(0), op.getarg(1)])
- self.optimizer.newoperations[value.last_guard_index] = new_guard_op
+ self.optimizer.replaces_guard[op] = old_guard_op
# hack hack hack. Change the guard_opnum on
# new_guard_op.getdescr() so that when resuming,
# the operation is not skipped by pyjitpl.py.
- descr = new_guard_op.getdescr()
+ descr = op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
descr.guard_opnum = rop.GUARD_NONNULL_CLASS
- emit_operation = False
- if emit_operation:
- self.emit_operation(op)
- last_guard_index = len(self.optimizer.newoperations) - 1
- else:
- last_guard_index = value.last_guard_index
- value.make_constant_class(expectedclassbox, last_guard_index)
+ self.emit_operation(op)
+ value.make_constant_class(expectedclassbox, op)
- def optimize_GUARD_NONNULL_CLASS(self, op, dryrun=False):
- self.optimize_GUARD_NONNULL(op, True)
- self.optimize_GUARD_CLASS(op, dryrun)
+ def optimize_GUARD_NONNULL_CLASS(self, op):
+ value = self.getvalue(op.getarg(0))
+ if value.is_null():
+ raise InvalidLoop
+ self.optimize_GUARD_CLASS(op)
- def optimize_GUARD_NO_EXCEPTION(self, op, dryrun=False):
- if dryrun: return
+ def optimize_GUARD_NO_EXCEPTION(self, op):
if not self.optimizer.exception_might_have_happened:
return
self.emit_operation(op)
@@ -470,7 +416,7 @@
newop = ResOperation(rop.SETARRAYITEM_GC,
[op.getarg(2),
ConstInt(index + dest_start),
- val.force_box()], None,
+ val.get_key_box()], None,
descr=source_value.arraydescr)
self.emit_operation(newop)
return True
@@ -478,6 +424,25 @@
return True # 0-length arraycopy
return False
+ def optimize_CALL_PURE(self, op):
+ arg_consts = []
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
+ const = self.get_constant_box(arg)
+ if const is None:
+ break
+ arg_consts.append(const)
+ else:
+ # all constant arguments: check if we already know the result
+ try:
+ result = self.optimizer.call_pure_results[arg_consts]
+ except KeyError:
+ pass
+ else:
+ self.make_constant(op.result, result)
+ return
+ self.emit_operation(op)
+
def optimize_INT_FLOORDIV(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
@@ -492,6 +457,10 @@
args = [op.getarg(0), ConstInt(highest_bit(val))])
self.emit_operation(op)
+ def optimize_CAST_OPAQUE_PTR(self, op):
+ value = self.getvalue(op.getarg(0))
+ self.optimizer.opaque_pointers[value] = True
+ self.make_equal_to(op.result, value)
dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
default=OptRewrite.emit_operation)
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -39,8 +39,9 @@
def test_sharing_field_lists_of_virtual():
class FakeOptimizer(object):
- class cpu(object):
- pass
+ class optimizer(object):
+ class cpu(object):
+ pass
opt = FakeOptimizer()
virt1 = virtualize.AbstractVirtualStructValue(opt, None)
lst1 = virt1._get_field_descr_list()
@@ -69,7 +70,7 @@
class FakeVirtualValue(virtualize.AbstractVirtualValue):
def _make_virtual(self, *args):
return FakeVInfo()
- v1 = FakeVirtualValue(None, None, None)
+ v1 = FakeVirtualValue(None, None)
vinfo1 = v1.make_virtual_info(None, [1, 2, 4])
vinfo2 = v1.make_virtual_info(None, [1, 2, 4])
assert vinfo1 is vinfo2
@@ -111,7 +112,7 @@
class BaseTestBasic(BaseTest):
- enable_opts = "intbounds:rewrite:virtualize:string:heap"
+ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap"
def optimize_loop(self, ops, optops, call_pure_results=None):
@@ -651,8 +652,8 @@
i3 = getfield_gc(p3, descr=valuedescr)
escape(i3)
p1 = new_with_vtable(ConstClass(node_vtable))
+ p1sub = new_with_vtable(ConstClass(node_vtable2))
setfield_gc(p1, i1, descr=valuedescr)
- p1sub = new_with_vtable(ConstClass(node_vtable2))
setfield_gc(p1sub, i1, descr=valuedescr)
setfield_gc(p1, p1sub, descr=nextdescr)
jump(i1, p1, p2)
@@ -667,10 +668,10 @@
p3sub = getfield_gc(p3, descr=nextdescr)
i3 = getfield_gc(p3sub, descr=valuedescr)
escape(i3)
+ p1 = new_with_vtable(ConstClass(node_vtable))
p2sub = new_with_vtable(ConstClass(node_vtable2))
setfield_gc(p2sub, i1, descr=valuedescr)
setfield_gc(p2, p2sub, descr=nextdescr)
- p1 = new_with_vtable(ConstClass(node_vtable))
jump(i1, p1, p2)
"""
# The same as test_p123_simple, but in the end the "old" p2 contains
@@ -4767,6 +4768,26 @@
# other
self.optimize_loop(ops, expected)
+ def test_plain_virtual_string_copy_content(self):
+ ops = """
+ []
+ p0 = newstr(6)
+ copystrcontent(s"hello!", p0, 0, 0, 6)
+ p1 = call(0, p0, s"abc123", descr=strconcatdescr)
+ i0 = strgetitem(p1, 0)
+ finish(i0)
+ """
+ expected = """
+ []
+ p0 = newstr(6)
+ copystrcontent(s"hello!", p0, 0, 0, 6)
+ p1 = newstr(12)
+ copystrcontent(p0, p1, 0, 0, 6)
+ copystrcontent(s"abc123", p1, 0, 6, 6)
+ i0 = strgetitem(p1, 0)
+ finish(i0)
+ """
+ self.optimize_strunicode_loop(ops, expected)
class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
@@ -36,7 +36,7 @@
class TestFfiCall(BaseTestBasic, LLtypeMixin):
- enable_opts = "intbounds:rewrite:virtualize:string:heap:ffi"
+ enable_opts = "intbounds:rewrite:virtualize:string:pure:earlyforce:heap:ffi"
class namespace:
cpu = LLtypeMixin.cpu
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -68,7 +68,7 @@
class BaseTestWithUnroll(BaseTest):
- enable_opts = "intbounds:rewrite:virtualize:string:heap:unroll"
+ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll"
def optimize_loop(self, ops, expected, expected_preamble=None,
call_pure_results=None, expected_short=None):
@@ -825,8 +825,8 @@
i3 = getfield_gc(p2, descr=valuedescr)
escape(i3)
p4 = new_with_vtable(ConstClass(node_vtable))
+ p1sub = new_with_vtable(ConstClass(node_vtable2))
setfield_gc(p4, i1, descr=valuedescr)
- p1sub = new_with_vtable(ConstClass(node_vtable2))
setfield_gc(p1sub, i1, descr=valuedescr)
setfield_gc(p4, p1sub, descr=nextdescr)
jump(i1, p4)
@@ -865,13 +865,7 @@
p3sub = new_with_vtable(ConstClass(node_vtable2))
setfield_gc(p3sub, i1, descr=valuedescr)
setfield_gc(p1, p3sub, descr=nextdescr)
- # XXX: We get two extra operations here because the setfield
- # above is the result of forcing p1 and thus not
- # registered with the heap optimizer. I've makred tests
- # below with VIRTUALHEAP if they suffer from this issue
- p3sub2 = getfield_gc(p1, descr=nextdescr)
- guard_nonnull_class(p3sub2, ConstClass(node_vtable2)) []
- jump(i1, p1, p3sub2)
+ jump(i1, p1, p3sub)
"""
self.optimize_loop(ops, expected, preamble)
@@ -902,9 +896,7 @@
guard_true(i2b) []
p3 = new_with_vtable(ConstClass(node_vtable))
setfield_gc(p3, i2, descr=nextdescr)
- # XXX: VIRTUALHEAP (see above)
- i3 = getfield_gc(p3, descr=nextdescr)
- jump(p3, i3)
+ jump(p3, i2)
"""
self.optimize_loop(ops, expected, preamble)
@@ -1219,7 +1211,15 @@
setfield_gc(p3, p30, descr=valuedescr)
jump(i29, p30, p3)
"""
- expected = preamble
+ expected = """
+ [i0, p1, p3]
+ i28 = int_add(i0, 1)
+ i29 = int_add(i28, 1)
+ p30 = new_with_vtable(ConstClass(node_vtable))
+ setfield_gc(p3, p30, descr=valuedescr)
+ setfield_gc(p30, i28, descr=nextdescr)
+ jump(i29, p30, p3)
+ """
self.optimize_loop(ops, expected, preamble)
def test_nonvirtual_1(self):
@@ -2408,8 +2408,8 @@
guard_class(p2, ConstClass(node_vtable)) []
p3 = getfield_gc(p1, descr=otherdescr)
guard_class(p3, ConstClass(node_vtable)) []
+ p3a = new_with_vtable(ConstClass(node_vtable))
setfield_gc(p3, p2, descr=otherdescr)
- p3a = new_with_vtable(ConstClass(node_vtable))
escape(p3a)
jump(p3a)
"""
@@ -2421,9 +2421,9 @@
# setfield_gc(p3, p2, descr=otherdescr) # p3a.other = p2a
# p1a = new_with_vtable(ConstClass(node_vtable2))
# p2a = new_with_vtable(ConstClass(node_vtable))
+ p3anew = new_with_vtable(ConstClass(node_vtable))
p2 = new_with_vtable(ConstClass(node_vtable))
setfield_gc(p3a, p2, descr=otherdescr) # p3a.other = p2a
- p3anew = new_with_vtable(ConstClass(node_vtable))
escape(p3anew)
jump(p3anew)
"""
@@ -2458,9 +2458,9 @@
p3 = getfield_gc(p1, descr=otherdescr)
guard_class(p3, ConstClass(node_vtable)) []
# p1a = new_with_vtable(ConstClass(node_vtable2))
+ p3a = new_with_vtable(ConstClass(node_vtable))
p2a = new_with_vtable(ConstClass(node_vtable))
setfield_gc(p3, p2a, descr=otherdescr)
- p3a = new_with_vtable(ConstClass(node_vtable))
escape(p3a)
# setfield_gc(p1a, p2a, descr=nextdescr)
# setfield_gc(p1a, p3a, descr=otherdescr)
@@ -2468,9 +2468,9 @@
"""
expected = """
[p2, p3]
+ p3a = new_with_vtable(ConstClass(node_vtable))
p2a = new_with_vtable(ConstClass(node_vtable))
setfield_gc(p3, p2a, descr=otherdescr)
- p3a = new_with_vtable(ConstClass(node_vtable))
escape(p3a)
jump(p2a, p3a)
"""
@@ -2790,7 +2790,6 @@
self.optimize_loop(ops, expected, preamble)
def test_remove_duplicate_pure_op_ovf_with_lazy_setfield(self):
- py.test.skip('this optimization is not yet supprted')
ops = """
[i1, p1]
i3 = int_add_ovf(i1, 1)
@@ -5961,13 +5960,18 @@
escape(i0)
jump(p1)
"""
- expected = """
+ preamble = """
[p1]
i0 = ptr_eq(p1, NULL)
escape(i0)
- jump(p1)
- """
- self.optimize_strunicode_loop_extradescrs(ops, expected, expected)
+ jump(p1, i0)
+ """
+ expected = """
+ [p1, i0]
+ escape(i0)
+ jump(p1, i0)
+ """
+ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble)
def test_str_equal_none2(self):
ops = """
@@ -5976,13 +5980,18 @@
escape(i0)
jump(p1)
"""
- expected = """
+ preamble = """
[p1]
i0 = ptr_eq(p1, NULL)
escape(i0)
- jump(p1)
- """
- self.optimize_strunicode_loop_extradescrs(ops, expected, expected)
+ jump(p1, i0)
+ """
+ expected = """
+ [p1, i0]
+ escape(i0)
+ jump(p1, i0)
+ """
+ self.optimize_strunicode_loop_extradescrs(ops, expected, preamble)
def test_str_equal_nonnull1(self):
ops = """
@@ -7220,6 +7229,60 @@
"""
self.optimize_loop(ops, expected)
+ def test_heap_cache_forced_virtuals(self):
+ ops = """
+ [i1, i2, p0]
+ p1 = new(descr=ssize)
+ setfield_gc(p1, i1, descr=adescr)
+ setfield_gc(p1, i2, descr=bdescr)
+ call(p0, p1, descr=writeadescr)
+ i3 = getfield_gc(p1, descr=adescr)
+ i4 = getfield_gc(p1, descr=bdescr)
+ jump(i3, i4, p0)
+ """
+ expected = """
+ [i1, i2, p0]
+ p1 = new(descr=ssize)
+ setfield_gc(p1, i1, descr=adescr)
+ call(p0, p1, descr=writeadescr)
+ i3 = getfield_gc(p1, descr=adescr)
+ setfield_gc(p1, i2, descr=bdescr)
+ jump(i3, i2, p0)
+ """
+ self.optimize_loop(ops, expected)
+
+
+ def test_setarrayitem_followed_by_arraycopy(self):
+ ops = """
+ [p1, p2]
+ setarrayitem_gc(p1, 2, 10, descr=arraydescr)
+ setarrayitem_gc(p2, 3, 13, descr=arraydescr)
+ call(0, p1, p2, 0, 0, 10, descr=arraycopydescr)
+ jump(p1, p2)
+ """
+ self.optimize_loop(ops, ops)
+
+ def test_heap_cache_virtuals_forced_by_delayed_setfield(self):
+ py.test.skip('not yet supoprted')
+ ops = """
+ [i1, p0]
+ p1 = new(descr=ssize)
+ setfield_gc(p1, i1, descr=valuedescr)
+ setfield_gc(p0, p1, descr=adescr)
+ call(p0, descr=writeadescr)
+ i2 = getfield_gc(p1, descr=valuedescr)
+ jump(i2, p0)
+ """
+ expected = """
+ [i1, p0]
+ p1 = new(descr=ssize)
+ setfield_gc(p1, i1, descr=valuedescr)
+ setfield_gc(p0, p1, descr=adescr)
+ call(p0, descr=writeadescr)
+ jump(i1, p0)
+ """
+ self.optimize_loop(ops, expected)
+
class TestLLtype(OptimizeOptTest, LLtypeMixin):
pass
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_util.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_util.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py
@@ -182,7 +182,8 @@
EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE,
can_invalidate=True))
arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
- EffectInfo([], [], [], [], oopspecindex=EffectInfo.OS_ARRAYCOPY))
+ EffectInfo([], [arraydescr], [], [arraydescr],
+ oopspecindex=EffectInfo.OS_ARRAYCOPY))
for _name, _os in [
('strconcatdescr', 'OS_STR_CONCAT'),
diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
--- a/pypy/jit/metainterp/optimizeopt/unroll.py
+++ b/pypy/jit/metainterp/optimizeopt/unroll.py
@@ -75,7 +75,6 @@
self.importable_values = {}
self.emitting_dissabled = False
self.emitted_guards = 0
- self.emitted_pure_operations = {}
def ensure_imported(self, value):
if not self.emitting_dissabled and value in self.importable_values:
@@ -96,21 +95,6 @@
new = UnrollableOptimizer(self.metainterp_sd, self.loop)
return self._new(new)
- def remember_emitting_pure(self, op):
- self.emitted_pure_operations[op] = True
-
- def produce_potential_short_preamble_ops(self, sb):
- for op in self.emitted_pure_operations:
- if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \
- op.getopnum() == rop.STRGETITEM or \
- op.getopnum() == rop.UNICODEGETITEM:
- if not self.getvalue(op.getarg(1)).is_constant():
- continue
- sb.add_potential(op)
- for opt in self.optimizations:
- opt.produce_potential_short_preamble_ops(sb)
-
-
class UnrollOptimizer(Optimization):
"""Unroll the loop into two iterations. The first one will
@@ -154,7 +138,7 @@
KillHugeIntBounds(self.optimizer).apply()
- loop.preamble.operations = self.optimizer.newoperations
+ loop.preamble.operations = self.optimizer.get_newoperations()
jump_args = [self.getvalue(a).get_key_box() for a in jump_args]
start_resumedescr = loop.preamble.start_resumedescr.clone_if_mutable()
@@ -167,8 +151,9 @@
virtual_state = modifier.get_virtual_state(jump_args)
values = [self.getvalue(arg) for arg in jump_args]
- inputargs = virtual_state.make_inputargs(values)
- short_inputargs = virtual_state.make_inputargs(values, keyboxes=True)
+ inputargs = virtual_state.make_inputargs(values, self.optimizer)
+ short_inputargs = virtual_state.make_inputargs(values, self.optimizer,
+ keyboxes=True)
self.constant_inputargs = {}
for box in jump_args:
@@ -197,7 +182,7 @@
# operations needed to setup the proper state of those virtuals
# in the peeled loop
inputarg_setup_ops = []
- preamble_optimizer.newoperations = []
+ preamble_optimizer.clear_newoperations()
seen = {}
for box in inputargs:
if box in seen:
@@ -211,9 +196,8 @@
continue
seen[box] = True
value = preamble_optimizer.getvalue(box)
- value.force_box()
- preamble_optimizer.flush()
- inputarg_setup_ops += preamble_optimizer.newoperations
+ value.force_box(preamble_optimizer)
+ inputarg_setup_ops += preamble_optimizer.get_newoperations()
# Setup the state of the new optimizer by emiting the
# short preamble operations and discarding the result
@@ -244,13 +228,13 @@
virtual_state)
loop.inputargs = inputargs
- args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box()\
+ args = [preamble_optimizer.getvalue(self.short_boxes.original(a)).force_box(preamble_optimizer)\
for a in inputargs]
jmp = ResOperation(rop.JUMP, args, None)
jmp.setdescr(loop.token)
loop.preamble.operations.append(jmp)
- loop.operations = self.optimizer.newoperations
+ loop.operations = self.optimizer.get_newoperations()
maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards
if self.optimizer.emitted_guards > maxguards:
@@ -335,9 +319,10 @@
assert jumpop
original_jumpargs = jumpop.getarglist()[:]
values = [self.getvalue(arg) for arg in jumpop.getarglist()]
- jumpargs = virtual_state.make_inputargs(values)
+ jumpargs = virtual_state.make_inputargs(values, self.optimizer)
jumpop.initarglist(jumpargs)
- jmp_to_short_args = virtual_state.make_inputargs(values, keyboxes=True)
+ jmp_to_short_args = virtual_state.make_inputargs(values, self.optimizer,
+ keyboxes=True)
self.short_inliner = Inliner(short_inputargs, jmp_to_short_args)
for box, const in self.constant_inputargs.items():
@@ -347,11 +332,11 @@
newop = self.short_inliner.inline_op(op)
self.optimizer.send_extra_operation(newop)
- self.optimizer.flush()
+ newoperations = self.optimizer.get_newoperations()
i = j = 0
- while i < len(self.optimizer.newoperations) or j < len(jumpargs):
- if i == len(self.optimizer.newoperations):
+ while i < len(newoperations) or j < len(jumpargs):
+ if i == len(newoperations):
while j < len(jumpargs):
a = jumpargs[j]
if self.optimizer.loop.logops:
@@ -360,7 +345,7 @@
jumpargs, short_seen)
j += 1
else:
- op = self.optimizer.newoperations[i]
+ op = newoperations[i]
self.boxes_created_this_iteration[op.result] = True
args = op.getarglist()
@@ -375,6 +360,7 @@
self.import_box(a, inputargs, short, short_jumpargs,
jumpargs, short_seen)
i += 1
+ newoperations = self.optimizer.get_newoperations()
jumpop.initarglist(jumpargs)
self.optimizer.send_extra_operation(jumpop)
@@ -468,7 +454,7 @@
inputargs.append(box)
box = newresult
if box in self.optimizer.values:
- box = self.optimizer.values[box].force_box()
+ box = self.optimizer.values[box].force_box(self.optimizer)
jumpargs.append(box)
@@ -483,11 +469,6 @@
if op.getopnum() == rop.JUMP:
loop_token = op.getdescr()
assert isinstance(loop_token, LoopToken)
- # FIXME: Use a tree, similar to the tree formed by the full
- # preamble and it's bridges, instead of a list to save time and
- # memory. This should also allow better behaviour in
- # situations that the is_emittable() chain currently cant
- # handle and the inlining fails unexpectedly belwo.
short = loop_token.short_preamble
if short:
args = op.getarglist()
@@ -523,7 +504,7 @@
values = [self.getvalue(arg)
for arg in op.getarglist()]
- args = sh.virtual_state.make_inputargs(values,
+ args = sh.virtual_state.make_inputargs(values, self.optimizer,
keyboxes=True)
inliner = Inliner(sh.inputargs, args)
diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py
--- a/pypy/jit/metainterp/optimizeopt/virtualize.py
+++ b/pypy/jit/metainterp/optimizeopt/virtualize.py
@@ -10,13 +10,12 @@
class AbstractVirtualValue(optimizer.OptValue):
- _attrs_ = ('optimizer', 'keybox', 'source_op', '_cached_vinfo')
+ _attrs_ = ('keybox', 'source_op', '_cached_vinfo')
box = None
level = optimizer.LEVEL_NONNULL
_cached_vinfo = None
- def __init__(self, optimizer, keybox, source_op=None):
- self.optimizer = optimizer
+ def __init__(self, keybox, source_op=None):
self.keybox = keybox # only used as a key in dictionaries
self.source_op = source_op # the NEW_WITH_VTABLE/NEW_ARRAY operation
# that builds this box
@@ -29,17 +28,17 @@
return self.keybox
return self.box
- def force_box(self):
+ def force_box(self, optforce):
if self.box is None:
- self.optimizer.forget_numberings(self.keybox)
- self._really_force()
+ optforce.forget_numberings(self.keybox)
+ self._really_force(optforce)
return self.box
- def force_at_end_of_preamble(self, already_forced):
+ def force_at_end_of_preamble(self, already_forced, optforce):
value = already_forced.get(self, None)
if value:
return value
- return OptValue(self.force_box())
+ return OptValue(self.force_box(optforce))
def make_virtual_info(self, modifier, fieldnums):
if fieldnums is None:
@@ -55,7 +54,7 @@
def _make_virtual(self, modifier):
raise NotImplementedError("abstract base")
- def _really_force(self):
+ def _really_force(self, optforce):
raise NotImplementedError("abstract base")
def import_from(self, other, optimizer):
@@ -70,10 +69,11 @@
get_fielddescrlist_cache._annspecialcase_ = "specialize:memo"
class AbstractVirtualStructValue(AbstractVirtualValue):
- _attrs_ = ('_fields', '_cached_sorted_fields')
+ _attrs_ = ('_fields', 'cpu', '_cached_sorted_fields')
- def __init__(self, optimizer, keybox, source_op=None):
- AbstractVirtualValue.__init__(self, optimizer, keybox, source_op)
+ def __init__(self, cpu, keybox, source_op=None):
+ AbstractVirtualValue.__init__(self, keybox, source_op)
+ self.cpu = cpu
self._fields = {}
self._cached_sorted_fields = None
@@ -87,45 +87,44 @@
def _get_descr(self):
raise NotImplementedError
- def _is_immutable_and_filled_with_constants(self):
+ def _is_immutable_and_filled_with_constants(self, optforce):
count = self._get_descr().count_fields_if_immutable()
if count != len(self._fields): # always the case if count == -1
return False
for value in self._fields.itervalues():
- subbox = value.force_box()
+ subbox = value.force_box(optforce)
if not isinstance(subbox, Const):
return False
return True
- def force_at_end_of_preamble(self, already_forced):
+ def force_at_end_of_preamble(self, already_forced, optforce):
if self in already_forced:
return self
already_forced[self] = self
if self._fields:
for ofs in self._fields.keys():
- self._fields[ofs] = self._fields[ofs].force_at_end_of_preamble(already_forced)
+ self._fields[ofs] = self._fields[ofs].force_at_end_of_preamble(already_forced, optforce)
return self
- def _really_force(self):
+ def _really_force(self, optforce):
op = self.source_op
assert op is not None
# ^^^ This case should not occur any more (see test_bug_3).
#
if not we_are_translated():
op.name = 'FORCE ' + self.source_op.name
-
- if self._is_immutable_and_filled_with_constants():
- box = self.optimizer.constant_fold(op)
+
+ if self._is_immutable_and_filled_with_constants(optforce):
+ box = optforce.optimizer.constant_fold(op)
self.make_constant(box)
for ofs, value in self._fields.iteritems():
- subbox = value.force_box()
+ subbox = value.force_box(optforce)
assert isinstance(subbox, Const)
- execute(self.optimizer.cpu, None, rop.SETFIELD_GC,
+ execute(optforce.optimizer.cpu, None, rop.SETFIELD_GC,
ofs, box, subbox)
# keep self._fields, because it's all immutable anyway
else:
- newoperations = self.optimizer.newoperations
- newoperations.append(op)
+ optforce.emit_operation(op)
self.box = box = op.result
#
iteritems = self._fields.iteritems()
@@ -135,10 +134,11 @@
for ofs, value in iteritems:
if value.is_null():
continue
- subbox = value.force_box()
+ subbox = value.force_box(optforce)
op = ResOperation(rop.SETFIELD_GC, [box, subbox], None,
descr=ofs)
- newoperations.append(op)
+
+ optforce.emit_operation(op)
def _get_field_descr_list(self):
_cached_sorted_fields = self._cached_sorted_fields
@@ -155,7 +155,7 @@
else:
lst = self._fields.keys()
sort_descrs(lst)
- cache = get_fielddescrlist_cache(self.optimizer.cpu)
+ cache = get_fielddescrlist_cache(self.cpu)
result = cache.get(lst, None)
if result is None:
cache[lst] = lst
@@ -180,8 +180,8 @@
class VirtualValue(AbstractVirtualStructValue):
level = optimizer.LEVEL_KNOWNCLASS
- def __init__(self, optimizer, known_class, keybox, source_op=None):
- AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op)
+ def __init__(self, cpu, known_class, keybox, source_op=None):
+ AbstractVirtualStructValue.__init__(self, cpu, keybox, source_op)
assert isinstance(known_class, Const)
self.known_class = known_class
@@ -190,7 +190,7 @@
return modifier.make_virtual(self.known_class, fielddescrs)
def _get_descr(self):
- return vtable2descr(self.optimizer.cpu, self.known_class.getint())
+ return vtable2descr(self.cpu, self.known_class.getint())
def __repr__(self):
cls_name = self.known_class.value.adr.ptr._obj._TYPE._name
@@ -201,8 +201,8 @@
class VStructValue(AbstractVirtualStructValue):
- def __init__(self, optimizer, structdescr, keybox, source_op=None):
- AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op)
+ def __init__(self, cpu, structdescr, keybox, source_op=None):
+ AbstractVirtualStructValue.__init__(self, cpu, keybox, source_op)
self.structdescr = structdescr
def _make_virtual(self, modifier):
@@ -215,10 +215,10 @@
class VArrayValue(AbstractVirtualValue):
- def __init__(self, optimizer, arraydescr, size, keybox, source_op=None):
- AbstractVirtualValue.__init__(self, optimizer, keybox, source_op)
+ def __init__(self, arraydescr, constvalue, size, keybox, source_op=None):
+ AbstractVirtualValue.__init__(self, keybox, source_op)
self.arraydescr = arraydescr
- self.constvalue = optimizer.new_const_item(arraydescr)
+ self.constvalue = constvalue
self._items = [self.constvalue] * size
def getlength(self):
@@ -232,31 +232,30 @@
assert isinstance(itemvalue, optimizer.OptValue)
self._items[index] = itemvalue
- def force_at_end_of_preamble(self, already_forced):
+ def force_at_end_of_preamble(self, already_forced, optforce):
if self in already_forced:
return self
already_forced[self] = self
for index in range(len(self._items)):
- self._items[index] = self._items[index].force_at_end_of_preamble(already_forced)
+ self._items[index] = self._items[index].force_at_end_of_preamble(already_forced, optforce)
return self
- def _really_force(self):
+ def _really_force(self, optforce):
assert self.source_op is not None
if not we_are_translated():
self.source_op.name = 'FORCE ' + self.source_op.name
- newoperations = self.optimizer.newoperations
- newoperations.append(self.source_op)
+ optforce.emit_operation(self.source_op)
self.box = box = self.source_op.result
for index in range(len(self._items)):
subvalue = self._items[index]
if subvalue is not self.constvalue:
if subvalue.is_null():
continue
- subbox = subvalue.force_box()
+ subbox = subvalue.force_box(optforce)
op = ResOperation(rop.SETARRAYITEM_GC,
[box, ConstInt(index), subbox], None,
descr=self.arraydescr)
- newoperations.append(op)
+ optforce.emit_operation(op)
def get_args_for_fail(self, modifier):
if self.box is None and not modifier.already_seen_virtual(self.keybox):
@@ -279,17 +278,18 @@
return OptVirtualize()
def make_virtual(self, known_class, box, source_op=None):
- vvalue = VirtualValue(self.optimizer, known_class, box, source_op)
+ vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op)
self.make_equal_to(box, vvalue)
return vvalue
def make_varray(self, arraydescr, size, box, source_op=None):
- vvalue = VArrayValue(self.optimizer, arraydescr, size, box, source_op)
+ constvalue = self.new_const_item(arraydescr)
+ vvalue = VArrayValue(arraydescr, constvalue, size, box, source_op)
self.make_equal_to(box, vvalue)
return vvalue
def make_vstruct(self, structdescr, box, source_op=None):
- vvalue = VStructValue(self.optimizer, structdescr, box, source_op)
+ vvalue = VStructValue(self.optimizer.cpu, structdescr, box, source_op)
self.make_equal_to(box, vvalue)
return vvalue
@@ -362,7 +362,6 @@
self.make_equal_to(op.result, fieldvalue)
else:
value.ensure_nonnull()
- ###self.heap_op_optimizer.optimize_GETFIELD_GC(op, value)
self.emit_operation(op)
# note: the following line does not mean that the two operations are
@@ -377,7 +376,6 @@
value.setfield(op.getdescr(), fieldvalue)
else:
value.ensure_nonnull()
- ###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue)
self.emit_operation(op)
def optimize_NEW_WITH_VTABLE(self, op):
@@ -417,7 +415,6 @@
self.make_equal_to(op.result, itemvalue)
return
value.ensure_nonnull()
- ###self.heap_op_optimizer.optimize_GETARRAYITEM_GC(op, value)
self.emit_operation(op)
# note: the following line does not mean that the two operations are
@@ -432,7 +429,6 @@
value.setitem(indexbox.getint(), self.getvalue(op.getarg(2)))
return
value.ensure_nonnull()
- ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue)
self.emit_operation(op)
diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py
--- a/pypy/jit/metainterp/optimizeopt/virtualstate.py
+++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py
@@ -30,7 +30,7 @@
def _generate_guards(self, other, box, cpu, extra_guards):
raise InvalidLoop
- def enum_forced_boxes(self, boxes, value):
+ def enum_forced_boxes(self, boxes, value, optimizer):
raise NotImplementedError
def enum(self, virtual_state):
@@ -100,14 +100,14 @@
def _generalization_of(self, other):
raise NotImplementedError
- def enum_forced_boxes(self, boxes, value):
+ def enum_forced_boxes(self, boxes, value, optimizer):
assert isinstance(value, virtualize.AbstractVirtualStructValue)
assert value.is_virtual()
for i in range(len(self.fielddescrs)):
v = value._fields[self.fielddescrs[i]]
s = self.fieldstate[i]
if s.position > self.position:
- s.enum_forced_boxes(boxes, v)
+ s.enum_forced_boxes(boxes, v, optimizer)
def _enum(self, virtual_state):
for s in self.fieldstate:
@@ -177,14 +177,14 @@
return False
return True
- def enum_forced_boxes(self, boxes, value):
+ def enum_forced_boxes(self, boxes, value, optimizer):
assert isinstance(value, virtualize.VArrayValue)
assert value.is_virtual()
for i in range(len(self.fieldstate)):
v = value._items[i]
s = self.fieldstate[i]
if s.position > self.position:
- s.enum_forced_boxes(boxes, v)
+ s.enum_forced_boxes(boxes, v, optimizer)
def _enum(self, virtual_state):
for s in self.fieldstate:
@@ -316,11 +316,11 @@
import pdb; pdb.set_trace()
raise NotImplementedError
- def enum_forced_boxes(self, boxes, value):
+ def enum_forced_boxes(self, boxes, value, optimizer):
if self.level == LEVEL_CONSTANT:
return
assert 0 <= self.position_in_notvirtuals
- boxes[self.position_in_notvirtuals] = value.force_box()
+ boxes[self.position_in_notvirtuals] = value.force_box(optimizer)
def _enum(self, virtual_state):
if self.level == LEVEL_CONSTANT:
@@ -377,11 +377,13 @@
self.state[i].generate_guards(other.state[i], args[i],
cpu, extra_guards, renum)
- def make_inputargs(self, values, keyboxes=False):
+ def make_inputargs(self, values, optimizer, keyboxes=False):
+ if optimizer.optearlyforce:
+ optimizer = optimizer.optearlyforce
assert len(values) == len(self.state)
inputargs = [None] * len(self.notvirtuals)
for i in range(len(values)):
- self.state[i].enum_forced_boxes(inputargs, values[i])
+ self.state[i].enum_forced_boxes(inputargs, values[i], optimizer)
if keyboxes:
for i in range(len(values)):
@@ -434,7 +436,12 @@
def get_virtual_state(self, jump_args):
self.optimizer.force_at_end_of_preamble()
already_forced = {}
- values = [self.getvalue(box).force_at_end_of_preamble(already_forced)
+ if self.optimizer.optearlyforce:
+ opt = self.optimizer.optearlyforce
+ else:
+ opt = self.optimizer
+ values = [self.getvalue(box).force_at_end_of_preamble(already_forced,
+ opt)
for box in jump_args]
for value in values:
diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py
--- a/pypy/jit/metainterp/optimizeopt/vstring.py
+++ b/pypy/jit/metainterp/optimizeopt/vstring.py
@@ -43,7 +43,7 @@
class __extend__(optimizer.OptValue):
"""New methods added to the base class OptValue for this file."""
- def getstrlen(self, optimization, mode):
+ def getstrlen(self, string_optimizer, mode):
if mode is mode_string:
s = self.get_constant_string_spec(mode_string)
if s is not None:
@@ -52,12 +52,12 @@
s = self.get_constant_string_spec(mode_unicode)
if s is not None:
return ConstInt(len(s))
- if optimization is None:
+ if string_optimizer is None:
return None
self.ensure_nonnull()
- box = self.force_box()
+ box = self.force_box(string_optimizer)
lengthbox = BoxInt()
- optimization.propagate_forward(ResOperation(mode.STRLEN, [box], lengthbox))
+ string_optimizer.emit_operation(ResOperation(mode.STRLEN, [box], lengthbox))
return lengthbox
@specialize.arg(1)
@@ -68,25 +68,25 @@
else:
return None
- def string_copy_parts(self, optimizer, targetbox, offsetbox, mode):
+ def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode):
# Copies the pointer-to-string 'self' into the target string
# given by 'targetbox', at the specified offset. Returns the offset
# at the end of the copy.
- lengthbox = self.getstrlen(optimizer, mode)
- srcbox = self.force_box()
- return copy_str_content(optimizer, srcbox, targetbox,
+ lengthbox = self.getstrlen(string_optimizer, mode)
+ srcbox = self.force_box(string_optimizer)
+ return copy_str_content(string_optimizer, srcbox, targetbox,
CONST_0, offsetbox, lengthbox, mode)
class VAbstractStringValue(virtualize.AbstractVirtualValue):
_attrs_ = ('mode',)
- def __init__(self, optimizer, keybox, source_op, mode):
- virtualize.AbstractVirtualValue.__init__(self, optimizer, keybox,
+ def __init__(self, keybox, source_op, mode):
+ virtualize.AbstractVirtualValue.__init__(self, keybox,
source_op)
self.mode = mode
- def _really_force(self):
+ def _really_force(self, optforce):
if self.mode is mode_string:
s = self.get_constant_string_spec(mode_string)
if s is not None:
@@ -101,12 +101,12 @@
return
assert self.source_op is not None
self.box = box = self.source_op.result
- lengthbox = self.getstrlen(self.optimizer, self.mode)
+ lengthbox = self.getstrlen(optforce, self.mode)
op = ResOperation(self.mode.NEWSTR, [lengthbox], box)
if not we_are_translated():
op.name = 'FORCE'
- self.optimizer.emit_operation(op)
- self.string_copy_parts(self.optimizer, box, CONST_0, self.mode)
+ optforce.emit_operation(op)
+ self.string_copy_parts(optforce, box, CONST_0, self.mode)
class VStringPlainValue(VAbstractStringValue):
@@ -140,15 +140,20 @@
return mode.emptystr.join([mode.chr(c.box.getint())
for c in self._chars])
- def string_copy_parts(self, optimizer, targetbox, offsetbox, mode):
+ def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode):
+ if not self.is_virtual() and targetbox is not self.box:
+ lengthbox = self.getstrlen(string_optimizer, mode)
+ srcbox = self.force_box(string_optimizer)
+ return copy_str_content(string_optimizer, srcbox, targetbox,
+ CONST_0, offsetbox, lengthbox, mode)
for i in range(len(self._chars)):
- charbox = self._chars[i].force_box()
+ charbox = self._chars[i].force_box(string_optimizer)
if not (isinstance(charbox, Const) and charbox.same_constant(CONST_0)):
- optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox,
- offsetbox,
- charbox],
+ string_optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox,
+ offsetbox,
+ charbox],
None))
- offsetbox = _int_add(optimizer, offsetbox, CONST_1)
+ offsetbox = _int_add(string_optimizer, offsetbox, CONST_1)
return offsetbox
def get_args_for_fail(self, modifier):
@@ -182,16 +187,16 @@
self.left = left
self.right = right
- def getstrlen(self, optimizer, mode):
+ def getstrlen(self, string_optimizer, mode):
if self.lengthbox is None:
- len1box = self.left.getstrlen(optimizer, mode)
+ len1box = self.left.getstrlen(string_optimizer, mode)
if len1box is None:
return None
- len2box = self.right.getstrlen(optimizer, mode)
+ len2box = self.right.getstrlen(string_optimizer, mode)
if len2box is None:
return None
- self.lengthbox = _int_add(optimizer, len1box, len2box)
- # ^^^ may still be None, if optimizer is None
+ self.lengthbox = _int_add(string_optimizer, len1box, len2box)
+ # ^^^ may still be None, if string_optimizer is None
return self.lengthbox
@specialize.arg(1)
@@ -204,10 +209,10 @@
return None
return s1 + s2
- def string_copy_parts(self, optimizer, targetbox, offsetbox, mode):
- offsetbox = self.left.string_copy_parts(optimizer, targetbox,
+ def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode):
+ offsetbox = self.left.string_copy_parts(string_optimizer, targetbox,
offsetbox, mode)
- offsetbox = self.right.string_copy_parts(optimizer, targetbox,
+ offsetbox = self.right.string_copy_parts(string_optimizer, targetbox,
offsetbox, mode)
return offsetbox
@@ -246,8 +251,8 @@
self.vstart = vstart
self.vlength = vlength
- def getstrlen(self, _, mode):
- return self.vlength.force_box()
+ def getstrlen(self, optforce, mode):
+ return self.vlength.force_box(optforce)
@specialize.arg(1)
def get_constant_string_spec(self, mode):
@@ -262,11 +267,11 @@
return s1[start : start + length]
return None
- def string_copy_parts(self, optimizer, targetbox, offsetbox, mode):
- lengthbox = self.getstrlen(optimizer, mode)
- return copy_str_content(optimizer,
- self.vstr.force_box(), targetbox,
- self.vstart.force_box(), offsetbox,
+ def string_copy_parts(self, string_optimizer, targetbox, offsetbox, mode):
+ lengthbox = self.getstrlen(string_optimizer, mode)
+ return copy_str_content(string_optimizer,
+ self.vstr.force_box(string_optimizer), targetbox,
+ self.vstart.force_box(string_optimizer), offsetbox,
lengthbox, mode)
def get_args_for_fail(self, modifier):
@@ -295,7 +300,7 @@
return modifier.make_vstrslice(self.mode is mode_unicode)
-def copy_str_content(optimizer, srcbox, targetbox,
+def copy_str_content(string_optimizer, srcbox, targetbox,
srcoffsetbox, offsetbox, lengthbox, mode, need_next_offset=True):
if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const):
M = 5
@@ -305,26 +310,26 @@
# up to M characters are done "inline", i.e. with STRGETITEM/STRSETITEM
# instead of just a COPYSTRCONTENT.
for i in range(lengthbox.value):
- charbox = _strgetitem(optimizer, srcbox, srcoffsetbox, mode)
- srcoffsetbox = _int_add(optimizer, srcoffsetbox, CONST_1)
- optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox,
- offsetbox,
- charbox],
+ charbox = _strgetitem(string_optimizer, srcbox, srcoffsetbox, mode)
+ srcoffsetbox = _int_add(string_optimizer, srcoffsetbox, CONST_1)
+ string_optimizer.emit_operation(ResOperation(mode.STRSETITEM, [targetbox,
+ offsetbox,
+ charbox],
None))
- offsetbox = _int_add(optimizer, offsetbox, CONST_1)
+ offsetbox = _int_add(string_optimizer, offsetbox, CONST_1)
else:
if need_next_offset:
- nextoffsetbox = _int_add(optimizer, offsetbox, lengthbox)
+ nextoffsetbox = _int_add(string_optimizer, offsetbox, lengthbox)
else:
nextoffsetbox = None
op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox,
srcoffsetbox, offsetbox,
lengthbox], None)
- optimizer.emit_operation(op)
+ string_optimizer.emit_operation(op)
offsetbox = nextoffsetbox
return offsetbox
-def _int_add(optimizer, box1, box2):
+def _int_add(string_optimizer, box1, box2):
if isinstance(box1, ConstInt):
if box1.value == 0:
return box2
@@ -332,23 +337,23 @@
return ConstInt(box1.value + box2.value)
elif isinstance(box2, ConstInt) and box2.value == 0:
return box1
- if optimizer is None:
+ if string_optimizer is None:
return None
resbox = BoxInt()
- optimizer.propagate_forward(ResOperation(rop.INT_ADD, [box1, box2], resbox))
+ string_optimizer.emit_operation(ResOperation(rop.INT_ADD, [box1, box2], resbox))
return resbox
-def _int_sub(optimizer, box1, box2):
+def _int_sub(string_optimizer, box1, box2):
if isinstance(box2, ConstInt):
if box2.value == 0:
return box1
if isinstance(box1, ConstInt):
return ConstInt(box1.value - box2.value)
resbox = BoxInt()
- optimizer.propagate_forward(ResOperation(rop.INT_SUB, [box1, box2], resbox))
+ string_optimizer.emit_operation(ResOperation(rop.INT_SUB, [box1, box2], resbox))
return resbox
-def _strgetitem(optimizer, strbox, indexbox, mode):
+def _strgetitem(string_optimizer, strbox, indexbox, mode):
if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt):
if mode is mode_string:
s = strbox.getref(lltype.Ptr(rstr.STR))
@@ -357,30 +362,28 @@
s = strbox.getref(lltype.Ptr(rstr.UNICODE))
return ConstInt(ord(s.chars[indexbox.getint()]))
resbox = BoxInt()
- optimizer.propagate_forward(ResOperation(mode.STRGETITEM, [strbox, indexbox],
- resbox))
+ string_optimizer.emit_operation(ResOperation(mode.STRGETITEM, [strbox, indexbox],
+ resbox))
return resbox
class OptString(optimizer.Optimization):
"Handling of strings and unicodes."
- enabled = True
-
def new(self):
return OptString()
def make_vstring_plain(self, box, source_op, mode):
- vvalue = VStringPlainValue(self.optimizer, box, source_op, mode)
+ vvalue = VStringPlainValue(box, source_op, mode)
self.make_equal_to(box, vvalue)
return vvalue
def make_vstring_concat(self, box, source_op, mode):
- vvalue = VStringConcatValue(self.optimizer, box, source_op, mode)
+ vvalue = VStringConcatValue(box, source_op, mode)
self.make_equal_to(box, vvalue)
return vvalue
def make_vstring_slice(self, box, source_op, mode):
- vvalue = VStringSliceValue(self.optimizer, box, source_op, mode)
+ vvalue = VStringSliceValue(box, source_op, mode)
self.make_equal_to(box, vvalue)
return vvalue
@@ -430,9 +433,9 @@
value.ensure_nonnull()
#
if value.is_virtual() and isinstance(value, VStringSliceValue):
- fullindexbox = _int_add(self.optimizer,
- value.vstart.force_box(),
- vindex.force_box())
+ fullindexbox = _int_add(self,
+ value.vstart.force_box(self),
+ vindex.force_box(self))
value = value.vstr
vindex = self.getvalue(fullindexbox)
#
@@ -444,7 +447,7 @@
if res is not optimizer.CVAL_UNINITIALIZED_ZERO:
return res
#
- resbox = _strgetitem(self.optimizer, value.force_box(), vindex.force_box(), mode)
+ resbox = _strgetitem(self, value.force_box(self), vindex.force_box(self), mode)
return self.getvalue(resbox)
def optimize_STRLEN(self, op):
@@ -454,7 +457,7 @@
def _optimize_STRLEN(self, op, mode):
value = self.getvalue(op.getarg(0))
- lengthbox = value.getstrlen(self.optimizer, mode)
+ lengthbox = value.getstrlen(self, mode)
self.make_equal_to(op.result, self.getvalue(lengthbox))
def optimize_COPYSTRCONTENT(self, op):
@@ -472,12 +475,12 @@
if length.is_constant() and length.box.getint() == 0:
return
- copy_str_content(self.optimizer,
- src.force_box(),
- dst.force_box(),
- srcstart.force_box(),
- dststart.force_box(),
- length.force_box(),
+ copy_str_content(self,
+ src.force_box(self),
+ dst.force_box(self),
+ srcstart.force_box(self),
+ dststart.force_box(self),
+ length.force_box(self),
mode, need_next_offset=False
)
@@ -503,6 +506,8 @@
return
self.emit_operation(op)
+ optimize_CALL_PURE = optimize_CALL
+
def opt_call_str_STR2UNICODE(self, op):
# Constant-fold unicode("constant string").
# More generally, supporting non-constant but virtual cases is
@@ -542,16 +547,16 @@
return True
#
vstr.ensure_nonnull()
- lengthbox = _int_sub(self.optimizer, vstop.force_box(),
- vstart.force_box())
+ lengthbox = _int_sub(self, vstop.force_box(self),
+ vstart.force_box(self))
#
if isinstance(vstr, VStringSliceValue):
# double slicing s[i:j][k:l]
vintermediate = vstr
vstr = vintermediate.vstr
- startbox = _int_add(self.optimizer,
- vintermediate.vstart.force_box(),
- vstart.force_box())
+ startbox = _int_add(self,
+ vintermediate.vstart.force_box(self),
+ vstart.force_box(self))
vstart = self.getvalue(startbox)
#
value = self.make_vstring_slice(op.result, op, mode)
@@ -589,8 +594,8 @@
do = EffectInfo.OS_STREQ_LENGTHOK
else:
do = EffectInfo.OS_STREQ_NONNULL
- self.generate_modified_call(do, [v1.force_box(),
- v2.force_box()], op.result, mode)
+ self.generate_modified_call(do, [v1.force_box(self),
+ v2.force_box(self)], op.result, mode)
return True
return False
@@ -598,7 +603,7 @@
l2box = v2.getstrlen(None, mode)
if isinstance(l2box, ConstInt):
if l2box.value == 0:
- lengthbox = v1.getstrlen(self.optimizer, mode)
+ lengthbox = v1.getstrlen(self, mode)
seo = self.optimizer.send_extra_operation
seo(ResOperation(rop.INT_EQ, [lengthbox, CONST_0], resultbox))
return True
@@ -609,17 +614,17 @@
vchar1 = self.strgetitem(v1, optimizer.CVAL_ZERO, mode)
vchar2 = self.strgetitem(v2, optimizer.CVAL_ZERO, mode)
seo = self.optimizer.send_extra_operation
- seo(ResOperation(rop.INT_EQ, [vchar1.force_box(),
- vchar2.force_box()],
+ seo(ResOperation(rop.INT_EQ, [vchar1.force_box(self),
+ vchar2.force_box(self)],
resultbox))
return True
if isinstance(v1, VStringSliceValue):
vchar = self.strgetitem(v2, optimizer.CVAL_ZERO, mode)
do = EffectInfo.OS_STREQ_SLICE_CHAR
- self.generate_modified_call(do, [v1.vstr.force_box(),
- v1.vstart.force_box(),
- v1.vlength.force_box(),
- vchar.force_box()],
+ self.generate_modified_call(do, [v1.vstr.force_box(self),
+ v1.vstart.force_box(self),
+ v1.vlength.force_box(self),
+ vchar.force_box(self)],
resultbox, mode)
return True
#
@@ -630,10 +635,10 @@
if v1.is_null():
self.make_constant(resultbox, CONST_1)
return True
- op = ResOperation(rop.PTR_EQ, [v1.force_box(),
+ op = ResOperation(rop.PTR_EQ, [v1.force_box(self),
llhelper.CONST_NULL],
resultbox)
- self.optimizer.emit_operation(op)
+ self.emit_operation(op)
return True
#
return False
@@ -647,8 +652,8 @@
do = EffectInfo.OS_STREQ_NONNULL_CHAR
else:
do = EffectInfo.OS_STREQ_CHECKNULL_CHAR
- self.generate_modified_call(do, [v1.force_box(),
- vchar.force_box()], resultbox,
+ self.generate_modified_call(do, [v1.force_box(self),
+ vchar.force_box(self)], resultbox,
mode)
return True
#
@@ -657,10 +662,10 @@
do = EffectInfo.OS_STREQ_SLICE_NONNULL
else:
do = EffectInfo.OS_STREQ_SLICE_CHECKNULL
- self.generate_modified_call(do, [v1.vstr.force_box(),
- v1.vstart.force_box(),
- v1.vlength.force_box(),
- v2.force_box()], resultbox, mode)
+ self.generate_modified_call(do, [v1.vstr.force_box(self),
+ v1.vstart.force_box(self),
+ v1.vlength.force_box(self),
+ v2.force_box(self)], resultbox, mode)
return True
return False
@@ -670,16 +675,11 @@
calldescr, func = cic.callinfo_for_oopspec(oopspecindex)
op = ResOperation(rop.CALL, [ConstInt(func)] + args, result,
descr=calldescr)
- self.optimizer.emit_operation(op)
+ self.emit_operation(op)
def propagate_forward(self, op):
- if not self.enabled:
- self.emit_operation(op)
- return
-
dispatch_opt(self, op)
-
dispatch_opt = make_dispatcher_method(OptString, 'optimize_',
default=OptString.emit_operation)
diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py
--- a/pypy/jit/metainterp/resoperation.py
+++ b/pypy/jit/metainterp/resoperation.py
@@ -221,7 +221,6 @@
newop.setfailargs(self.getfailargs())
return newop
-
# ============
# arity mixins
# ============
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -2956,6 +2956,18 @@
assert res == f(32)
self.check_loops(arraylen_gc=2)
+ def test_ulonglong_mod(self):
+ myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'i'])
+ def f(n):
+ sa = i = rffi.cast(rffi.ULONGLONG, 1)
+ while i < rffi.cast(rffi.ULONGLONG, n):
+ myjitdriver.jit_merge_point(sa=sa, n=n, i=i)
+ sa += sa % i
+ i += 1
+ res = self.meta_interp(f, [32])
+ assert res == f(32)
+
+
class TestOOtype(BasicTests, OOJitMixin):
def test_oohash(self):
@@ -3408,6 +3420,75 @@
assert res == main(1, 10)
self.check_loops(call=0)
+ def test_setarrayitem_followed_by_arraycopy(self):
+ myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'x', 'y'])
+ def f(n):
+ sa = 0
+ x = [1,2,n]
+ y = [1,2,3]
+ while n > 0:
+ myjitdriver.jit_merge_point(sa=sa, n=n, x=x, y=y)
+ y[0] = n
+ x[0:3] = y
+ sa += x[0]
+ n -= 1
+ return sa
+ res = self.meta_interp(f, [16])
+ assert res == f(16)
+
+
class TestLLtype(BaseLLtypeTests, LLJitMixin):
- pass
+ def test_tagged(self):
+ py.test.skip("implement me")
+ from pypy.rlib.objectmodel import UnboxedValue
+ class Base(object):
+ __slots__ = ()
+
+ class Int(UnboxedValue, Base):
+ __slots__ = ["a"]
+
+ def is_pos(self):
+ return self.a > 0
+
+ def dec(self):
+ return Int(self.a - 1)
+
+
+ class Float(Base):
+ def __init__(self, a):
+ self.a = a
+
+ def is_pos(self):
+ return self.a > 0
+
+ def dec(self):
+ return Float(self.a - 1)
+
+ driver = JitDriver(greens=['pc', 's'], reds=['o'])
+
+ def main(fl, n, s):
+ if s:
+ s = "--j"
+ else:
+ s = "---j"
+ if fl:
+ o = Float(float(n))
+ else:
+ o = Int(n)
+ pc = 0
+ while True:
+ driver.jit_merge_point(s=s, pc=pc, o=o)
+ c = s[pc]
+ if c == "j":
+ driver.can_enter_jit(s=s, pc=pc, o=o)
+ if o.is_pos():
+ pc = 0
+ continue
+ else:
+ break
+ elif c == "-":
+ o = o.dec()
+ pc += 1
+ return pc
+ res = self.meta_interp(main, [False, 100, True], taggedpointers=True)
diff --git a/pypy/jit/metainterp/test/test_longlong.py b/pypy/jit/metainterp/test/test_longlong.py
--- a/pypy/jit/metainterp/test/test_longlong.py
+++ b/pypy/jit/metainterp/test/test_longlong.py
@@ -118,6 +118,26 @@
res = self.interp_operations(f, [1000000000])
assert res == 123500000000.0
+ def test_floats_negative(self):
+ def f(i):
+ # i == 1000000000
+ f = i * -123.5
+ n = r_longlong(f)
+ compare(n, -29, 1054051584)
+ return float(n)
+ res = self.interp_operations(f, [1000000000])
+ assert res == -123500000000.0
+
+ def test_floats_ulonglong(self):
+ def f(i):
+ # i == 1000000000
+ f = i * 12350000000.0
+ n = r_ulonglong(f)
+ compare(n, -1419508847, 538116096)
+ return float(n)
+ res = self.interp_operations(f, [1000000000])
+ assert res == 12350000000000000000.0
+
def test_unsigned_compare_ops(self):
def f(n1, n2):
# n == 30002000000000
diff --git a/pypy/jit/metainterp/test/test_resume.py b/pypy/jit/metainterp/test/test_resume.py
--- a/pypy/jit/metainterp/test/test_resume.py
+++ b/pypy/jit/metainterp/test/test_resume.py
@@ -576,8 +576,9 @@
class FakeOptimizer_VirtualValue(object):
- class cpu:
- pass
+ class optimizer:
+ class cpu:
+ pass
fakeoptimizer = FakeOptimizer_VirtualValue()
def ConstAddr(addr, cpu): # compatibility
@@ -1135,12 +1136,7 @@
modifier.liveboxes = {}
modifier.vfieldboxes = {}
- class FakeOptimizer(object):
- class cpu:
- pass
- def new_const_item(self, descr):
- return None
- v2 = VArrayValue(FakeOptimizer(), LLtypeMixin.arraydescr, 2, b2s)
+ v2 = VArrayValue(LLtypeMixin.arraydescr, None, 2, b2s)
v2._items = [b4s, c1s]
modifier.register_virtual_fields(b2s, [b4s, c1s])
liveboxes = []
diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py
--- a/pypy/jit/metainterp/test/test_string.py
+++ b/pypy/jit/metainterp/test/test_string.py
@@ -484,6 +484,30 @@
return len(sa.val)
assert self.meta_interp(f, ['a']) == f('a')
+ def test_string_comepare_quasiimmutable(self):
+ class Sys(object):
+ _immutable_fields_ = ["defaultencoding?"]
+ def __init__(self, s):
+ self.defaultencoding = s
+ _str = self._str
+ sys = Sys(_str('ascii'))
+ mydriver = JitDriver(reds = ['n', 'sa'], greens = [])
+ def f(n):
+ sa = 0
+ sys.defaultencoding = _str('ascii')
+ while n:
+ mydriver.jit_merge_point(n=n, sa=sa)
+ if sys.defaultencoding == _str('ascii'):
+ sa += 1
+ n -= 1
+ sys.defaultencoding = _str('utf-8')
+ return sa
+ assert self.meta_interp(f, [8]) == f(8)
+ self.check_loops({'int_add': 1, 'guard_true': 1, 'int_sub': 1,
+ 'jump': 1, 'int_is_true': 1,
+ 'guard_not_invalidated': 1})
+
+
#class TestOOtype(StringTests, OOJitMixin):
# CALL = "oosend"
# CALL_PURE = "oosend_pure"
@@ -575,4 +599,18 @@
n -= 1
return result
res = self.meta_interp(main, [9])
- assert res == main(9)
\ No newline at end of file
+ assert res == main(9)
+
+ def test_virtual_copystringcontent2(self):
+ jitdriver = JitDriver(reds=['n', 'result'], greens=[])
+ def main(n):
+ result = 0
+ while n >= 0:
+ jitdriver.jit_merge_point(n=n, result=result)
+ b = StringBuilder(6)
+ b.append("Hello!")
+ result += ord((b.build() + "xyz")[0])
+ n -= 1
+ return result
+ res = self.meta_interp(main, [9])
+ assert res == main(9)
diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py
--- a/pypy/jit/metainterp/test/test_virtualstate.py
+++ b/pypy/jit/metainterp/test/test_virtualstate.py
@@ -431,7 +431,7 @@
class BaseTestBridges(BaseTest):
- enable_opts = "intbounds:rewrite:virtualize:string:heap:unroll"
+ enable_opts = "intbounds:rewrite:virtualize:string:pure:heap:unroll"
def _do_optimize_bridge(self, bridge, call_pure_results):
from pypy.jit.metainterp.optimizeopt import optimize_bridge_1, build_opt_chain
diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py
--- a/pypy/jit/metainterp/warmspot.py
+++ b/pypy/jit/metainterp/warmspot.py
@@ -53,6 +53,8 @@
extraconfigopts = {'translation.list_comprehension_operations': True}
else:
extraconfigopts = {}
+ if kwds.pop("taggedpointers", False):
+ extraconfigopts["translation.taggedpointers"] = True
interp, graph = get_interpreter(function, args,
backendopt=False, # will be done below
type_system=type_system,
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -3,13 +3,13 @@
"""
+from pypy.interpreter.baseobjspace import Wrappable
from pypy.interpreter.error import OperationError
-from pypy.interpreter.gateway import NoneNotWrapped
-from pypy.interpreter.gateway import interp2app, unwrap_spec
+from pypy.interpreter.gateway import NoneNotWrapped, interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef
-from pypy.interpreter.baseobjspace import Wrappable
+from pypy.rlib import jit
+from pypy.rlib.objectmodel import specialize
from pypy.rlib.rarithmetic import r_uint, intmask
-from pypy.rlib.objectmodel import specialize
from pypy.rlib.rbigint import rbigint
@@ -134,29 +134,15 @@
@specialize.arg(2)
+ at jit.look_inside_iff(lambda space, args, implementation_of:
+ jit.isconstant(len(args.arguments_w)) and
+ len(args.arguments_w) == 2
+)
def min_max(space, args, implementation_of):
if implementation_of == "max":
compare = space.gt
else:
compare = space.lt
-
- args_w = args.arguments_w
- if len(args_w) == 2 and not args.keywords:
- # simple case, suitable for the JIT
- w_arg0, w_arg1 = args_w
- if space.is_true(compare(w_arg0, w_arg1)):
- return w_arg0
- else:
- return w_arg1
- else:
- return min_max_loop(space, args, implementation_of)
-
- at specialize.arg(2)
-def min_max_loop(space, args, implementation_of):
- if implementation_of == "max":
- compare = space.gt
- else:
- compare = space.lt
args_w = args.arguments_w
if len(args_w) > 1:
w_sequence = space.newtuple(args_w)
diff --git a/pypy/module/_continuation/__init__.py b/pypy/module/_continuation/__init__.py
--- a/pypy/module/_continuation/__init__.py
+++ b/pypy/module/_continuation/__init__.py
@@ -37,4 +37,5 @@
interpleveldefs = {
'continulet': 'interp_continuation.W_Continulet',
'permute': 'interp_continuation.permute',
+ '_p': 'interp_continuation.unpickle', # pickle support
}
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py
--- a/pypy/module/_continuation/interp_continuation.py
+++ b/pypy/module/_continuation/interp_continuation.py
@@ -6,6 +6,7 @@
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.pycode import PyCode
+from pypy.interpreter.pyframe import PyFrame
class W_Continulet(Wrappable):
@@ -23,24 +24,27 @@
if ec.stacklet_thread is not self.sthread:
global_state.clear()
raise geterror(self.space, "inter-thread support is missing")
- return ec
def descr_init(self, w_callable, __args__):
if self.sthread is not None:
raise geterror(self.space, "continulet already __init__ialized")
+ #
+ # hackish: build the frame "by hand", passing it the correct arguments
+ space = self.space
+ w_args, w_kwds = __args__.topacked()
+ bottomframe = space.createframe(get_entrypoint_pycode(space),
+ get_w_module_dict(space), None)
+ bottomframe.locals_stack_w[0] = space.wrap(self)
+ bottomframe.locals_stack_w[1] = w_callable
+ bottomframe.locals_stack_w[2] = w_args
+ bottomframe.locals_stack_w[3] = w_kwds
+ self.bottomframe = bottomframe
+ #
global_state.origin = self
- global_state.w_callable = w_callable
- global_state.args = __args__
- self.bottomframe = make_fresh_frame(self.space)
- self.sthread = build_sthread(self.space)
- try:
- self.h = self.sthread.new(new_stacklet_callback)
- if self.sthread.is_empty_handle(self.h): # early return
- raise MemoryError
- except MemoryError:
- self.sthread = None
- global_state.clear()
- raise getmemoryerror(self.space)
+ sthread = build_sthread(self.space)
+ self.sthread = sthread
+ h = sthread.new(new_stacklet_callback)
+ post_switch(sthread, h)
def switch(self, w_to):
sthread = self.sthread
@@ -66,7 +70,7 @@
if sthread.is_empty_handle(to.h):
global_state.clear()
raise geterror(self.space, "continulet already finished")
- ec = self.check_sthread()
+ self.check_sthread()
#
global_state.origin = self
if to is None:
@@ -76,13 +80,8 @@
# double switch: the final destination is to.h
global_state.destination = to
#
- try:
- do_switch(sthread, global_state.destination.h)
- except MemoryError:
- global_state.clear()
- raise getmemoryerror(self.space)
- #
- return get_result()
+ h = sthread.switch(global_state.destination.h)
+ return post_switch(sthread, h)
def descr_switch(self, w_value=None, w_to=None):
global_state.w_value = w_value
@@ -109,12 +108,26 @@
and not self.sthread.is_empty_handle(self.h))
return self.space.newbool(valid)
+ def descr__reduce__(self):
+ from pypy.module._continuation import interp_pickle
+ return interp_pickle.reduce(self)
+
+ def descr__setstate__(self, w_args):
+ from pypy.module._continuation import interp_pickle
+ interp_pickle.setstate(self, w_args)
+
def W_Continulet___new__(space, w_subtype, __args__):
r = space.allocate_instance(W_Continulet, w_subtype)
r.__init__(space)
return space.wrap(r)
+def unpickle(space, w_subtype):
+ """Pickle support."""
+ r = space.allocate_instance(W_Continulet, w_subtype)
+ r.__init__(space)
+ return space.wrap(r)
+
W_Continulet.typedef = TypeDef(
'continulet',
@@ -124,9 +137,10 @@
switch = interp2app(W_Continulet.descr_switch),
throw = interp2app(W_Continulet.descr_throw),
is_pending = interp2app(W_Continulet.descr_is_pending),
+ __reduce__ = interp2app(W_Continulet.descr__reduce__),
+ __setstate__= interp2app(W_Continulet.descr__setstate__),
)
-
# ____________________________________________________________
# Continulet objects maintain a dummy frame object in order to ensure
@@ -135,27 +149,40 @@
class State:
def __init__(self, space):
- from pypy.interpreter.astcompiler.consts import CO_OPTIMIZED
- self.space = space
+ self.space = space
w_module = space.getbuiltinmodule('_continuation')
self.w_error = space.getattr(w_module, space.wrap('error'))
- self.w_memoryerror = OperationError(space.w_MemoryError, space.w_None)
- self.dummy_pycode = PyCode(space, 0, 0, 0, CO_OPTIMIZED,
- '', [], [], [], '',
- '<bottom of continulet>', 0, '', [], [],
- hidden_applevel=True)
+ # the following function switches away immediately, so that
+ # continulet.__init__() doesn't immediately run func(), but it
+ # also has the hidden purpose of making sure we have a single
+ # bottomframe for the whole duration of the continulet's run.
+ # Hackish: only the func_code is used, and used in the context
+ # of w_globals == this module, so we can access the name
+ # 'continulet' directly.
+ w_code = space.appexec([], '''():
+ def start(c, func, args, kwds):
+ if continulet.switch(c) is not None:
+ raise TypeError(
+ "can\'t send non-None value to a just-started continulet")
+ return func(c, *args, **kwds)
+ return start.func_code
+ ''')
+ self.entrypoint_pycode = space.interp_w(PyCode, w_code)
+ self.entrypoint_pycode.hidden_applevel = True
+ self.w_unpickle = w_module.get('_p')
+ self.w_module_dict = w_module.getdict(space)
def geterror(space, message):
cs = space.fromcache(State)
return OperationError(cs.w_error, space.wrap(message))
-def getmemoryerror(space):
+def get_entrypoint_pycode(space):
cs = space.fromcache(State)
- return cs.w_memoryerror
+ return cs.entrypoint_pycode
-def make_fresh_frame(space):
+def get_w_module_dict(space):
cs = space.fromcache(State)
- return space.FrameClass(space, cs.dummy_pycode, None, None)
+ return cs.w_module_dict
# ____________________________________________________________
@@ -166,6 +193,9 @@
StackletThread.__init__(self, space.config)
self.space = space
self.ec = ec
+ # for unpickling
+ from pypy.rlib.rweakref import RWeakKeyDictionary
+ self.frame2continulet = RWeakKeyDictionary(PyFrame, W_Continulet)
ExecutionContext.stacklet_thread = None
@@ -176,8 +206,6 @@
def clear(self):
self.origin = None
self.destination = None
- self.w_callable = None
- self.args = None
self.w_value = None
self.propagate_exception = None
global_state = GlobalState()
@@ -185,27 +213,13 @@
def new_stacklet_callback(h, arg):
- self = global_state.origin
- w_callable = global_state.w_callable
- args = global_state.args
+ self = global_state.origin
+ self.h = h
global_state.clear()
- try:
- do_switch(self.sthread, h)
- except MemoryError:
- return h # oups! do an early return in this case
- #
space = self.space
try:
- assert self.sthread.ec.topframeref() is None
- self.sthread.ec.topframeref = jit.non_virtual_ref(self.bottomframe)
- if global_state.propagate_exception is not None:
- raise global_state.propagate_exception # just propagate it further
- if global_state.w_value is not space.w_None:
- raise OperationError(space.w_TypeError, space.wrap(
- "can't send non-None value to a just-started continulet"))
-
- args = args.prepend(self.space.wrap(self))
- w_result = space.call_args(w_callable, args)
+ frame = self.bottomframe
+ w_result = frame.execute_frame()
except Exception, e:
global_state.propagate_exception = e
else:
@@ -215,9 +229,7 @@
global_state.destination = self
return self.h
-
-def do_switch(sthread, h):
- h = sthread.switch(h)
+def post_switch(sthread, h):
origin = global_state.origin
self = global_state.destination
global_state.origin = None
@@ -228,6 +240,8 @@
sthread.ec.topframeref = self.bottomframe.f_backref
self.bottomframe.f_backref = origin.bottomframe.f_backref
origin.bottomframe.f_backref = current
+ #
+ return get_result()
def get_result():
if global_state.propagate_exception:
diff --git a/pypy/module/_continuation/interp_pickle.py b/pypy/module/_continuation/interp_pickle.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_continuation/interp_pickle.py
@@ -0,0 +1,128 @@
+from pypy.tool import stdlib_opcode as pythonopcode
+from pypy.rlib import jit
+from pypy.interpreter.error import OperationError
+from pypy.interpreter.pyframe import PyFrame
+from pypy.module._continuation.interp_continuation import State, global_state
+from pypy.module._continuation.interp_continuation import build_sthread
+from pypy.module._continuation.interp_continuation import post_switch
+from pypy.module._continuation.interp_continuation import get_result, geterror
+
+
+def getunpickle(space):
+ cs = space.fromcache(State)
+ return cs.w_unpickle
+
+
+def reduce(self):
+ # xxx this is known to be not completely correct with respect
+ # to subclasses, e.g. no __slots__ support, no looking for a
+ # __getnewargs__ or __getstate__ defined in the subclass, etc.
+ # Doing the right thing looks involved, though...
+ space = self.space
+ if self.sthread is None:
+ w_frame = space.w_False
+ elif self.sthread.is_empty_handle(self.h):
+ w_frame = space.w_None
+ else:
+ w_frame = space.wrap(self.bottomframe)
+ w_continulet_type = space.type(space.wrap(self))
+ w_dict = self.getdict(space) or space.w_None
+ args = [getunpickle(space),
+ space.newtuple([w_continulet_type]),
+ space.newtuple([w_frame, w_dict]),
+ ]
+ return space.newtuple(args)
+
+def setstate(self, w_args):
+ space = self.space
+ if self.sthread is not None:
+ raise geterror(space, "continulet.__setstate__() on an already-"
+ "initialized continulet")
+ w_frame, w_dict = space.fixedview(w_args, expected_length=2)
+ if not space.is_w(w_dict, space.w_None):
+ self.setdict(space, w_dict)
+ if space.is_w(w_frame, space.w_False):
+ return # not initialized
+ sthread = build_sthread(self.space)
+ self.sthread = sthread
+ self.bottomframe = space.interp_w(PyFrame, w_frame, can_be_None=True)
+ #
+ global_state.origin = self
+ if self.bottomframe is not None:
+ sthread.frame2continulet.set(self.bottomframe, self)
+ self.h = sthread.new(resume_trampoline_callback)
+ get_result() # propagate the eventual MemoryError
+
+# ____________________________________________________________
+
+def resume_trampoline_callback(h, arg):
+ self = global_state.origin
+ self.h = h
+ space = self.space
+ sthread = self.sthread
+ try:
+ global_state.clear()
+ if self.bottomframe is None:
+ w_result = space.w_None
+ else:
+ h = sthread.switch(self.h)
+ try:
+ w_result = post_switch(sthread, h)
+ operr = None
+ except OperationError, e:
+ w_result = None
+ operr = e
+ #
+ while True:
+ ec = sthread.ec
+ frame = ec.topframeref()
+ assert frame is not None # XXX better error message
+ exit_continulet = sthread.frame2continulet.get(frame)
+ #
+ continue_after_call(frame)
+ #
+ # small hack: unlink frame out of the execution context,
+ # because execute_frame will add it there again
+ ec.topframeref = frame.f_backref
+ #
+ try:
+ w_result = frame.execute_frame(w_result, operr)
+ operr = None
+ except OperationError, e:
+ w_result = None
+ operr = e
+ if exit_continulet is not None:
+ self = exit_continulet
+ break
+ sthread.ec.topframeref = jit.vref_None
+ if operr:
+ raise operr
+ except Exception, e:
+ global_state.propagate_exception = e
+ else:
+ global_state.w_value = w_result
+ global_state.origin = self
+ global_state.destination = self
+ return self.h
+
+def continue_after_call(frame):
+ code = frame.pycode.co_code
+ instr = frame.last_instr
+ opcode = ord(code[instr])
+ map = pythonopcode.opmap
+ call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'],
+ map['CALL_FUNCTION_VAR'], map['CALL_FUNCTION_VAR_KW'],
+ map['CALL_METHOD']]
+ assert opcode in call_ops # XXX check better, and complain better
+ instr += 1
+ oparg = ord(code[instr]) | ord(code[instr + 1]) << 8
+ nargs = oparg & 0xff
+ nkwds = (oparg >> 8) & 0xff
+ if nkwds == 0: # only positional arguments
+ # fast paths leaves things on the stack, pop them
+ if (frame.space.config.objspace.opcodes.CALL_METHOD and
+ opcode == map['CALL_METHOD']):
+ frame.dropvalues(nargs + 2)
+ elif opcode == map['CALL_FUNCTION']:
+ frame.dropvalues(nargs + 1)
+ frame.last_instr = instr + 1 # continue after the call
diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py
--- a/pypy/module/_continuation/test/test_stacklet.py
+++ b/pypy/module/_continuation/test/test_stacklet.py
@@ -13,7 +13,7 @@
from _continuation import continulet
#
def empty_callback(c):
- pass
+ never_called
#
c = continulet(empty_callback)
assert type(c) is continulet
@@ -36,7 +36,7 @@
from _continuation import continulet, error
#
def empty_callback(c1):
- pass
+ never_called
#
c = continulet(empty_callback)
raises(error, c.__init__, empty_callback)
diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_continuation/test/test_zpickle.py
@@ -0,0 +1,262 @@
+from pypy.conftest import gettestobjspace
+
+
+class AppTestCopy:
+ def setup_class(cls):
+ cls.space = gettestobjspace(usemodules=('_continuation',),
+ CALL_METHOD=True)
+ cls.space.config.translation.continuation = True
+
+ def test_basic_setup(self):
+ from _continuation import continulet
+ lst = [4]
+ co = continulet(lst.append)
+ assert lst == [4]
+ res = co.switch()
+ assert res is None
+ assert lst == [4, co]
+
+ def test_copy_continulet_not_started(self):
+ from _continuation import continulet, error
+ import copy
+ lst = []
+ co = continulet(lst.append)
+ co2, lst2 = copy.deepcopy((co, lst))
+ #
+ assert lst == []
+ co.switch()
+ assert lst == [co]
+ #
+ assert lst2 == []
+ co2.switch()
+ assert lst2 == [co2]
+
+ def test_copy_continulet_not_started_multiple(self):
+ from _continuation import continulet, error
+ import copy
+ lst = []
+ co = continulet(lst.append)
+ co2, lst2 = copy.deepcopy((co, lst))
+ co3, lst3 = copy.deepcopy((co, lst))
+ co4, lst4 = copy.deepcopy((co, lst))
+ #
+ assert lst == []
+ co.switch()
+ assert lst == [co]
+ #
+ assert lst2 == []
+ co2.switch()
+ assert lst2 == [co2]
+ #
+ assert lst3 == []
+ co3.switch()
+ assert lst3 == [co3]
+ #
+ assert lst4 == []
+ co4.switch()
+ assert lst4 == [co4]
+
+ def test_copy_continulet_real(self):
+ import new, sys
+ mod = new.module('test_copy_continulet_real')
+ sys.modules['test_copy_continulet_real'] = mod
+ exec '''if 1:
+ from _continuation import continulet
+ import copy
+ def f(co, x):
+ co.switch(x + 1)
+ co.switch(x + 2)
+ return x + 3
+ co = continulet(f, 40)
+ res = co.switch()
+ assert res == 41
+ co2 = copy.deepcopy(co)
+ #
+ res = co2.switch()
+ assert res == 42
+ assert co2.is_pending()
+ res = co2.switch()
+ assert res == 43
+ assert not co2.is_pending()
+ #
+ res = co.switch()
+ assert res == 42
+ assert co.is_pending()
+ res = co.switch()
+ assert res == 43
+ assert not co.is_pending()
+ ''' in mod.__dict__
+
+ def test_copy_continulet_already_finished(self):
+ from _continuation import continulet, error
+ import copy
+ lst = []
+ co = continulet(lst.append)
+ co.switch()
+ co2 = copy.deepcopy(co)
+ assert not co.is_pending()
+ assert not co2.is_pending()
+ raises(error, co.__init__, lst.append)
+ raises(error, co2.__init__, lst.append)
+ raises(error, co.switch)
+ raises(error, co2.switch)
+
+
+class AppTestPickle:
+ version = 0
+
+ def setup_class(cls):
+ cls.space = gettestobjspace(usemodules=('_continuation',),
+ CALL_METHOD=True)
+ cls.space.appexec([], """():
+ global continulet, A, __name__
+
+ import sys
+ __name__ = 'test_pickle_continulet'
+ thismodule = type(sys)(__name__)
+ sys.modules[__name__] = thismodule
+
+ from _continuation import continulet
+ class A(continulet):
+ pass
+
+ thismodule.__dict__.update(globals())
+ """)
+ cls.w_version = cls.space.wrap(cls.version)
+
+ def test_pickle_continulet_empty(self):
+ from _continuation import continulet
+ lst = [4]
+ co = continulet.__new__(continulet)
+ import pickle
+ pckl = pickle.dumps(co, self.version)
+ print repr(pckl)
+ co2 = pickle.loads(pckl)
+ assert co2 is not co
+ assert not co.is_pending()
+ assert not co2.is_pending()
+ # the empty unpickled coroutine can still be used:
+ result = [5]
+ co2.__init__(result.append)
+ res = co2.switch()
+ assert res is None
+ assert result == [5, co2]
+
+ def test_pickle_continulet_empty_subclass(self):
+ from test_pickle_continulet import continulet, A
+ lst = [4]
+ co = continulet.__new__(A)
+ co.foo = 'bar'
+ co.bar = 'baz'
+ import pickle
+ pckl = pickle.dumps(co, self.version)
+ print repr(pckl)
+ co2 = pickle.loads(pckl)
+ assert co2 is not co
+ assert not co.is_pending()
+ assert not co2.is_pending()
+ assert type(co) is type(co2) is A
+ assert co.foo == co2.foo == 'bar'
+ assert co.bar == co2.bar == 'baz'
+ # the empty unpickled coroutine can still be used:
+ result = [5]
+ co2.__init__(result.append)
+ res = co2.switch()
+ assert res is None
+ assert result == [5, co2]
+
+ def test_pickle_continulet_not_started(self):
+ from _continuation import continulet, error
+ import pickle
+ lst = []
+ co = continulet(lst.append)
+ pckl = pickle.dumps((co, lst))
+ print pckl
+ del co, lst
+ for i in range(2):
+ print 'resume...'
+ co2, lst2 = pickle.loads(pckl)
+ assert lst2 == []
+ co2.switch()
+ assert lst2 == [co2]
+
+ def test_pickle_continulet_real(self):
+ import new, sys
+ mod = new.module('test_pickle_continulet_real')
+ sys.modules['test_pickle_continulet_real'] = mod
+ mod.version = self.version
+ exec '''if 1:
+ from _continuation import continulet
+ import pickle
+ def f(co, x):
+ co.switch(x + 1)
+ co.switch(x + 2)
+ return x + 3
+ co = continulet(f, 40)
+ res = co.switch()
+ assert res == 41
+ pckl = pickle.dumps(co, version)
+ print repr(pckl)
+ co2 = pickle.loads(pckl)
+ #
+ res = co2.switch()
+ assert res == 42
+ assert co2.is_pending()
+ res = co2.switch()
+ assert res == 43
+ assert not co2.is_pending()
+ #
+ res = co.switch()
+ assert res == 42
+ assert co.is_pending()
+ res = co.switch()
+ assert res == 43
+ assert not co.is_pending()
+ ''' in mod.__dict__
+
+ def test_pickle_continulet_real_subclass(self):
+ import new, sys
+ mod = new.module('test_pickle_continulet_real_subclass')
+ sys.modules['test_pickle_continulet_real_subclass'] = mod
+ mod.version = self.version
+ exec '''if 1:
+ from _continuation import continulet
+ import pickle
+ class A(continulet):
+ def __init__(self):
+ crash
+ def f(co):
+ co.switch(co.x + 1)
+ co.switch(co.x + 2)
+ return co.x + 3
+ co = A.__new__(A)
+ continulet.__init__(co, f)
+ co.x = 40
+ res = co.switch()
+ assert res == 41
+ pckl = pickle.dumps(co, version)
+ print repr(pckl)
+ co2 = pickle.loads(pckl)
+ #
+ assert type(co2) is A
+ res = co2.switch()
+ assert res == 42
+ assert co2.is_pending()
+ res = co2.switch()
+ assert res == 43
+ assert not co2.is_pending()
+ #
+ res = co.switch()
+ assert res == 42
+ assert co.is_pending()
+ res = co.switch()
+ assert res == 43
+ assert not co.is_pending()
+ ''' in mod.__dict__
+
+
+class AppTestPickle_v1(AppTestPickle):
+ version = 1
+
+class AppTestPickle_v2(AppTestPickle):
+ version = 2
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -225,7 +225,9 @@
except OSError:
pass
- def __init__(self, fd, flags):
+ def __init__(self, space, fd, flags):
+ if fd == self.INVALID_HANDLE_VALUE or fd < 0:
+ raise OperationError(space.w_IOError, space.wrap("invalid handle %d" % fd))
W_BaseConnection.__init__(self, flags)
self.fd = fd
@@ -234,7 +236,7 @@
flags = (readable and READABLE) | (writable and WRITABLE)
self = space.allocate_instance(W_FileConnection, w_subtype)
- W_FileConnection.__init__(self, fd, flags)
+ W_FileConnection.__init__(self, space, fd, flags)
return space.wrap(self)
def fileno(self, space):
diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py
--- a/pypy/module/_multiprocessing/interp_semaphore.py
+++ b/pypy/module/_multiprocessing/interp_semaphore.py
@@ -468,6 +468,9 @@
self.count -= 1
+ def after_fork(self):
+ self.count = 0
+
@unwrap_spec(kind=int, maxvalue=int)
def rebuild(space, w_cls, w_handle, kind, maxvalue):
self = space.allocate_instance(W_SemLock, w_cls)
@@ -512,6 +515,7 @@
acquire = interp2app(W_SemLock.acquire),
release = interp2app(W_SemLock.release),
_rebuild = interp2app(W_SemLock.rebuild.im_func, as_classmethod=True),
+ _after_fork = interp2app(W_SemLock.after_fork),
__enter__=interp2app(W_SemLock.enter),
__exit__=interp2app(W_SemLock.exit),
SEM_VALUE_MAX=SEM_VALUE_MAX,
diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py
--- a/pypy/module/_multiprocessing/test/test_connection.py
+++ b/pypy/module/_multiprocessing/test/test_connection.py
@@ -145,3 +145,9 @@
else:
c.close()
space.delslice(w_connections, space.wrap(0), space.wrap(100))
+
+ def test_bad_fd(self):
+ import _multiprocessing
+
+ raises(IOError, _multiprocessing.Connection, -1)
+ raises(IOError, _multiprocessing.Connection, -15)
\ No newline at end of file
diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py
--- a/pypy/module/_multiprocessing/test/test_semaphore.py
+++ b/pypy/module/_multiprocessing/test/test_semaphore.py
@@ -39,6 +39,10 @@
sem.release()
assert sem._count() == 0
+ sem.acquire()
+ sem._after_fork()
+ assert sem._count() == 0
+
def test_recursive(self):
from _multiprocessing import SemLock
kind = self.RECURSIVE
diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py
--- a/pypy/module/_sre/interp_sre.py
+++ b/pypy/module/_sre/interp_sre.py
@@ -99,6 +99,7 @@
# SRE_Pattern class
class W_SRE_Pattern(Wrappable):
+ _immutable_fields_ = ["code", "flags"]
def cannot_copy_w(self):
space = self.space
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -57,6 +57,9 @@
compile_extra=['-DPy_BUILD_CORE'],
)
+class CConfig2:
+ _compilation_info_ = CConfig._compilation_info_
+
class CConfig_constants:
_compilation_info_ = CConfig._compilation_info_
@@ -300,9 +303,13 @@
return unwrapper_raise # used in 'normal' RPython code.
return decorate
-def cpython_struct(name, fields, forward=None):
+def cpython_struct(name, fields, forward=None, level=1):
configname = name.replace(' ', '__')
- setattr(CConfig, configname, rffi_platform.Struct(name, fields))
+ if level == 1:
+ config = CConfig
+ else:
+ config = CConfig2
+ setattr(config, configname, rffi_platform.Struct(name, fields))
if forward is None:
forward = lltype.ForwardReference()
TYPES[configname] = forward
@@ -445,9 +452,10 @@
# 'int*': rffi.INTP}
def configure_types():
- for name, TYPE in rffi_platform.configure(CConfig).iteritems():
- if name in TYPES:
- TYPES[name].become(TYPE)
+ for config in (CConfig, CConfig2):
+ for name, TYPE in rffi_platform.configure(config).iteritems():
+ if name in TYPES:
+ TYPES[name].become(TYPE)
def build_type_checkers(type_name, cls=None):
"""
diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h
--- a/pypy/module/cpyext/include/object.h
+++ b/pypy/module/cpyext/include/object.h
@@ -321,6 +321,15 @@
} PyTypeObject;
+typedef struct {
+ PyTypeObject ht_type;
+ PyNumberMethods as_number;
+ PyMappingMethods as_mapping;
+ PySequenceMethods as_sequence;
+ PyBufferProcs as_buffer;
+ PyObject *ht_name, *ht_slots;
+} PyHeapTypeObject;
+
/* Flag bits for printing: */
#define Py_PRINT_RAW 1 /* No string quotes etc. */
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -19,13 +19,42 @@
basestruct = PyObject.TO
def get_dealloc(self, space):
- raise NotImplementedError
+ from pypy.module.cpyext.typeobject import subtype_dealloc
+ return llhelper(
+ subtype_dealloc.api_func.functype,
+ subtype_dealloc.api_func.get_wrapper(space))
+
def allocate(self, space, w_type, itemcount=0):
- raise NotImplementedError
+ # similar to PyType_GenericAlloc?
+ # except that it's not related to any pypy object.
+
+ pytype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_type))
+ # Don't increase refcount for non-heaptypes
+ if pytype:
+ flags = rffi.cast(lltype.Signed, pytype.c_tp_flags)
+ if not flags & Py_TPFLAGS_HEAPTYPE:
+ Py_DecRef(space, w_type)
+
+ if pytype:
+ size = pytype.c_tp_basicsize
+ else:
+ size = rffi.sizeof(self.basestruct)
+ if itemcount:
+ size += itemcount * pytype.c_tp_itemsize
+ buf = lltype.malloc(rffi.VOIDP.TO, size,
+ flavor='raw', zero=True)
+ pyobj = rffi.cast(PyObject, buf)
+ pyobj.c_ob_refcnt = 1
+ pyobj.c_ob_type = pytype
+ return pyobj
+
def attach(self, space, pyobj, w_obj):
- raise NotImplementedError
+ pass
+
def realize(self, space, ref):
- raise NotImplementedError
+ # For most types, a reference cannot exist without
+ # a real interpreter object
+ raise InvalidPointerException(str(ref))
typedescr_cache = {}
@@ -40,6 +69,7 @@
"""
tp_basestruct = kw.pop('basestruct', PyObject.TO)
+ tp_alloc = kw.pop('alloc', None)
tp_attach = kw.pop('attach', None)
tp_realize = kw.pop('realize', None)
tp_dealloc = kw.pop('dealloc', None)
@@ -49,58 +79,24 @@
class CpyTypedescr(BaseCpyTypedescr):
basestruct = tp_basestruct
- realize = tp_realize
- def get_dealloc(self, space):
- if tp_dealloc:
+ if tp_alloc:
+ def allocate(self, space, w_type, itemcount=0):
+ return tp_alloc(space, w_type)
+
+ if tp_dealloc:
+ def get_dealloc(self, space):
return llhelper(
tp_dealloc.api_func.functype,
tp_dealloc.api_func.get_wrapper(space))
- else:
- from pypy.module.cpyext.typeobject import subtype_dealloc
- return llhelper(
- subtype_dealloc.api_func.functype,
- subtype_dealloc.api_func.get_wrapper(space))
-
- def allocate(self, space, w_type, itemcount=0):
- # similar to PyType_GenericAlloc?
- # except that it's not related to any pypy object.
-
- pytype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_type))
- # Don't increase refcount for non-heaptypes
- if pytype:
- flags = rffi.cast(lltype.Signed, pytype.c_tp_flags)
- if not flags & Py_TPFLAGS_HEAPTYPE:
- Py_DecRef(space, w_type)
-
- if pytype:
- size = pytype.c_tp_basicsize
- else:
- size = rffi.sizeof(tp_basestruct)
- if itemcount:
- size += itemcount * pytype.c_tp_itemsize
- buf = lltype.malloc(rffi.VOIDP.TO, size,
- flavor='raw', zero=True)
- pyobj = rffi.cast(PyObject, buf)
- pyobj.c_ob_refcnt = 1
- pyobj.c_ob_type = pytype
- return pyobj
if tp_attach:
def attach(self, space, pyobj, w_obj):
tp_attach(space, pyobj, w_obj)
- else:
- def attach(self, space, pyobj, w_obj):
- pass
if tp_realize:
def realize(self, space, ref):
return tp_realize(space, ref)
- else:
- def realize(self, space, ref):
- # For most types, a reference cannot exist without
- # a real interpreter object
- raise InvalidPointerException(str(ref))
if typedef:
CpyTypedescr.__name__ = "CpyTypedescr_%s" % (typedef.name,)
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -268,6 +268,21 @@
assert type(obj) is foo.Custom
assert type(foo.Custom) is foo.MetaType
+ def test_heaptype(self):
+ module = self.import_extension('foo', [
+ ("name_by_heaptype", "METH_O",
+ '''
+ PyHeapTypeObject *heaptype = (PyHeapTypeObject *)args;
+ Py_INCREF(heaptype->ht_name);
+ return heaptype->ht_name;
+ '''
+ )
+ ])
+ class C(object):
+ pass
+ assert module.name_by_heaptype(C) == "C"
+
+
class TestTypes(BaseApiTest):
def test_type_attributes(self, space, api):
w_class = space.appexec([], """():
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -11,7 +11,7 @@
generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING,
Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL,
Py_TPFLAGS_HAVE_GETCHARBUFFER,
- build_type_checkers)
+ build_type_checkers, PyObjectFields)
from pypy.module.cpyext.pyobject import (
PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr,
track_reference, RefcountState, borrow_from)
@@ -25,7 +25,7 @@
from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne
from pypy.module.cpyext.typeobjectdefs import (
PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc,
- PyNumberMethods, PySequenceMethods, PyBufferProcs)
+ PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs)
from pypy.module.cpyext.slotdefs import (
slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function)
from pypy.interpreter.error import OperationError
@@ -39,6 +39,19 @@
PyType_Check, PyType_CheckExact = build_type_checkers("Type", "w_type")
+PyHeapTypeObjectStruct = lltype.ForwardReference()
+PyHeapTypeObject = lltype.Ptr(PyHeapTypeObjectStruct)
+PyHeapTypeObjectFields = (
+ ("ht_type", PyTypeObject),
+ ("ht_name", PyObject),
+ ("as_number", PyNumberMethods),
+ ("as_mapping", PyMappingMethods),
+ ("as_sequence", PySequenceMethods),
+ ("as_buffer", PyBufferProcs),
+ )
+cpython_struct("PyHeapTypeObject", PyHeapTypeObjectFields, PyHeapTypeObjectStruct,
+ level=2)
+
class W_GetSetPropertyEx(GetSetProperty):
def __init__(self, getset, w_type):
self.getset = getset
@@ -136,6 +149,8 @@
assert len(slot_names) == 2
struct = getattr(pto, slot_names[0])
if not struct:
+ assert not space.config.translating
+ assert not pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE
if slot_names[0] == 'c_tp_as_number':
STRUCT_TYPE = PyNumberMethods
elif slot_names[0] == 'c_tp_as_sequence':
@@ -301,6 +316,7 @@
make_typedescr(space.w_type.instancetypedef,
basestruct=PyTypeObject,
+ alloc=type_alloc,
attach=type_attach,
realize=type_realize,
dealloc=type_dealloc)
@@ -319,11 +335,13 @@
track_reference(space, lltype.nullptr(PyObject.TO), space.w_type)
track_reference(space, lltype.nullptr(PyObject.TO), space.w_object)
track_reference(space, lltype.nullptr(PyObject.TO), space.w_tuple)
+ track_reference(space, lltype.nullptr(PyObject.TO), space.w_str)
# create the objects
py_type = create_ref(space, space.w_type)
py_object = create_ref(space, space.w_object)
py_tuple = create_ref(space, space.w_tuple)
+ py_str = create_ref(space, space.w_str)
# form cycles
pto_type = rffi.cast(PyTypeObjectPtr, py_type)
@@ -340,10 +358,15 @@
pto_object.c_tp_bases.c_ob_type = pto_tuple
pto_tuple.c_tp_bases.c_ob_type = pto_tuple
+ for typ in (py_type, py_object, py_tuple, py_str):
+ heaptype = rffi.cast(PyHeapTypeObject, typ)
+ heaptype.c_ht_name.c_ob_type = pto_type
+
# Restore the mapping
track_reference(space, py_type, space.w_type, replace=True)
track_reference(space, py_object, space.w_object, replace=True)
track_reference(space, py_tuple, space.w_tuple, replace=True)
+ track_reference(space, py_str, space.w_str, replace=True)
@cpython_api([PyObject], lltype.Void, external=False)
@@ -416,17 +439,34 @@
Py_DecRef(space, obj_pto.c_tp_cache) # let's do it like cpython
Py_DecRef(space, obj_pto.c_tp_dict)
if obj_pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE:
- if obj_pto.c_tp_as_buffer:
- lltype.free(obj_pto.c_tp_as_buffer, flavor='raw')
- if obj_pto.c_tp_as_number:
- lltype.free(obj_pto.c_tp_as_number, flavor='raw')
- if obj_pto.c_tp_as_sequence:
- lltype.free(obj_pto.c_tp_as_sequence, flavor='raw')
+ heaptype = rffi.cast(PyHeapTypeObject, obj)
+ Py_DecRef(space, heaptype.c_ht_name)
Py_DecRef(space, base_pyo)
- rffi.free_charp(obj_pto.c_tp_name)
PyObject_dealloc(space, obj)
+def type_alloc(space, w_metatype):
+ size = rffi.sizeof(PyHeapTypeObject)
+ metatype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_metatype))
+ # Don't increase refcount for non-heaptypes
+ if metatype:
+ flags = rffi.cast(lltype.Signed, metatype.c_tp_flags)
+ if not flags & Py_TPFLAGS_HEAPTYPE:
+ Py_DecRef(space, w_metatype)
+
+ heaptype = lltype.malloc(PyHeapTypeObject.TO,
+ flavor='raw', zero=True)
+ pto = heaptype.c_ht_type
+ pto.c_ob_refcnt = 1
+ pto.c_ob_type = metatype
+ pto.c_tp_flags |= Py_TPFLAGS_HEAPTYPE
+ pto.c_tp_as_number = heaptype.c_as_number
+ pto.c_tp_as_sequence = heaptype.c_as_sequence
+ pto.c_tp_as_mapping = heaptype.c_as_mapping
+ pto.c_tp_as_buffer = heaptype.c_as_buffer
+
+ return rffi.cast(PyObject, heaptype)
+
def type_attach(space, py_obj, w_type):
"""
Fills a newly allocated PyTypeObject from an existing type.
@@ -445,12 +485,18 @@
if space.is_w(w_type, space.w_str):
setup_string_buffer_procs(space, pto)
- pto.c_tp_flags |= Py_TPFLAGS_HEAPTYPE
pto.c_tp_free = llhelper(PyObject_Del.api_func.functype,
PyObject_Del.api_func.get_wrapper(space))
pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype,
PyType_GenericAlloc.api_func.get_wrapper(space))
- pto.c_tp_name = rffi.str2charp(w_type.getname(space))
+ if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE:
+ w_typename = space.getattr(w_type, space.wrap('__name__'))
+ heaptype = rffi.cast(PyHeapTypeObject, pto)
+ heaptype.c_ht_name = make_ref(space, w_typename)
+ from pypy.module.cpyext.stringobject import PyString_AsString
+ pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name)
+ else:
+ pto.c_tp_name = rffi.str2charp(w_type.getname(space))
pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out
pto.c_tp_itemsize = 0
# uninitialized fields:
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -23,6 +23,8 @@
("arccos", "arccos"),
("arcsin", "arcsin"),
("arctan", "arctan"),
+ ("arcsinh", "arcsinh"),
+ ("arctanh", "arctanh"),
("copysign", "copysign"),
("cos", "cos"),
("divide", "divide"),
@@ -50,4 +52,6 @@
appleveldefs = {
'average': 'app_numpy.average',
'mean': 'app_numpy.mean',
+ 'inf': 'app_numpy.inf',
+ 'e': 'app_numpy.e',
}
diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py
--- a/pypy/module/micronumpy/app_numpy.py
+++ b/pypy/module/micronumpy/app_numpy.py
@@ -1,5 +1,11 @@
+import math
+
import numpy
+
+inf = float("inf")
+e = math.e
+
def average(a):
# This implements a weighted average, for now we don't implement the
# weighting, just the average part!
@@ -8,4 +14,4 @@
def mean(a):
if not hasattr(a, "mean"):
a = numpy.array(a)
- return a.mean()
\ No newline at end of file
+ return a.mean()
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
--- a/pypy/module/micronumpy/interp_dtype.py
+++ b/pypy/module/micronumpy/interp_dtype.py
@@ -7,13 +7,14 @@
from pypy.interpreter.typedef import TypeDef, interp_attrproperty, GetSetProperty
from pypy.module.micronumpy import signature
from pypy.objspace.std.floatobject import float2string
-from pypy.rlib import rfloat
-from pypy.rlib.rarithmetic import widen
+from pypy.rlib import rarithmetic, rfloat
+from pypy.rlib.rarithmetic import LONG_BIT, widen
from pypy.rlib.objectmodel import specialize, enforceargs
from pypy.rlib.unroll import unrolling_iterable
from pypy.rpython.lltypesystem import lltype, rffi
+UNSIGNEDLTR = "u"
SIGNEDLTR = "i"
BOOLLTR = "b"
FLOATINGLTR = "f"
@@ -61,7 +62,10 @@
self.val = val
def wrap(self, space):
- return space.wrap(self.val)
+ val = self.val
+ if valtype is rarithmetic.r_singlefloat:
+ val = float(val)
+ return space.wrap(val)
def convert_to(self, dtype):
return dtype.adapt_val(self.val)
@@ -145,7 +149,7 @@
return self.adapt_val(func(self, self.for_computation(self.unbox(v))))
return impl
-class ArithmaticTypeMixin(object):
+class ArithmeticTypeMixin(object):
_mixin_ = True
@binop
@@ -200,11 +204,17 @@
return v1 >= v2
-class FloatArithmeticDtype(ArithmaticTypeMixin):
+class FloatArithmeticDtype(ArithmeticTypeMixin):
_mixin_ = True
+ def unwrap(self, space, w_item):
+ return self.adapt_val(space.float_w(space.float(w_item)))
+
def for_computation(self, v):
- return v
+ return float(v)
+
+ def str_format(self, item):
+ return float2string(self.for_computation(self.unbox(item)), 'g', rfloat.DTSF_STR_PRECISION)
@binop
def mod(self, v1, v2):
@@ -250,19 +260,29 @@
return math.tan(v)
@unaryop
def arcsin(self, v):
- if v < -1.0 or v > 1.0:
+ if not -1.0 <= v <= 1.0:
return rfloat.NAN
return math.asin(v)
@unaryop
def arccos(self, v):
- if v < -1.0 or v > 1.0:
+ if not -1.0 <= v <= 1.0:
return rfloat.NAN
return math.acos(v)
@unaryop
def arctan(self, v):
return math.atan(v)
+ @unaryop
+ def arcsinh(self, v):
+ return math.asinh(v)
+ @unaryop
+ def arctanh(self, v):
+ if v == 1.0 or v == -1.0:
+ return math.copysign(rfloat.INFINITY, v)
+ if not -1.0 < v < 1.0:
+ return rfloat.NAN
+ return math.atanh(v)
-class IntegerArithmeticDtype(ArithmaticTypeMixin):
+class IntegerArithmeticDtype(ArithmeticTypeMixin):
_mixin_ = True
def unwrap(self, space, w_item):
@@ -271,10 +291,16 @@
def for_computation(self, v):
return widen(v)
+ def str_format(self, item):
+ return str(widen(self.unbox(item)))
+
@binop
def mod(self, v1, v2):
return v1 % v2
+class SignedIntegerArithmeticDtype(IntegerArithmeticDtype):
+ _mixin_ = True
+
@unaryop
def sign(self, v):
if v > 0:
@@ -285,17 +311,22 @@
assert v == 0
return 0
- def str_format(self, item):
- return str(widen(self.unbox(item)))
+class UnsignedIntegerArithmeticDtype(IntegerArithmeticDtype):
+ _mixin_ = True
+
+ @unaryop
+ def sign(self, v):
+ return int(v != 0)
+
W_BoolDtype = create_low_level_dtype(
num = 0, kind = BOOLLTR, name = "bool",
- aliases = ["?"],
+ aliases = ["?", "bool", "bool8"],
applevel_types = ["bool"],
T = lltype.Bool,
valtype = bool,
)
-class W_BoolDtype(IntegerArithmeticDtype, W_BoolDtype):
+class W_BoolDtype(SignedIntegerArithmeticDtype, W_BoolDtype):
def unwrap(self, space, w_item):
return self.adapt_val(space.is_true(w_item))
@@ -308,67 +339,139 @@
W_Int8Dtype = create_low_level_dtype(
num = 1, kind = SIGNEDLTR, name = "int8",
- aliases = ["int8"],
+ aliases = ["b", "int8", "i1"],
applevel_types = [],
T = rffi.SIGNEDCHAR,
valtype = rffi.SIGNEDCHAR._type,
expected_size = 1,
)
-class W_Int8Dtype(IntegerArithmeticDtype, W_Int8Dtype):
+class W_Int8Dtype(SignedIntegerArithmeticDtype, W_Int8Dtype):
+ pass
+
+W_UInt8Dtype = create_low_level_dtype(
+ num = 2, kind = UNSIGNEDLTR, name = "uint8",
+ aliases = ["B", "uint8", "I1"],
+ applevel_types = [],
+ T = rffi.UCHAR,
+ valtype = rffi.UCHAR._type,
+ expected_size = 1,
+)
+class W_UInt8Dtype(UnsignedIntegerArithmeticDtype, W_UInt8Dtype):
pass
W_Int16Dtype = create_low_level_dtype(
num = 3, kind = SIGNEDLTR, name = "int16",
- aliases = ["int16"],
+ aliases = ["h", "int16", "i2"],
applevel_types = [],
T = rffi.SHORT,
valtype = rffi.SHORT._type,
expected_size = 2,
)
-class W_Int16Dtype(IntegerArithmeticDtype, W_Int16Dtype):
+class W_Int16Dtype(SignedIntegerArithmeticDtype, W_Int16Dtype):
+ pass
+
+W_UInt16Dtype = create_low_level_dtype(
+ num = 4, kind = UNSIGNEDLTR, name = "uint16",
+ aliases = ["H", "uint16", "I2"],
+ applevel_types = [],
+ T = rffi.USHORT,
+ valtype = rffi.USHORT._type,
+ expected_size = 2,
+)
+class W_UInt16Dtype(UnsignedIntegerArithmeticDtype, W_UInt16Dtype):
pass
W_Int32Dtype = create_low_level_dtype(
num = 5, kind = SIGNEDLTR, name = "int32",
- aliases = ["i"],
+ aliases = ["i", "int32", "i4"],
applevel_types = [],
T = rffi.INT,
valtype = rffi.INT._type,
expected_size = 4,
)
-class W_Int32Dtype(IntegerArithmeticDtype, W_Int32Dtype):
+class W_Int32Dtype(SignedIntegerArithmeticDtype, W_Int32Dtype):
+ pass
+
+W_UInt32Dtype = create_low_level_dtype(
+ num = 6, kind = UNSIGNEDLTR, name = "uint32",
+ aliases = ["I", "uint32", "I4"],
+ applevel_types = [],
+ T = rffi.UINT,
+ valtype = rffi.UINT._type,
+ expected_size = 4,
+)
+class W_UInt32Dtype(UnsignedIntegerArithmeticDtype, W_UInt32Dtype):
pass
W_Int64Dtype = create_low_level_dtype(
num = 9, kind = SIGNEDLTR, name = "int64",
- aliases = [],
+ aliases = ["q", "int64", "i8"],
applevel_types = ["long"],
T = rffi.LONGLONG,
valtype = rffi.LONGLONG._type,
expected_size = 8,
)
-class W_Int64Dtype(IntegerArithmeticDtype, W_Int64Dtype):
+class W_Int64Dtype(SignedIntegerArithmeticDtype, W_Int64Dtype):
+ pass
+
+W_UInt64Dtype = create_low_level_dtype(
+ num = 10, kind = UNSIGNEDLTR, name = "uint64",
+ aliases = ["Q", "uint64", "I8"],
+ applevel_types = [],
+ T = rffi.ULONGLONG,
+ valtype = rffi.ULONGLONG._type,
+ expected_size = 8,
+)
+class W_UInt64Dtype(UnsignedIntegerArithmeticDtype, W_UInt64Dtype):
+ pass
+
+if LONG_BIT == 32:
+ class W_LongDtype(W_Int32Dtype):
+ pass
+
+ class W_ULongDtype(W_UInt32Dtype):
+ pass
+else:
+ class W_LongDtype(W_Int64Dtype):
+ pass
+
+ class W_ULongDtype(W_UInt64Dtype):
+ pass
+
+W_LongDtype.num = 7
+W_LongDtype.aliases = ["l"]
+W_LongDtype.applevel_types = ["int"]
+W_ULongDtype.num = 8
+W_ULongDtype.aliases = ["L"]
+
+W_Float32Dtype = create_low_level_dtype(
+ num = 11, kind = FLOATINGLTR, name = "float32",
+ aliases = ["f", "float32", "f4"],
+ applevel_types = [],
+ T = lltype.SingleFloat,
+ valtype = rarithmetic.r_singlefloat,
+ expected_size = 4,
+)
+class W_Float32Dtype(FloatArithmeticDtype, W_Float32Dtype):
pass
W_Float64Dtype = create_low_level_dtype(
num = 12, kind = FLOATINGLTR, name = "float64",
- aliases = [],
+ aliases = ["d", "float64", "f8"],
applevel_types = ["float"],
T = lltype.Float,
valtype = float,
expected_size = 8,
)
class W_Float64Dtype(FloatArithmeticDtype, W_Float64Dtype):
- def unwrap(self, space, w_item):
- return self.adapt_val(space.float_w(space.float(w_item)))
-
- def str_format(self, item):
- return float2string(self.unbox(item), 'g', rfloat.DTSF_STR_PRECISION)
+ pass
ALL_DTYPES = [
W_BoolDtype,
- W_Int8Dtype, W_Int16Dtype, W_Int32Dtype, W_Int64Dtype,
- W_Float64Dtype
+ W_Int8Dtype, W_UInt8Dtype, W_Int16Dtype, W_UInt16Dtype,
+ W_Int32Dtype, W_UInt32Dtype, W_LongDtype, W_ULongDtype,
+ W_Int64Dtype, W_UInt64Dtype,
+ W_Float32Dtype, W_Float64Dtype,
]
dtypes_by_alias = unrolling_iterable([
@@ -395,6 +498,7 @@
num = interp_attrproperty("num", cls=W_Dtype),
kind = interp_attrproperty("kind", cls=W_Dtype),
+ itemsize = interp_attrproperty("num_bytes", cls=W_Dtype),
shape = GetSetProperty(W_Dtype.descr_get_shape),
)
W_Dtype.typedef.acceptable_as_base_class = False
diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
--- a/pypy/module/micronumpy/interp_ufuncs.py
+++ b/pypy/module/micronumpy/interp_ufuncs.py
@@ -4,6 +4,7 @@
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from pypy.module.micronumpy import interp_dtype, signature
from pypy.rlib import jit
+from pypy.rlib.rarithmetic import LONG_BIT
from pypy.tool.sourcetools import func_with_new_name
@@ -180,23 +181,56 @@
# Everything promotes to float, and bool promotes to everything.
if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR:
+ # Float32 + 8-bit int = Float64
+ if dt2.num == 11 and dt1.num_bytes >= 4:
+ return space.fromcache(interp_dtype.W_Float64Dtype)
return dt2
- assert False
+ # for now this means mixing signed and unsigned
+ if dt2.kind == interp_dtype.SIGNEDLTR:
+ # if dt2 has a greater number of bytes, then just go with it
+ if dt1.num_bytes < dt2.num_bytes:
+ return dt2
+ # we need to promote both dtypes
+ dtypenum = dt2.num + 2
+ else:
+ # increase to the next signed type (or to float)
+ dtypenum = dt2.num + 1
+ # UInt64 + signed = Float64
+ if dt2.num == 10:
+ dtypenum += 1
+ newdtype = interp_dtype.ALL_DTYPES[dtypenum]
+
+ if newdtype.num_bytes > dt2.num_bytes or newdtype.kind == interp_dtype.FLOATINGLTR:
+ return space.fromcache(newdtype)
+ else:
+ # we only promoted to long on 32-bit or to longlong on 64-bit
+ # this is really for dealing with the Long and Ulong dtypes
+ if LONG_BIT == 32:
+ dtypenum += 2
+ else:
+ dtypenum += 3
+ return space.fromcache(interp_dtype.ALL_DTYPES[dtypenum])
def find_unaryop_result_dtype(space, dt, promote_to_float=False,
promote_bools=False, promote_to_largest=False):
if promote_bools and (dt.kind == interp_dtype.BOOLLTR):
return space.fromcache(interp_dtype.W_Int8Dtype)
if promote_to_float:
+ if dt.kind == interp_dtype.FLOATINGLTR:
+ return dt
+ if dt.num >= 5:
+ return space.fromcache(interp_dtype.W_Float64Dtype)
for bytes, dtype in interp_dtype.dtypes_by_num_bytes:
- if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes >= dt.num_bytes:
+ if dtype.kind == interp_dtype.FLOATINGLTR and dtype.num_bytes > dt.num_bytes:
return space.fromcache(dtype)
if promote_to_largest:
if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR:
return space.fromcache(interp_dtype.W_Int64Dtype)
elif dt.kind == interp_dtype.FLOATINGLTR:
return space.fromcache(interp_dtype.W_Float64Dtype)
+ elif dt.kind == interp_dtype.UNSIGNEDLTR:
+ return space.fromcache(interp_dtype.W_UInt64Dtype)
else:
assert False
return dt
@@ -205,15 +239,23 @@
w_type = space.type(w_obj)
bool_dtype = space.fromcache(interp_dtype.W_BoolDtype)
+ long_dtype = space.fromcache(interp_dtype.W_LongDtype)
int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype)
if space.is_w(w_type, space.w_bool):
if current_guess is None or current_guess is bool_dtype:
return bool_dtype
+ return current_guess
elif space.is_w(w_type, space.w_int):
if (current_guess is None or current_guess is bool_dtype or
- current_guess is int64_dtype):
+ current_guess is long_dtype):
+ return long_dtype
+ return current_guess
+ elif space.is_w(w_type, space.w_long):
+ if (current_guess is None or current_guess is bool_dtype or
+ current_guess is long_dtype or current_guess is int64_dtype):
return int64_dtype
+ return current_guess
return space.fromcache(interp_dtype.W_Float64Dtype)
@@ -225,7 +267,9 @@
def impl(res_dtype, lvalue, rvalue):
res = getattr(res_dtype, op_name)(lvalue, rvalue)
if comparison_func:
- res = space.fromcache(interp_dtype.W_BoolDtype).box(res)
+ booldtype = space.fromcache(interp_dtype.W_BoolDtype)
+ assert isinstance(booldtype, interp_dtype.W_BoolDtype)
+ res = booldtype.box(res)
return res
return func_with_new_name(impl, ufunc_name)
@@ -268,6 +312,8 @@
("arcsin", "arcsin", 1, {"promote_to_float": True}),
("arccos", "arccos", 1, {"promote_to_float": True}),
("arctan", "arctan", 1, {"promote_to_float": True}),
+ ("arcsinh", "arcsinh", 1, {"promote_to_float": True}),
+ ("arctanh", "arctanh", 1, {"promote_to_float": True}),
]:
self.add_ufunc(space, *ufunc_def)
@@ -277,7 +323,7 @@
identity = extra_kwargs.get("identity")
if identity is not None:
- identity = space.fromcache(interp_dtype.W_Int64Dtype).adapt_val(identity)
+ identity = space.fromcache(interp_dtype.W_LongDtype).adapt_val(identity)
extra_kwargs["identity"] = identity
func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount,
diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py
--- a/pypy/module/micronumpy/test/test_base.py
+++ b/pypy/module/micronumpy/test/test_base.py
@@ -64,18 +64,46 @@
def test_unaryops(self, space):
bool_dtype = space.fromcache(interp_dtype.W_BoolDtype)
int8_dtype = space.fromcache(interp_dtype.W_Int8Dtype)
+ uint8_dtype = space.fromcache(interp_dtype.W_UInt8Dtype)
+ int16_dtype = space.fromcache(interp_dtype.W_Int16Dtype)
+ uint16_dtype = space.fromcache(interp_dtype.W_UInt16Dtype)
int32_dtype = space.fromcache(interp_dtype.W_Int32Dtype)
+ uint32_dtype = space.fromcache(interp_dtype.W_UInt32Dtype)
+ long_dtype = space.fromcache(interp_dtype.W_LongDtype)
+ ulong_dtype = space.fromcache(interp_dtype.W_ULongDtype)
+ int64_dtype = space.fromcache(interp_dtype.W_Int64Dtype)
+ uint64_dtype = space.fromcache(interp_dtype.W_UInt64Dtype)
+ float32_dtype = space.fromcache(interp_dtype.W_Float32Dtype)
float64_dtype = space.fromcache(interp_dtype.W_Float64Dtype)
- # Normal rules, everythign returns itself
+ # Normal rules, everything returns itself
assert find_unaryop_result_dtype(space, bool_dtype) is bool_dtype
assert find_unaryop_result_dtype(space, int8_dtype) is int8_dtype
+ assert find_unaryop_result_dtype(space, uint8_dtype) is uint8_dtype
+ assert find_unaryop_result_dtype(space, int16_dtype) is int16_dtype
+ assert find_unaryop_result_dtype(space, uint16_dtype) is uint16_dtype
assert find_unaryop_result_dtype(space, int32_dtype) is int32_dtype
+ assert find_unaryop_result_dtype(space, uint32_dtype) is uint32_dtype
+ assert find_unaryop_result_dtype(space, long_dtype) is long_dtype
+ assert find_unaryop_result_dtype(space, ulong_dtype) is ulong_dtype
+ assert find_unaryop_result_dtype(space, int64_dtype) is int64_dtype
+ assert find_unaryop_result_dtype(space, uint64_dtype) is uint64_dtype
+ assert find_unaryop_result_dtype(space, float32_dtype) is float32_dtype
assert find_unaryop_result_dtype(space, float64_dtype) is float64_dtype
# Coerce to floats, some of these will eventually be float16, or
# whatever our smallest float type is.
- assert find_unaryop_result_dtype(space, bool_dtype, promote_to_float=True) is float64_dtype
- assert find_unaryop_result_dtype(space, int8_dtype, promote_to_float=True) is float64_dtype
+ assert find_unaryop_result_dtype(space, bool_dtype, promote_to_float=True) is float32_dtype # will be float16 if we ever put that in
+ assert find_unaryop_result_dtype(space, int8_dtype, promote_to_float=True) is float32_dtype # will be float16 if we ever put that in
+ assert find_unaryop_result_dtype(space, uint8_dtype, promote_to_float=True) is float32_dtype # will be float16 if we ever put that in
+ assert find_unaryop_result_dtype(space, int16_dtype, promote_to_float=True) is float32_dtype
+ assert find_unaryop_result_dtype(space, uint16_dtype, promote_to_float=True) is float32_dtype
assert find_unaryop_result_dtype(space, int32_dtype, promote_to_float=True) is float64_dtype
- assert find_unaryop_result_dtype(space, float64_dtype, promote_to_float=True) is float64_dtype
\ No newline at end of file
+ assert find_unaryop_result_dtype(space, uint32_dtype, promote_to_float=True) is float64_dtype
+ assert find_unaryop_result_dtype(space, int64_dtype, promote_to_float=True) is float64_dtype
+ assert find_unaryop_result_dtype(space, uint64_dtype, promote_to_float=True) is float64_dtype
+ assert find_unaryop_result_dtype(space, float32_dtype, promote_to_float=True) is float32_dtype
+ assert find_unaryop_result_dtype(space, float64_dtype, promote_to_float=True) is float64_dtype
+
+ # promote bools, happens with sign ufunc
+ assert find_unaryop_result_dtype(space, bool_dtype, promote_bools=True) is int8_dtype
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -17,6 +17,7 @@
from numpy import dtype
assert dtype(bool).num == 0
+ assert dtype(int).num == 7
assert dtype(long).num == 9
assert dtype(float).num == 12
@@ -81,6 +82,48 @@
assert isinstance(a[i], (int, long))
assert a[1] == 1
+ def test_overflow(self):
+ from numpy import array, dtype
+ assert array([128], 'b')[0] == -128
+ assert array([256], 'B')[0] == 0
+ assert array([32768], 'h')[0] == -32768
+ assert array([65536], 'H')[0] == 0
+ if dtype('l').itemsize == 4: # 32-bit
+ raises(OverflowError, "array([2**32/2], 'i')")
+ raises(OverflowError, "array([2**32], 'I')")
+ raises(OverflowError, "array([2**64/2], 'q')")
+ raises(OverflowError, "array([2**64], 'Q')")
+
+ def test_bool_binop_types(self):
+ from numpy import array, dtype
+ types = ('?','b','B','h','H','i','I','l','L','q','Q','f','d')
+ N = len(types)
+ a = array([True], '?')
+ for t in types:
+ assert (a + array([0], t)).dtype is dtype(t)
+
+ def test_binop_types(self):
+ from numpy import array, dtype
+ tests = [('b','B','h'), ('b','h','h'), ('b','H','i'), ('b','i','i'),
+ ('b','l','l'), ('b','q','q'), ('b','Q','d'), ('B','h','h'),
+ ('B','H','H'), ('B','i','i'), ('B','I','I'), ('B','l','l'),
+ ('B','L','L'), ('B','q','q'), ('B','Q','Q'), ('h','H','i'),
+ ('h','i','i'), ('h','l','l'), ('h','q','q'), ('h','Q','d'),
+ ('H','i','i'), ('H','I','I'), ('H','l','l'), ('H','L','L'),
+ ('H','q','q'), ('H','Q','Q'), ('i','l','l'), ('i','q','q'),
+ ('i','Q','d'), ('I','L','L'), ('I','q','q'), ('I','Q','Q'),
+ ('q','Q','d'), ('b','f','f'), ('B','f','f'), ('h','f','f'),
+ ('H','f','f'), ('i','f','d'), ('I','f','d'), ('l','f','d'),
+ ('L','f','d'), ('q','f','d'), ('Q','f','d'), ('q','d','d')]
+ if dtype('i').itemsize == dtype('l').itemsize: # 32-bit
+ tests.extend([('b','I','q'), ('b','L','q'), ('h','I','q'),
+ ('h','L','q'), ('i','I','q'), ('i','L','q')])
+ else:
+ tests.extend([('b','I','l'), ('b','L','d'), ('h','I','l'),
+ ('h','L','d'), ('i','I','l'), ('i','L','d')])
+ for d1, d2, dout in tests:
+ assert (array([1], d1) + array([1], d2)).dtype is dtype(dout)
+
def test_add_int8(self):
from numpy import array, dtype
@@ -99,6 +142,15 @@
for i in range(5):
assert b[i] == i * 2
+ def test_add_uint32(self):
+ from numpy import array, dtype
+
+ a = array(range(5), dtype="I")
+ b = a + a
+ assert b.dtype is dtype("I")
+ for i in range(5):
+ assert b[i] == i * 2
+
def test_shape(self):
from numpy import dtype
diff --git a/pypy/module/micronumpy/test/test_module.py b/pypy/module/micronumpy/test/test_module.py
--- a/pypy/module/micronumpy/test/test_module.py
+++ b/pypy/module/micronumpy/test/test_module.py
@@ -10,4 +10,12 @@
def test_average(self):
from numpy import array, average
assert average(range(10)) == 4.5
- assert average(array(range(10))) == 4.5
\ No newline at end of file
+ assert average(array(range(10))) == 4.5
+
+ def test_constants(self):
+ import math
+ from numpy import inf, e
+ assert type(inf) is float
+ assert inf == float("inf")
+ assert e == math.e
+ assert type(e) is float
\ No newline at end of file
diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
--- a/pypy/module/micronumpy/test/test_numarray.py
+++ b/pypy/module/micronumpy/test/test_numarray.py
@@ -581,8 +581,10 @@
from numpy import array, dtype
assert array([True]).dtype is dtype(bool)
- assert array([True, 1]).dtype is dtype(long)
- assert array([1, 2, 3]).dtype is dtype(long)
+ assert array([True, False]).dtype is dtype(bool)
+ assert array([True, 1]).dtype is dtype(int)
+ assert array([1, 2, 3]).dtype is dtype(int)
+ assert array([1L, 2, 3]).dtype is dtype(long)
assert array([1.2, True]).dtype is dtype(float)
assert array([1.2, 5]).dtype is dtype(float)
assert array([]).dtype is dtype(float)
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -234,7 +234,7 @@
assert b[i] == math.sin(a[i])
a = sin(array([True, False], dtype=bool))
- assert a[0] == sin(1)
+ assert abs(a[0] - sin(1)) < 1e-7 # a[0] will be less precise
assert a[1] == 0.0
def test_cos(self):
@@ -298,6 +298,25 @@
b = arctan(a)
assert math.isnan(b[0])
+ def test_arcsinh(self):
+ import math
+ from numpy import arcsinh, inf
+
+ for v in [inf, -inf, 1.0, math.e]:
+ assert math.asinh(v) == arcsinh(v)
+ assert math.isnan(arcsinh(float("nan")))
+
+ def test_arctanh(self):
+ import math
+ from numpy import arctanh
+
+ for v in [.99, .5, 0, -.5, -.99]:
+ assert math.atanh(v) == arctanh(v)
+ for v in [2.0, -2.0]:
+ assert math.isnan(arctanh(v))
+ for v in [1.0, -1.0]:
+ assert arctanh(v) == math.copysign(float("inf"), v)
+
def test_reduce_errors(self):
from numpy import sin, add
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -1,20 +1,24 @@
from pypy.jit.metainterp.test.support import LLJitMixin
from pypy.module.micronumpy import interp_ufuncs, signature
from pypy.module.micronumpy.compile import (numpy_compile, FakeSpace,
- FloatObject)
-from pypy.module.micronumpy.interp_dtype import W_Float64Dtype, W_Int64Dtype
+ FloatObject, IntObject)
+from pypy.module.micronumpy.interp_dtype import W_Int32Dtype, W_Float64Dtype, W_Int64Dtype, W_UInt64Dtype
from pypy.module.micronumpy.interp_numarray import (BaseArray, SingleDimArray,
SingleDimSlice, scalar_w)
from pypy.rlib.nonconst import NonConstant
from pypy.rpython.annlowlevel import llstr
from pypy.rpython.test.test_llinterp import interpret
+import py
+
class TestNumpyJIt(LLJitMixin):
def setup_class(cls):
cls.space = FakeSpace()
cls.float64_dtype = cls.space.fromcache(W_Float64Dtype)
cls.int64_dtype = cls.space.fromcache(W_Int64Dtype)
+ cls.uint64_dtype = cls.space.fromcache(W_UInt64Dtype)
+ cls.int32_dtype = cls.space.fromcache(W_Int32Dtype)
def test_add(self):
def f(i):
@@ -303,6 +307,31 @@
'int_lt': 1, 'guard_true': 1, 'jump': 1})
assert result == 11.0
+ def test_int32_sum(self):
+ py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to "
+ "deal correctly with int dtypes for this test to "
+ "work. skip for now until someone feels up to the task")
+ space = self.space
+ float64_dtype = self.float64_dtype
+ int32_dtype = self.int32_dtype
+
+ def f(n):
+ if NonConstant(False):
+ dtype = float64_dtype
+ else:
+ dtype = int32_dtype
+ ar = SingleDimArray(n, dtype=dtype)
+ i = 0
+ while i < n:
+ ar.get_concrete().setitem(i, int32_dtype.box(7))
+ i += 1
+ v = ar.descr_add(space, ar).descr_sum(space)
+ assert isinstance(v, IntObject)
+ return v.intval
+
+ result = self.meta_interp(f, [5], listops=True, backendopt=True)
+ assert result == f(5)
+
class TestTranslation(object):
def test_compile(self):
x = numpy_compile('aa+f*f/a-', 10)
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -13,7 +13,6 @@
from pypy.interpreter.pyframe import PyFrame
from pypy.interpreter.pyopcode import ExitFrame
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.baseobjspace import ObjSpace, W_Root
from opcode import opmap
from pypy.rlib.nonconst import NonConstant
from pypy.jit.metainterp.resoperation import rop
@@ -221,7 +220,6 @@
def __init__(self, space):
self.w_compile_hook = space.w_None
- at unwrap_spec(ObjSpace, W_Root)
def set_compile_hook(space, w_hook):
""" set_compile_hook(hook)
diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py
--- a/pypy/module/pypyjit/test/test_policy.py
+++ b/pypy/module/pypyjit/test/test_policy.py
@@ -3,8 +3,8 @@
pypypolicy = policy.PyPyJitPolicy()
def test_id_any():
- from pypy.objspace.std.default import id__ANY
- assert pypypolicy.look_inside_function(id__ANY)
+ from pypy.objspace.std.intobject import add__Int_Int
+ assert pypypolicy.look_inside_function(add__Int_Int)
def test_bigint():
from pypy.rlib.rbigint import rbigint
diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
--- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
@@ -50,8 +50,7 @@
cmdline.append(str(self.filepath))
#
print cmdline, logfile
- env={'PYPYLOG': 'jit-log-opt,jit-summary:' + str(logfile)}
- #env={'PYPYLOG': ':' + str(logfile)}
+ env={'PYPYLOG': 'jit-log-opt,jit-log-noopt,jit-summary:' + str(logfile)}
pipe = subprocess.Popen(cmdline,
env=env,
stdout=subprocess.PIPE,
diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py
--- a/pypy/module/pypyjit/test_pypy_c/test_call.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_call.py
@@ -366,12 +366,12 @@
# make sure that the "block" is not allocated
...
i20 = force_token()
- setfield_gc(p0, i20, descr=<SignedFieldDescr .*PyFrame.vable_token .*>)
p22 = new_with_vtable(19511408)
p24 = new_array(1, descr=<GcPtrArrayDescr>)
p26 = new_with_vtable(ConstClass(W_ListObject))
p27 = new(descr=<SizeDescr .*>)
p29 = new_array(0, descr=<GcPtrArrayDescr>)
+ setfield_gc(p0, i20, descr=<SignedFieldDescr .*PyFrame.vable_token .*>)
setfield_gc(p27, p29, descr=<GcPtrFieldDescr list.items .*>)
setfield_gc(p26, p27, descr=<.* .*W_ListObject.inst_wrappeditems .*>)
setarrayitem_gc(p24, 0, p26, descr=<GcPtrArrayDescr>)
diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py
--- a/pypy/module/pypyjit/test_pypy_c/test_containers.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py
@@ -49,3 +49,24 @@
p33 = call(ConstClass(ll_get_value__dicttablePtr_Signed), p18, i28, descr=...)
...
""")
+
+ def test_list(self):
+ def main(n):
+ i = 0
+ while i < n:
+ z = list(())
+ z.append(1)
+ i += z[-1] / len(z)
+ return i
+
+ log = self.run(main, [1000])
+ assert log.result == main(1000)
+ loop, = log.loops_by_filename(self.filepath)
+ assert loop.match("""
+ i7 = int_lt(i5, i6)
+ guard_true(i7, descr=...)
+ guard_not_invalidated(descr=...)
+ i9 = int_add(i5, 1)
+ --TICK--
+ jump(..., descr=...)
+ """)
\ No newline at end of file
diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py
--- a/pypy/module/pypyjit/test_pypy_c/test_generators.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py
@@ -19,8 +19,8 @@
assert loop.match_by_id("generator", """
i16 = force_token()
p45 = new_with_vtable(ConstClass(W_IntObject))
- setfield_gc(p45, i29, descr=<SignedFieldDescr .*>)
i47 = arraylen_gc(p8, descr=<GcPtrArrayDescr>) # Should be removed by backend
setarrayitem_gc(p8, 0, p45, descr=<GcPtrArrayDescr>)
+ setfield_gc(p45, i29, descr=<SignedFieldDescr .*>)
jump(..., descr=...)
""")
diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py
--- a/pypy/module/pypyjit/test_pypy_c/test_instance.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py
@@ -125,8 +125,8 @@
i12 = force_token()
--TICK--
p20 = new_with_vtable(ConstClass(W_IntObject))
+ setfield_gc(ConstPtr(ptr21), p20, descr=<GcPtrFieldDescr .*TypeCell.inst_w_value .*>)
setfield_gc(p20, i11, descr=<SignedFieldDescr.*W_IntObject.inst_intval .*>)
- setfield_gc(ConstPtr(ptr21), p20, descr=<GcPtrFieldDescr .*TypeCell.inst_w_value .*>)
jump(p0, p1, p2, p3, p4, p20, p6, i7, p20, descr=<Loop.>)
""")
diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py
--- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py
@@ -42,7 +42,7 @@
assert len(guards) < 20
assert loop.match_by_id('max',"""
...
- p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...)
+ p76 = call_may_force(ConstClass(min_max_trampoline), _, _, descr=...)
...
""")
@@ -63,6 +63,6 @@
assert len(guards) < 20
assert loop.match_by_id('max',"""
...
- p76 = call_may_force(ConstClass(min_max_loop__max), _, _, descr=...)
+ p76 = call_may_force(ConstClass(min_max_trampoline), _, _, descr=...)
...
""")
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -55,6 +55,7 @@
'exc_info' : 'vm.exc_info',
'exc_clear' : 'vm.exc_clear',
'settrace' : 'vm.settrace',
+ 'gettrace' : 'vm.gettrace',
'setprofile' : 'vm.setprofile',
'getprofile' : 'vm.getprofile',
'call_tracing' : 'vm.call_tracing',
diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py
--- a/pypy/module/sys/test/test_sysmodule.py
+++ b/pypy/module/sys/test/test_sysmodule.py
@@ -478,6 +478,7 @@
sys.settrace(trace)
try:
x()
+ assert sys.gettrace() is trace
finally:
sys.settrace(None)
assert len(counts) == 1
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -129,14 +129,19 @@
function call. See the debugger chapter in the library manual."""
space.getexecutioncontext().settrace(w_func)
+def gettrace(space):
+ """Return the global debug tracing function set with sys.settrace.
+See the debugger chapter in the library manual."""
+ return space.getexecutioncontext().gettrace()
+
def setprofile(space, w_func):
"""Set the profiling function. It will be called on each function call
and return. See the profiler chapter in the library manual."""
space.getexecutioncontext().setprofile(w_func)
def getprofile(space):
- """Set the profiling function. It will be called on each function call
-and return. See the profiler chapter in the library manual."""
+ """Return the profiling function set with sys.setprofile.
+See the profiler chapter in the library manual."""
w_func = space.getexecutioncontext().getprofile()
if w_func is not None:
return w_func
diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py
--- a/pypy/module/test_lib_pypy/test_greenlet.py
+++ b/pypy/module/test_lib_pypy/test_greenlet.py
@@ -258,3 +258,25 @@
assert sys.exc_info() == (None, None, None)
greenlet(f).switch()
+
+ def test_gr_frame(self):
+ from greenlet import greenlet
+ import sys
+ def f2():
+ assert g.gr_frame is None
+ gmain.switch()
+ assert g.gr_frame is None
+ def f1():
+ assert gmain.gr_frame is gmain_frame
+ assert g.gr_frame is None
+ f2()
+ assert g.gr_frame is None
+ gmain = greenlet.getcurrent()
+ assert gmain.gr_frame is None
+ gmain_frame = sys._getframe()
+ g = greenlet(f1)
+ assert g.gr_frame is None
+ g.switch()
+ assert g.gr_frame.f_code.co_name == 'f2'
+ g.switch()
+ assert g.gr_frame is None
diff --git a/pypy/module/test_lib_pypy/test_stackless_pickle.py b/pypy/module/test_lib_pypy/test_stackless_pickle.py
--- a/pypy/module/test_lib_pypy/test_stackless_pickle.py
+++ b/pypy/module/test_lib_pypy/test_stackless_pickle.py
@@ -1,25 +1,27 @@
-import py; py.test.skip("XXX port me")
+import py
+py.test.skip("in-progress, maybe")
from pypy.conftest import gettestobjspace, option
class AppTest_Stackless:
def setup_class(cls):
- import py.test
- py.test.importorskip('greenlet')
- space = gettestobjspace(usemodules=('_stackless', '_socket'))
+ space = gettestobjspace(usemodules=('_continuation', '_socket'))
cls.space = space
- # cannot test the unpickle part on top of py.py
+ if option.runappdirect:
+ cls.w_lev = space.wrap(14)
+ else:
+ cls.w_lev = space.wrap(2)
def test_pickle(self):
import new, sys
mod = new.module('mod')
sys.modules['mod'] = mod
+ mod.lev = self.lev
try:
exec '''
import pickle, sys
import stackless
-lev = 14
ch = stackless.channel()
seen = []
diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py
--- a/pypy/module/thread/gil.py
+++ b/pypy/module/thread/gil.py
@@ -16,7 +16,7 @@
class GILThreadLocals(OSThreadLocals):
"""A version of OSThreadLocals that enforces a GIL."""
- ll_GIL = thread.null_ll_lock
+ gil_ready = False
def initialize(self, space):
# add the GIL-releasing callback as an action on the space
@@ -25,12 +25,10 @@
def setup_threads(self, space):
"""Enable threads in the object space, if they haven't already been."""
- if not self.ll_GIL:
- try:
- self.ll_GIL = thread.allocate_ll_lock()
- except thread.error:
+ if not self.gil_ready:
+ if not thread.gil_allocate():
raise wrap_thread_error(space, "can't allocate GIL")
- thread.acquire_NOAUTO(self.ll_GIL, True)
+ self.gil_ready = True
self.enter_thread(space) # setup the main thread
result = True
else:
@@ -44,19 +42,16 @@
# test_lock_again after the global state was cleared by
# test_compile_lock. As a workaround, we repatch these global
# fields systematically.
- spacestate.ll_GIL = self.ll_GIL
invoke_around_extcall(before_external_call, after_external_call)
return result
def reinit_threads(self, space):
- if self.ll_GIL:
- self.ll_GIL = thread.allocate_ll_lock()
- thread.acquire_NOAUTO(self.ll_GIL, True)
- self.enter_thread(space)
+ if self.gil_ready:
+ self.gil_ready = False
+ self.setup_threads(space)
def yield_thread(self):
- thread.yield_thread() # explicitly release the gil (used by test_gil)
-
+ do_yield_thread()
class GILReleaseAction(PeriodicAsyncAction):
"""An action called every sys.checkinterval bytecodes. It releases
@@ -64,16 +59,12 @@
"""
def perform(self, executioncontext, frame):
- # Other threads can run between the release() and the acquire()
- # implicit in the following external function call (which has
- # otherwise no effect).
- thread.yield_thread()
+ do_yield_thread()
class SpaceState:
def _freeze_(self):
- self.ll_GIL = thread.null_ll_lock
self.action_after_thread_switch = None
# ^^^ set by AsyncAction.fire_after_thread_switch()
return False
@@ -95,14 +86,14 @@
# this function must not raise, in such a way that the exception
# transformer knows that it cannot raise!
e = get_errno()
- thread.release_NOAUTO(spacestate.ll_GIL)
+ thread.gil_release()
set_errno(e)
before_external_call._gctransformer_hint_cannot_collect_ = True
before_external_call._dont_reach_me_in_del_ = True
def after_external_call():
e = get_errno()
- thread.acquire_NOAUTO(spacestate.ll_GIL, True)
+ thread.gil_acquire()
thread.gc_thread_run()
spacestate.after_thread_switch()
set_errno(e)
@@ -115,3 +106,18 @@
# pointers in the shadow stack. This is necessary because the GIL is
# not held after the call to before_external_call() or before the call
# to after_external_call().
+
+def do_yield_thread():
+ # explicitly release the gil, in a way that tries to give more
+ # priority to other threads (as opposed to continuing to run in
+ # the same thread).
+ if thread.gil_yield_thread():
+ thread.gc_thread_run()
+ spacestate.after_thread_switch()
+do_yield_thread._gctransformer_hint_close_stack_ = True
+do_yield_thread._dont_reach_me_in_del_ = True
+do_yield_thread._dont_inline_ = True
+
+# do_yield_thread() needs a different hint: _gctransformer_hint_close_stack_.
+# The *_external_call() functions are themselves called only from the rffi
+# module from a helper function that also has this hint.
diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py
--- a/pypy/module/thread/ll_thread.py
+++ b/pypy/module/thread/ll_thread.py
@@ -17,7 +17,8 @@
include_dirs = [str(py.path.local(autopath.pypydir).join('translator', 'c'))],
export_symbols = ['RPyThreadGetIdent', 'RPyThreadLockInit',
'RPyThreadAcquireLock', 'RPyThreadReleaseLock',
- 'RPyThreadYield',
+ 'RPyGilAllocate', 'RPyGilYieldThread',
+ 'RPyGilRelease', 'RPyGilAcquire',
'RPyThreadGetStackSize', 'RPyThreadSetStackSize',
'RPyOpaqueDealloc_ThreadLock',
'RPyThreadAfterFork']
@@ -69,8 +70,16 @@
[TLOCKP], lltype.Void,
_nowrapper=True)
-# this function does nothing apart from releasing the GIL temporarily.
-yield_thread = llexternal('RPyThreadYield', [], lltype.Void, threadsafe=True)
+# these functions manipulate directly the GIL, whose definition does not
+# escape the C code itself
+gil_allocate = llexternal('RPyGilAllocate', [], lltype.Signed,
+ _nowrapper=True)
+gil_yield_thread = llexternal('RPyGilYieldThread', [], lltype.Signed,
+ _nowrapper=True)
+gil_release = llexternal('RPyGilRelease', [], lltype.Void,
+ _nowrapper=True)
+gil_acquire = llexternal('RPyGilAcquire', [], lltype.Void,
+ _nowrapper=True)
def allocate_lock():
return Lock(allocate_ll_lock())
diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py
--- a/pypy/module/thread/test/test_gil.py
+++ b/pypy/module/thread/test/test_gil.py
@@ -30,19 +30,34 @@
use_threads = True
bigtest = False
- def test_one_thread(self):
+ def test_one_thread(self, skew=+1):
+ from pypy.rlib.debug import debug_print
if self.bigtest:
- N = 1000000
+ N = 100000
+ skew *= 25000
else:
N = 100
+ skew *= 25
space = FakeSpace()
class State:
pass
state = State()
- def runme():
- for i in range(N):
+ def runme(main=False):
+ j = 0
+ for i in range(N + [-skew, skew][main]):
+ state.datalen1 += 1 # try to crash if the GIL is not
+ state.datalen2 += 1 # correctly acquired
state.data.append((thread.get_ident(), i))
+ state.datalen3 += 1
+ state.datalen4 += 1
+ assert state.datalen1 == len(state.data)
+ assert state.datalen2 == len(state.data)
+ assert state.datalen3 == len(state.data)
+ assert state.datalen4 == len(state.data)
+ debug_print(main, i, state.datalen4)
state.threadlocals.yield_thread()
+ assert i == j
+ j += 1
def bootstrap():
try:
runme()
@@ -50,20 +65,26 @@
thread.gc_thread_die()
def f():
state.data = []
+ state.datalen1 = 0
+ state.datalen2 = 0
+ state.datalen3 = 0
+ state.datalen4 = 0
state.threadlocals = gil.GILThreadLocals()
state.threadlocals.setup_threads(space)
thread.gc_thread_prepare()
subident = thread.start_new_thread(bootstrap, ())
mainident = thread.get_ident()
- runme()
+ runme(True)
still_waiting = 3000
while len(state.data) < 2*N:
+ debug_print(len(state.data))
if not still_waiting:
raise ValueError("time out")
still_waiting -= 1
if not we_are_translated(): gil.before_external_call()
time.sleep(0.01)
if not we_are_translated(): gil.after_external_call()
+ debug_print("leaving!")
i1 = i2 = 0
for tid, i in state.data:
if tid == mainident:
@@ -72,14 +93,17 @@
assert i == i2; i2 += 1
else:
assert 0
- assert i1 == N
- assert i2 == N
+ assert i1 == N + skew
+ assert i2 == N - skew
return len(state.data)
fn = self.getcompiled(f, [])
res = fn()
assert res == 2*N
+ def test_one_thread_rev(self):
+ self.test_one_thread(skew=-1)
+
class TestRunDirectly(GILTests):
def getcompiled(self, f, argtypes):
diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py
--- a/pypy/module/thread/test/test_thread.py
+++ b/pypy/module/thread/test/test_thread.py
@@ -225,7 +225,8 @@
def busy_wait():
for x in range(1000):
- time.sleep(0.01)
+ print 'tick...', x # <-force the GIL to be released, as
+ time.sleep(0.01) # time.sleep doesn't do non-translated
# This is normally called by app_main.py
signal.signal(signal.SIGINT, signal.default_int_handler)
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -6,6 +6,7 @@
from pypy.interpreter.typedef import default_identity_hash
from pypy.tool.sourcetools import compile2, func_with_new_name
from pypy.module.__builtin__.interp_classobj import W_InstanceObject
+from pypy.rlib.objectmodel import specialize
def object_getattribute(space):
"Utility that returns the app-level descriptor object.__getattribute__."
@@ -257,15 +258,15 @@
msg = "'%s' has no length" % (name,)
raise OperationError(space.w_TypeError, space.wrap(msg))
w_res = space.get_and_call_function(w_descr, w_obj)
- space._check_len_result(w_res)
- return w_res
+ return space.wrap(space._check_len_result(w_res))
def _check_len_result(space, w_obj):
# Will complain if result is too big.
- result = space.int_w(w_obj)
+ result = space.int_w(space.int(w_obj))
if result < 0:
raise OperationError(space.w_ValueError,
space.wrap("__len__() should return >= 0"))
+ return result
def iter(space, w_obj):
w_descr = space.lookup(w_obj, '__iter__')
@@ -507,6 +508,7 @@
def issubtype(space, w_sub, w_type):
return space._type_issubtype(w_sub, w_type)
+ @specialize.arg_or_var(2)
def isinstance(space, w_inst, w_type):
return space.wrap(space._type_isinstance(w_inst, w_type))
diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py
--- a/pypy/objspace/std/boolobject.py
+++ b/pypy/objspace/std/boolobject.py
@@ -1,8 +1,10 @@
+from pypy.rlib.rbigint import rbigint
+from pypy.rlib.rarithmetic import r_uint
+from pypy.interpreter.error import OperationError
from pypy.objspace.std.model import registerimplementation, W_Object
from pypy.objspace.std.register_all import register_all
from pypy.objspace.std.intobject import W_IntObject
-
class W_BoolObject(W_Object):
from pypy.objspace.std.booltype import bool_typedef as typedef
_immutable_fields_ = ['boolval']
@@ -20,6 +22,21 @@
def unwrap(w_self, space):
return w_self.boolval
+ def int_w(w_self, space):
+ return int(w_self.boolval)
+
+ def uint_w(w_self, space):
+ intval = int(w_self.boolval)
+ if intval < 0:
+ raise OperationError(space.w_ValueError,
+ space.wrap("cannot convert negative integer to unsigned"))
+ else:
+ return r_uint(intval)
+
+ def bigint_w(w_self, space):
+ return rbigint.fromint(int(w_self.boolval))
+
+
registerimplementation(W_BoolObject)
W_BoolObject.w_False = W_BoolObject(False)
diff --git a/pypy/objspace/std/default.py b/pypy/objspace/std/default.py
--- a/pypy/objspace/std/default.py
+++ b/pypy/objspace/std/default.py
@@ -1,48 +1,16 @@
"""Default implementation for some operation."""
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, typed_unwrap_error_msg
from pypy.objspace.std.register_all import register_all
-from pypy.rlib import objectmodel
-# The following default implementations are used before delegation is tried.
-# 'id' is normally the address of the wrapper.
-
-def id__ANY(space, w_obj):
- #print 'id:', w_obj
- return space.wrap(objectmodel.compute_unique_id(w_obj))
-
# __init__ should succeed if called internally as a multimethod
def init__ANY(space, w_obj, __args__):
pass
-def typed_unwrap_error_msg(space, expected, w_obj):
- type_name = space.type(w_obj).getname(space)
- return space.wrap("expected %s, got %s object" % (expected, type_name))
-
-def int_w__ANY(space,w_obj):
- raise OperationError(space.w_TypeError,
- typed_unwrap_error_msg(space, "integer", w_obj))
-
-def str_w__ANY(space,w_obj):
- raise OperationError(space.w_TypeError,
- typed_unwrap_error_msg(space, "string", w_obj))
-
def float_w__ANY(space,w_obj):
raise OperationError(space.w_TypeError,
typed_unwrap_error_msg(space, "float", w_obj))
-def uint_w__ANY(space,w_obj):
- raise OperationError(space.w_TypeError,
- typed_unwrap_error_msg(space, "integer", w_obj))
-
-def unicode_w__ANY(space,w_obj):
- raise OperationError(space.w_TypeError,
- typed_unwrap_error_msg(space, "unicode", w_obj))
-
-def bigint_w__ANY(space,w_obj):
- raise OperationError(space.w_TypeError,
- typed_unwrap_error_msg(space, "integer", w_obj))
-
register_all(vars())
diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py
--- a/pypy/objspace/std/intobject.py
+++ b/pypy/objspace/std/intobject.py
@@ -1,12 +1,13 @@
from pypy.interpreter.error import OperationError
from pypy.objspace.std import newformat
+from pypy.objspace.std.inttype import wrapint
from pypy.objspace.std.model import registerimplementation, W_Object
-from pypy.objspace.std.register_all import register_all
from pypy.objspace.std.multimethod import FailedToImplementArgs
from pypy.objspace.std.noneobject import W_NoneObject
+from pypy.objspace.std.register_all import register_all
+from pypy.rlib import jit
from pypy.rlib.rarithmetic import ovfcheck, ovfcheck_lshift, LONG_BIT, r_uint
from pypy.rlib.rbigint import rbigint
-from pypy.objspace.std.inttype import wrapint
"""
In order to have the same behavior running
@@ -20,7 +21,7 @@
_immutable_fields_ = ['intval']
from pypy.objspace.std.inttype import int_typedef as typedef
-
+
def __init__(w_self, intval):
w_self.intval = intval
@@ -30,7 +31,18 @@
def unwrap(w_self, space):
return int(w_self.intval)
+ int_w = unwrap
+ def uint_w(w_self, space):
+ intval = w_self.intval
+ if intval < 0:
+ raise OperationError(space.w_ValueError,
+ space.wrap("cannot convert negative integer to unsigned"))
+ else:
+ return r_uint(intval)
+
+ def bigint_w(w_self, space):
+ return rbigint.fromint(w_self.intval)
registerimplementation(W_IntObject)
@@ -39,20 +51,6 @@
# alias and then teach copy_multimethods in smallintobject.py to override
# it. See int__Int for example.
-def int_w__Int(space, w_int1):
- return int(w_int1.intval)
-
-def uint_w__Int(space, w_int1):
- intval = w_int1.intval
- if intval < 0:
- raise OperationError(space.w_ValueError,
- space.wrap("cannot convert negative integer to unsigned"))
- else:
- return r_uint(intval)
-
-def bigint_w__Int(space, w_int1):
- return rbigint.fromint(w_int1.intval)
-
def repr__Int(space, w_int1):
a = w_int1.intval
res = str(a)
@@ -138,7 +136,7 @@
x = float(w_int1.intval)
y = float(w_int2.intval)
if y == 0.0:
- raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float division"))
+ raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float division"))
return space.wrap(x / y)
def mod__Int_Int(space, w_int1, w_int2):
@@ -172,7 +170,8 @@
# helper for pow()
-def _impl_int_int_pow(space, iv, iw, iz=0):
+ at jit.look_inside_iff(lambda space, iv, iw, iz: jit.isconstant(iw) and jit.isconstant(iz))
+def _impl_int_int_pow(space, iv, iw, iz):
if iw < 0:
if iz != 0:
raise OperationError(space.w_TypeError,
@@ -200,7 +199,7 @@
except OverflowError:
raise FailedToImplementArgs(space.w_OverflowError,
space.wrap("integer exponentiation"))
- return wrapint(space, ix)
+ return ix
def pow__Int_Int_Int(space, w_int1, w_int2, w_int3):
x = w_int1.intval
@@ -209,12 +208,12 @@
if z == 0:
raise OperationError(space.w_ValueError,
space.wrap("pow() 3rd argument cannot be 0"))
- return _impl_int_int_pow(space, x, y, z)
+ return space.wrap(_impl_int_int_pow(space, x, y, z))
def pow__Int_Int_None(space, w_int1, w_int2, w_int3):
x = w_int1.intval
y = w_int2.intval
- return _impl_int_int_pow(space, x, y)
+ return space.wrap(_impl_int_int_pow(space, x, y, 0))
def neg__Int(space, w_int1):
a = w_int1.intval
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -386,7 +386,11 @@
if len(items)== 0:
raise OperationError(space.w_IndexError,
space.wrap("pop from empty list"))
- idx = space.int_w(w_idx)
+ if space.isinstance_w(w_idx, space.w_float):
+ raise OperationError(space.w_TypeError,
+ space.wrap("integer argument expected, got float")
+ )
+ idx = space.int_w(space.int(w_idx))
try:
return items.pop(idx)
except IndexError:
diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py
--- a/pypy/objspace/std/longobject.py
+++ b/pypy/objspace/std/longobject.py
@@ -45,6 +45,26 @@
fromrarith_int._annspecialcase_ = "specialize:argtype(0)"
fromrarith_int = staticmethod(fromrarith_int)
+ def int_w(w_self, space):
+ try:
+ return w_self.num.toint()
+ except OverflowError:
+ raise OperationError(space.w_OverflowError, space.wrap(
+ "long int too large to convert to int"))
+
+ def uint_w(w_self, space):
+ try:
+ return w_self.num.touint()
+ except ValueError:
+ raise OperationError(space.w_ValueError, space.wrap(
+ "cannot convert negative integer to unsigned int"))
+ except OverflowError:
+ raise OperationError(space.w_OverflowError, space.wrap(
+ "long int too large to convert to unsigned int"))
+
+ def bigint_w(w_self, space):
+ return w_self.num
+
def __repr__(self):
return '<W_LongObject(%d)>' % self.num.tolong()
@@ -104,27 +124,6 @@
raise OperationError(space.w_OverflowError,
space.wrap("long int too large to convert to float"))
-def int_w__Long(space, w_value):
- try:
- return w_value.num.toint()
- except OverflowError:
- raise OperationError(space.w_OverflowError, space.wrap(
- "long int too large to convert to int"))
-
-
-def uint_w__Long(space, w_value):
- try:
- return w_value.num.touint()
- except ValueError:
- raise OperationError(space.w_ValueError, space.wrap(
- "cannot convert negative integer to unsigned int"))
- except OverflowError:
- raise OperationError(space.w_OverflowError, space.wrap(
- "long int too large to convert to unsigned int"))
-
-def bigint_w__Long(space, w_value):
- return w_value.num
-
def repr__Long(space, w_long):
return space.wrap(w_long.num.repr())
diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py
--- a/pypy/objspace/std/model.py
+++ b/pypy/objspace/std/model.py
@@ -442,6 +442,13 @@
mm.dispatch_tree = merge(self.dispatch_tree, other.dispatch_tree)
return mm
+NOT_MULTIMETHODS = dict.fromkeys(
+ ['delattr', 'delete', 'get', 'id', 'inplace_div', 'inplace_floordiv',
+ 'inplace_lshift', 'inplace_mod', 'inplace_pow', 'inplace_rshift',
+ 'inplace_truediv', 'is_', 'set', 'setattr', 'type', 'userdel',
+ 'isinstance', 'issubtype'])
+# XXX should we just remove those from the method table or we're happy
+# with just not having multimethods?
class MM:
"""StdObjSpace multimethods"""
@@ -451,22 +458,17 @@
init = StdObjSpaceMultiMethod('__init__', 1, general__args__=True)
getnewargs = StdObjSpaceMultiMethod('__getnewargs__', 1)
# special visible multimethods
- int_w = StdObjSpaceMultiMethod('int_w', 1, []) # returns an unwrapped int
- str_w = StdObjSpaceMultiMethod('str_w', 1, []) # returns an unwrapped string
float_w = StdObjSpaceMultiMethod('float_w', 1, []) # returns an unwrapped float
- uint_w = StdObjSpaceMultiMethod('uint_w', 1, []) # returns an unwrapped unsigned int (r_uint)
- unicode_w = StdObjSpaceMultiMethod('unicode_w', 1, []) # returns an unwrapped list of unicode characters
- bigint_w = StdObjSpaceMultiMethod('bigint_w', 1, []) # returns an unwrapped rbigint
# NOTE: when adding more sometype_w() methods, you need to write a
# stub in default.py to raise a space.w_TypeError
marshal_w = StdObjSpaceMultiMethod('marshal_w', 1, [], extra_args=['marshaller'])
- log = StdObjSpaceMultiMethod('log', 1, [], extra_args=['base'])
# add all regular multimethods here
for _name, _symbol, _arity, _specialnames in ObjSpace.MethodTable:
- if _name not in locals():
+ if _name not in locals() or _name in NOT_MULTIMETHODS:
mm = StdObjSpaceMultiMethod(_symbol, _arity, _specialnames)
locals()[_name] = mm
del mm
pow.extras['defaults'] = (None,)
+
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -1,19 +1,17 @@
import __builtin__
import types
-from pypy.interpreter import pyframe, function, special
+from pypy.interpreter import special
from pypy.interpreter.baseobjspace import ObjSpace, Wrappable
from pypy.interpreter.error import OperationError, operationerrfmt
from pypy.interpreter.typedef import get_unique_interplevel_subclass
from pypy.objspace.std import (builtinshortcut, stdtypedef, frame, model,
transparent, callmethod, proxyobject)
from pypy.objspace.descroperation import DescrOperation, raiseattrerror
-from pypy.rlib.objectmodel import instantiate, r_dict, specialize
+from pypy.rlib.objectmodel import instantiate, r_dict, specialize, is_annotation_constant
from pypy.rlib.debug import make_sure_not_resized
from pypy.rlib.rarithmetic import base_int, widen
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib import jit
-from pypy.rlib.rbigint import rbigint
-from pypy.tool.sourcetools import func_with_new_name
# Object imports
from pypy.objspace.std.boolobject import W_BoolObject
@@ -85,6 +83,12 @@
if self.config.objspace.std.withtproxy:
transparent.setup(self)
+ for type, classes in self.model.typeorder.iteritems():
+ if len(classes) >= 3:
+ # W_Root, AnyXxx and actual object
+ self.gettypefor(type).interplevel_cls = classes[0][0]
+
+
def get_builtin_types(self):
return self.builtin_types
@@ -569,10 +573,19 @@
return self.wrap(w_sub.issubtype(w_type))
raise OperationError(self.w_TypeError, self.wrap("need type objects"))
+ @specialize.arg_or_var(2)
def _type_isinstance(self, w_inst, w_type):
- if isinstance(w_type, W_TypeObject):
- return self.type(w_inst).issubtype(w_type)
- raise OperationError(self.w_TypeError, self.wrap("need type object"))
+ if not isinstance(w_type, W_TypeObject):
+ raise OperationError(self.w_TypeError,
+ self.wrap("need type object"))
+ if is_annotation_constant(w_type):
+ cls = w_type.interplevel_cls
+ if cls is not None:
+ assert w_inst is not None
+ if isinstance(w_inst, cls):
+ return True
+ return self.type(w_inst).issubtype(w_type)
+ @specialize.arg_or_var(2)
def isinstance_w(space, w_inst, w_type):
return space._type_isinstance(w_inst, w_type)
diff --git a/pypy/objspace/std/rangeobject.py b/pypy/objspace/std/rangeobject.py
--- a/pypy/objspace/std/rangeobject.py
+++ b/pypy/objspace/std/rangeobject.py
@@ -23,7 +23,7 @@
class W_RangeListObject(W_Object):
typedef = listtype.list_typedef
-
+
def __init__(w_self, start, step, length):
assert step != 0
w_self.start = start
@@ -40,7 +40,7 @@
if not length:
w_self.w_list = space.newlist([])
return w_self.w_list
-
+
arr = [None] * length # this is to avoid using append.
i = start
@@ -146,7 +146,11 @@
if length == 0:
raise OperationError(space.w_IndexError,
space.wrap("pop from empty list"))
- idx = space.int_w(w_idx)
+ if space.isinstance_w(w_idx, space.w_float):
+ raise OperationError(space.w_TypeError,
+ space.wrap("integer argument expected, got float")
+ )
+ idx = space.int_w(space.int(w_idx))
if idx == 0:
result = w_rangelist.start
w_rangelist.start += w_rangelist.step
diff --git a/pypy/objspace/std/ropeobject.py b/pypy/objspace/std/ropeobject.py
--- a/pypy/objspace/std/ropeobject.py
+++ b/pypy/objspace/std/ropeobject.py
@@ -34,12 +34,18 @@
def unwrap(w_self, space):
return w_self._node.flatten_string()
+ str_w = unwrap
def create_if_subclassed(w_self):
if type(w_self) is W_RopeObject:
return w_self
return W_RopeObject(w_self._node)
+ def unicode_w(w_self, space):
+ # XXX should this use the default encoding?
+ from pypy.objspace.std.unicodetype import plain_str2unicode
+ return plain_str2unicode(space, w_self._node.flatten_string())
+
W_RopeObject.EMPTY = W_RopeObject(rope.LiteralStringNode.EMPTY)
W_RopeObject.PREBUILT = [W_RopeObject(rope.LiteralStringNode.PREBUILT[i])
for i in range(256)]
@@ -663,9 +669,6 @@
return W_RopeObject(rope.concatenate(
rope.multiply(zero, middle), node))
-def str_w__Rope(space, w_str):
- return w_str._node.flatten_string()
-
def hash__Rope(space, w_str):
return wrapint(space, rope.hash_rope(w_str._node))
diff --git a/pypy/objspace/std/ropeunicodeobject.py b/pypy/objspace/std/ropeunicodeobject.py
--- a/pypy/objspace/std/ropeunicodeobject.py
+++ b/pypy/objspace/std/ropeunicodeobject.py
@@ -91,11 +91,17 @@
# for testing
return w_self._node.flatten_unicode()
+ def str_w(w_self, space):
+ return space.str_w(space.str(w_self))
+
def create_if_subclassed(w_self):
if type(w_self) is W_RopeUnicodeObject:
return w_self
return W_RopeUnicodeObject(w_self._node)
+ def unicode_w(self, space):
+ return self._node.flatten_unicode()
+
W_RopeUnicodeObject.EMPTY = W_RopeUnicodeObject(rope.LiteralStringNode.EMPTY)
registerimplementation(W_RopeUnicodeObject)
@@ -157,12 +163,6 @@
assert isinstance(w_uni, W_RopeUnicodeObject) # help the annotator!
return w_uni
-def str_w__RopeUnicode(space, w_uni):
- return space.str_w(space.str(w_uni))
-
-def unicode_w__RopeUnicode(space, w_uni):
- return w_uni._node.flatten_unicode()
-
def str__RopeUnicode(space, w_uni):
return space.call_method(w_uni, 'encode')
@@ -185,7 +185,7 @@
def eq__RopeUnicode_Rope(space, w_runi, w_rope):
from pypy.objspace.std.unicodeobject import _unicode_string_comparison
- return _unicode_string_comparison(space, w_runi, w_rope,
+ return _unicode_string_comparison(space, w_runi, w_rope,
False, unicode_from_string)
def ne__RopeUnicode_RopeUnicode(space, w_str1, w_str2):
@@ -193,7 +193,7 @@
def ne__RopeUnicode_Rope(space, w_runi, w_rope):
from pypy.objspace.std.unicodeobject import _unicode_string_comparison
- return _unicode_string_comparison(space, w_runi, w_rope,
+ return _unicode_string_comparison(space, w_runi, w_rope,
True, unicode_from_string)
def gt__RopeUnicode_RopeUnicode(space, w_str1, w_str2):
@@ -247,7 +247,7 @@
if (len(l_w) == 1 and
space.is_w(space.type(l_w[0]), space.w_unicode)):
return l_w[0]
-
+
values_list = []
for i in range(len(l_w)):
w_item = l_w[i]
@@ -320,7 +320,7 @@
def make_generic(funcname):
- def func(space, w_self):
+ def func(space, w_self):
node = w_self._node
if node.length() == 0:
return space.w_False
@@ -578,7 +578,7 @@
return w_self.create_if_subclassed()
resultnode = rope.concatenate(rope.multiply(fillchar, padding), self)
return W_RopeUnicodeObject(resultnode)
-
+
def unicode_zfill__RopeUnicode_ANY(space, w_self, w_width):
self = w_self._node
length = self.length()
@@ -744,7 +744,7 @@
except OverflowError:
raise OperationError(space.w_OverflowError,
space.wrap("string too long"))
-
+
def unicode_encode__RopeUnicode_ANY_ANY(space, w_unistr,
w_encoding=None,
@@ -821,7 +821,7 @@
try:
w_newval = space.getitem(w_table, space.wrap(char))
except OperationError, e:
- if e.match(space, space.w_KeyError):
+ if e.match(space, space.w_LookupError):
result.append(crope)
else:
raise
@@ -848,7 +848,7 @@
hexdigits = "0123456789abcdef"
node = w_unicode._node
size = node.length()
-
+
singlequote = doublequote = False
iter = rope.ItemIterator(node)
for i in range(size):
@@ -900,7 +900,7 @@
])
j += 2
continue
-
+
if code >= 0x100:
result.extend(['\\', "u",
hexdigits[(code >> 12) & 0xf],
@@ -932,7 +932,7 @@
continue
if code < ord(' ') or code >= 0x7f:
result.extend(['\\', "x",
- hexdigits[(code >> 4) & 0xf],
+ hexdigits[(code >> 4) & 0xf],
hexdigits[(code >> 0) & 0xf],
])
j += 1
@@ -964,15 +964,15 @@
def next__RopeUnicodeIter(space, w_ropeiter):
if w_ropeiter.node is None:
- raise OperationError(space.w_StopIteration, space.w_None)
+ raise OperationError(space.w_StopIteration, space.w_None)
try:
unichar = w_ropeiter.item_iter.nextunichar()
w_item = space.wrap(unichar)
except StopIteration:
w_ropeiter.node = None
w_ropeiter.char_iter = None
- raise OperationError(space.w_StopIteration, space.w_None)
- w_ropeiter.index += 1
+ raise OperationError(space.w_StopIteration, space.w_None)
+ w_ropeiter.index += 1
return w_item
# XXX __length_hint__()
diff --git a/pypy/objspace/std/smallintobject.py b/pypy/objspace/std/smallintobject.py
--- a/pypy/objspace/std/smallintobject.py
+++ b/pypy/objspace/std/smallintobject.py
@@ -7,16 +7,30 @@
from pypy.objspace.std.register_all import register_all
from pypy.objspace.std.noneobject import W_NoneObject
from pypy.objspace.std.intobject import W_IntObject
+from pypy.interpreter.error import OperationError
from pypy.rlib.objectmodel import UnboxedValue
+from pypy.rlib.rbigint import rbigint
+from pypy.rlib.rarithmetic import r_uint
from pypy.tool.sourcetools import func_with_new_name
-
class W_SmallIntObject(W_Object, UnboxedValue):
__slots__ = 'intval'
from pypy.objspace.std.inttype import int_typedef as typedef
def unwrap(w_self, space):
return int(w_self.intval)
+ int_w = unwrap
+
+ def uint_w(w_self, space):
+ intval = w_self.intval
+ if intval < 0:
+ raise OperationError(space.w_ValueError,
+ space.wrap("cannot convert negative integer to unsigned"))
+ else:
+ return r_uint(intval)
+
+ def bigint_w(w_self, space):
+ return rbigint.fromint(w_self.intval)
registerimplementation(W_SmallIntObject)
diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py
--- a/pypy/objspace/std/smalllongobject.py
+++ b/pypy/objspace/std/smalllongobject.py
@@ -39,6 +39,30 @@
def __repr__(w_self):
return '<W_SmallLongObject(%d)>' % w_self.longlong
+ def int_w(w_self, space):
+ a = w_self.longlong
+ b = intmask(a)
+ if b == a:
+ return b
+ else:
+ raise OperationError(space.w_OverflowError, space.wrap(
+ "long int too large to convert to int"))
+
+ def uint_w(w_self, space):
+ a = w_self.longlong
+ if a < 0:
+ raise OperationError(space.w_ValueError, space.wrap(
+ "cannot convert negative integer to unsigned int"))
+ b = r_uint(a)
+ if r_longlong(b) == a:
+ return b
+ else:
+ raise OperationError(space.w_OverflowError, space.wrap(
+ "long int too large to convert to unsigned int"))
+
+ def bigint_w(w_self, space):
+ return w_self.asbigint()
+
registerimplementation(W_SmallLongObject)
# ____________________________________________________________
@@ -102,30 +126,6 @@
def float__SmallLong(space, w_value):
return space.newfloat(float(w_value.longlong))
-def int_w__SmallLong(space, w_value):
- a = w_value.longlong
- b = intmask(a)
- if b == a:
- return b
- else:
- raise OperationError(space.w_OverflowError, space.wrap(
- "long int too large to convert to int"))
-
-def uint_w__SmallLong(space, w_value):
- a = w_value.longlong
- if a < 0:
- raise OperationError(space.w_ValueError, space.wrap(
- "cannot convert negative integer to unsigned int"))
- b = r_uint(a)
- if r_longlong(b) == a:
- return b
- else:
- raise OperationError(space.w_OverflowError, space.wrap(
- "long int too large to convert to unsigned int"))
-
-def bigint_w__SmallLong(space, w_value):
- return w_value.asbigint()
-
def lt__SmallLong_SmallLong(space, w_small1, w_small2):
return space.newbool(w_small1.longlong < w_small2.longlong)
def le__SmallLong_SmallLong(space, w_small1, w_small2):
diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py
--- a/pypy/objspace/std/strbufobject.py
+++ b/pypy/objspace/std/strbufobject.py
@@ -32,6 +32,9 @@
def unwrap(self, space):
return self.force()
+ def str_w(self, space):
+ return self.force()
+
registerimplementation(W_StringBufferObject)
# ____________________________________________________________
@@ -55,9 +58,6 @@
def len__StringBuffer(space, w_self):
return space.wrap(w_self.length)
-def str_w__StringBuffer(space, w_strbuf):
- return w_strbuf.force()
-
def add__StringBuffer_String(space, w_self, w_other):
if w_self.builder.getlength() != w_self.length:
builder = StringBuilder()
diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py
--- a/pypy/objspace/std/stringobject.py
+++ b/pypy/objspace/std/stringobject.py
@@ -33,17 +33,20 @@
def unwrap(w_self, space):
return w_self._value
+ def str_w(w_self, space):
+ return w_self._value
+
+ def unicode_w(w_self, space):
+ # XXX should this use the default encoding?
+ from pypy.objspace.std.unicodetype import plain_str2unicode
+ return plain_str2unicode(space, w_self._value)
+
registerimplementation(W_StringObject)
W_StringObject.EMPTY = W_StringObject('')
W_StringObject.PREBUILT = [W_StringObject(chr(i)) for i in range(256)]
del i
-def unicode_w__String(space, w_self):
- # XXX should this use the default encoding?
- from pypy.objspace.std.unicodetype import plain_str2unicode
- return plain_str2unicode(space, w_self._value)
-
def _is_generic(space, w_self, fun):
v = w_self._value
if len(v) == 0:
@@ -773,8 +776,6 @@
return space.wrap("".join(buf))
-def str_w__String(space, w_str):
- return w_str._value
def hash__String(space, w_str):
s = w_str._value
diff --git a/pypy/objspace/std/strjoinobject.py b/pypy/objspace/std/strjoinobject.py
--- a/pypy/objspace/std/strjoinobject.py
+++ b/pypy/objspace/std/strjoinobject.py
@@ -29,6 +29,7 @@
def unwrap(w_self, space):
return w_self.force()
+ str_w = unwrap
registerimplementation(W_StringJoinObject)
@@ -45,9 +46,6 @@
result += len(w_self.joined_strs[i])
return space.wrap(result)
-def str_w__StringJoin(space, w_str):
- return w_str.force()
-
def add__StringJoin_StringJoin(space, w_self, w_other):
if len(w_self.joined_strs) > w_self.until:
w_self.force(True)
diff --git a/pypy/objspace/std/strsliceobject.py b/pypy/objspace/std/strsliceobject.py
--- a/pypy/objspace/std/strsliceobject.py
+++ b/pypy/objspace/std/strsliceobject.py
@@ -31,6 +31,9 @@
w_self.stop = len(str)
return str
+ def str_w(w_self, space):
+ return w_self.force()
+
def __repr__(w_self):
""" representation for debugging purposes """
return "%s(%r[%d:%d])" % (w_self.__class__.__name__,
@@ -165,11 +168,6 @@
return space.w_True
return space.w_False
-
-def str_w__StringSlice(space, w_str):
- return w_str.force()
-
-
def getitem__StringSlice_ANY(space, w_str, w_index):
ival = space.getindex_w(w_index, space.w_IndexError, "string index")
slen = w_str.stop - w_str.start
diff --git a/pypy/objspace/std/test/test_boolobject.py b/pypy/objspace/std/test/test_boolobject.py
--- a/pypy/objspace/std/test/test_boolobject.py
+++ b/pypy/objspace/std/test/test_boolobject.py
@@ -17,6 +17,12 @@
def test_false(self):
assert not self.space.is_true(self.false)
+
+ def test_uint_w(self):
+ assert self.space.uint_w(self.true) == 1
+
+ def test_rbigint_w(self):
+ assert self.space.bigint_w(self.true)._digits == [1]
class AppTestAppBoolTest:
def test_bool_callable(self):
diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py
--- a/pypy/objspace/std/test/test_listobject.py
+++ b/pypy/objspace/std/test/test_listobject.py
@@ -705,6 +705,20 @@
l.pop()
assert l == range(9)
+ def test_pop_custom_int(self):
+ class A(object):
+ def __init__(self, x):
+ self.x = x
+
+ def __int__(self):
+ return self.x
+
+ l = range(10)
+ x = l.pop(A(-1))
+ assert x == 9
+ assert l == range(9)
+ raises(TypeError, range(10).pop, 1.0)
+
def test_remove(self):
c = list('hello world')
c.remove('l')
diff --git a/pypy/objspace/std/test/test_stdobjspace.py b/pypy/objspace/std/test/test_stdobjspace.py
--- a/pypy/objspace/std/test/test_stdobjspace.py
+++ b/pypy/objspace/std/test/test_stdobjspace.py
@@ -46,3 +46,17 @@
assert space.sliceindices(w_slice, w(3)) == (1,2,1)
assert space.sliceindices(w_obj, w(3)) == (1,2,3)
+ def test_fastpath_isinstance(self):
+ from pypy.objspace.std.stringobject import W_StringObject
+ from pypy.objspace.std.intobject import W_IntObject
+
+ space = self.space
+ assert space.w_str.interplevel_cls is W_StringObject
+ assert space.w_int.interplevel_cls is W_IntObject
+ class X(W_StringObject):
+ def __init__(self):
+ pass
+
+ typedef = None
+
+ assert space.isinstance_w(X(), space.w_str)
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -126,6 +126,25 @@
raises(TypeError, type, 'test', 42, {})
raises(TypeError, type, 'test', (object,), 42)
+ def test_call_type_subclass(self):
+ class A(type):
+ pass
+
+ assert A("hello") is str
+
+ # Make sure type(x) doesn't call x.__class__.__init__
+ class T(type):
+ counter = 0
+ def __init__(self, *args):
+ T.counter += 1
+ class C:
+ __metaclass__ = T
+ assert T.counter == 1
+ a = C()
+ assert T.counter == 1
+ assert type(a) is C
+ assert T.counter == 1
+
def test_bases(self):
assert int.__bases__ == (object,)
class X:
diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
--- a/pypy/objspace/std/test/test_unicodeobject.py
+++ b/pypy/objspace/std/test/test_unicodeobject.py
@@ -443,6 +443,8 @@
assert u'<i><i><i>c' == u'abababc'.translate({ord('a'):None, ord('b'):u'<i>'})
assert u'c' == u'abababc'.translate({ord('a'):None, ord('b'):u''})
assert u'xyyx' == u'xzx'.translate({ord('z'):u'yy'})
+ assert u'abcd' == u'ab\0d'.translate(u'c')
+ assert u'abcd' == u'abcd'.translate(u'')
raises(TypeError, u'hello'.translate)
raises(TypeError, u'abababc'.translate, {ord('a'):''})
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -77,7 +77,7 @@
for i in range(len(self.lookup_where)):
self.lookup_where[i] = None_None
-# possible values of compares_by_identity_status
+# possible values of compares_by_identity_status
UNKNOWN = 0
COMPARES_BY_IDENTITY = 1
OVERRIDES_EQ_CMP_OR_HASH = 2
@@ -115,6 +115,9 @@
# of the __new__ is an instance of the type
w_bltin_new = None
+ interplevel_cls = None # not None for prebuilt instances of
+ # interpreter-level types
+
@dont_look_inside
def __init__(w_self, space, name, bases_w, dict_w,
overridetypedef=None):
@@ -355,7 +358,7 @@
if w_value is not None:
return w_value
return None
-
+
@unroll_safe
def _lookup(w_self, key):
space = w_self.space
@@ -819,14 +822,6 @@
def call__Type(space, w_type, __args__):
promote(w_type)
- # special case for type(x)
- if space.is_w(w_type, space.w_type):
- try:
- w_obj, = __args__.fixedunpack(1)
- except ValueError:
- pass
- else:
- return space.type(w_obj)
# invoke the __new__ of the type
if not we_are_jitted():
# note that the annotator will figure out that w_type.w_bltin_new can
@@ -853,7 +848,8 @@
call_init = space.isinstance_w(w_newobject, w_type)
# maybe invoke the __init__ of the type
- if call_init:
+ if (call_init and not (space.is_w(w_type, space.w_type) and
+ not __args__.keywords and len(__args__.arguments_w) == 1)):
w_descr = space.lookup(w_newobject, '__init__')
w_result = space.get_and_call_args(w_descr, w_newobject, __args__)
if not space.is_w(w_result, space.w_None):
diff --git a/pypy/objspace/std/typetype.py b/pypy/objspace/std/typetype.py
--- a/pypy/objspace/std/typetype.py
+++ b/pypy/objspace/std/typetype.py
@@ -1,17 +1,28 @@
-from pypy.interpreter.error import OperationError, operationerrfmt
from pypy.interpreter import gateway
from pypy.interpreter.argument import Arguments
+from pypy.interpreter.error import OperationError, operationerrfmt
from pypy.interpreter.typedef import (GetSetProperty, descr_get_dict,
weakref_descr)
from pypy.objspace.std.stdtypedef import StdTypeDef
-def descr__new__(space, w_typetype, w_name, w_bases, w_dict):
+
+def descr__new__(space, w_typetype, w_name, w_bases=gateway.NoneNotWrapped,
+ w_dict=gateway.NoneNotWrapped):
+
"This is used to create user-defined classes only."
from pypy.objspace.std.typeobject import W_TypeObject
# XXX check types
w_typetype = _precheck_for_new(space, w_typetype)
+ # special case for type(x)
+ if (space.is_w(space.type(w_typetype), space.w_type) and w_bases is None and
+ w_dict is None):
+ return space.type(w_name)
+ elif w_bases is None or w_dict is None:
+ raise OperationError(space.w_TypeError, space.wrap("type() takes 1 or 3 arguments"))
+
+
bases_w = space.fixedview(w_bases)
w_winner = w_typetype
diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
--- a/pypy/objspace/std/unicodeobject.py
+++ b/pypy/objspace/std/unicodeobject.py
@@ -40,6 +40,12 @@
return w_self
return W_UnicodeObject(w_self._value)
+ def str_w(self, space):
+ return space.str_w(space.str(self))
+
+ def unicode_w(self, space):
+ return self._value
+
W_UnicodeObject.EMPTY = W_UnicodeObject(u'')
registerimplementation(W_UnicodeObject)
@@ -99,12 +105,6 @@
return space.not_(result)
return result
-def str_w__Unicode(space, w_uni):
- return space.str_w(str__Unicode(space, w_uni))
-
-def unicode_w__Unicode(space, w_uni):
- return w_uni._value
-
def str__Unicode(space, w_uni):
from pypy.objspace.std.unicodetype import encode_object
return encode_object(space, w_uni, None, None)
@@ -893,7 +893,7 @@
try:
w_newval = space.getitem(w_table, space.wrap(ord(unichar)))
except OperationError, e:
- if e.match(space, space.w_KeyError):
+ if e.match(space, space.w_LookupError):
result.append(unichar)
else:
raise
diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py
--- a/pypy/objspace/test/test_descroperation.py
+++ b/pypy/objspace/test/test_descroperation.py
@@ -667,5 +667,19 @@
return -1L
raises(ValueError, len, Y())
+ def test_len_custom__int__(self):
+ class X(object):
+ def __init__(self, x):
+ self.x = x
+ def __len__(self):
+ return self.x
+ def __int__(self):
+ return self.x
+
+ l = len(X(3.0))
+ assert l == 3 and type(l) is int
+ l = len(X(X(2)))
+ assert l == 2 and type(l) is int
+
class AppTestWithBuiltinShortcut(AppTest_Descroperation):
OPTIONS = {'objspace.std.builtinshortcut': True}
diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py
--- a/pypy/rlib/jit.py
+++ b/pypy/rlib/jit.py
@@ -158,7 +158,7 @@
return decorator
@oopspec("jit.isconstant(value)")
- at specialize.argtype(0)
+ at specialize.ll()
def isconstant(value):
"""
While tracing, returns whether or not the value is currently known to be
@@ -167,10 +167,7 @@
This is for advanced usage only.
"""
- # I hate the annotator so much.
- if NonConstant(False):
- return True
- return False
+ return NonConstant(False)
@oopspec("jit.isvirtual(value)")
@specialize.ll()
@@ -181,9 +178,7 @@
This is for advanced usage only.
"""
- if NonConstant(False):
- return True
- return False
+ return NonConstant(False)
class Entry(ExtRegistryEntry):
_about_ = hint
diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py
--- a/pypy/rlib/objectmodel.py
+++ b/pypy/rlib/objectmodel.py
@@ -46,6 +46,17 @@
return decorated_func
+ def arg_or_var(self, *args):
+ """ Same as arg, but additionally allow for a 'variable' annotation,
+ that would simply be a situation where designated arg is not
+ a constant
+ """
+ def decorated_func(func):
+ func._annspecialcase_ = 'specialize:arg_or_var' + self._wrap(args)
+ return func
+
+ return decorated_func
+
def argtype(self, *args):
""" Specialize function based on types of arguments on given positions.
@@ -165,6 +176,24 @@
def keepalive_until_here(*values):
pass
+def is_annotation_constant(thing):
+ """ Returns whether the annotator can prove that the argument is constant.
+ For advanced usage only."""
+ return True
+
+class Entry(ExtRegistryEntry):
+ _about_ = is_annotation_constant
+
+ def compute_result_annotation(self, s_arg):
+ from pypy.annotation import model
+ r = model.SomeBool()
+ r.const = s_arg.is_constant()
+ return r
+
+ def specialize_call(self, hop):
+ from pypy.rpython.lltypesystem import lltype
+ return hop.inputconst(lltype.Bool, hop.s_result.const)
+
# ____________________________________________________________
class FREED_OBJECT(object):
diff --git a/pypy/rlib/rgc.py b/pypy/rlib/rgc.py
--- a/pypy/rlib/rgc.py
+++ b/pypy/rlib/rgc.py
@@ -3,6 +3,7 @@
from pypy.rlib import jit
from pypy.rlib.objectmodel import we_are_translated, enforceargs, specialize
+from pypy.rlib.nonconst import NonConstant
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rpython.lltypesystem import lltype, llmemory
@@ -143,6 +144,10 @@
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rlib.objectmodel import keepalive_until_here
+ # XXX: Hack to ensure that we get a proper effectinfo.write_descrs_arrays
+ if NonConstant(False):
+ dest[dest_start] = source[source_start]
+
# supports non-overlapping copies only
if not we_are_translated():
if source == dest:
diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py
--- a/pypy/rlib/ropenssl.py
+++ b/pypy/rlib/ropenssl.py
@@ -62,8 +62,7 @@
"OPENSSL_VERSION_NUMBER")
SSLEAY_VERSION = rffi_platform.DefinedConstantString(
"SSLEAY_VERSION", "SSLeay_version(SSLEAY_VERSION)")
- OPENSSL_NO_SSL2 = rffi_platform.DefinedConstantInteger(
- "OPENSSL_NO_SSL2")
+ OPENSSL_NO_SSL2 = rffi_platform.Defined("OPENSSL_NO_SSL2")
SSL_FILETYPE_PEM = rffi_platform.ConstantInteger("SSL_FILETYPE_PEM")
SSL_OP_ALL = rffi_platform.ConstantInteger("SSL_OP_ALL")
SSL_VERIFY_NONE = rffi_platform.ConstantInteger("SSL_VERIFY_NONE")
diff --git a/pypy/rlib/rstacklet.py b/pypy/rlib/rstacklet.py
--- a/pypy/rlib/rstacklet.py
+++ b/pypy/rlib/rstacklet.py
@@ -99,12 +99,20 @@
return False
def add(self, h):
if not self.sthread.is_empty_handle(h):
+ if h == self.sthread.get_null_handle():
+ raise StackletDebugError("unexpected null handle")
self.active.append(h)
def remove(self, h):
try:
i = self.active.index(h)
except ValueError:
- raise StackletDebugError
+ if self.sthread.is_empty_handle(h):
+ msg = "empty stacklet handle"
+ elif h == self.sthread.get_null_handle():
+ msg = "unexpected null handle"
+ else:
+ msg = "double usage of handle %r" % (h,)
+ raise StackletDebugError(msg)
del self.active[i]
debug = Debug()
diff --git a/pypy/rlib/test/test_jit.py b/pypy/rlib/test/test_jit.py
--- a/pypy/rlib/test/test_jit.py
+++ b/pypy/rlib/test/test_jit.py
@@ -1,7 +1,7 @@
import py
from pypy.conftest import option
from pypy.rlib.jit import hint, we_are_jitted, JitDriver, elidable_promote
-from pypy.rlib.jit import JitHintError, oopspec
+from pypy.rlib.jit import JitHintError, oopspec, isconstant
from pypy.translator.translator import TranslationContext, graphof
from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin
from pypy.rpython.lltypesystem import lltype
@@ -137,6 +137,16 @@
t.view()
# assert did not raise
+ def test_isconstant(self):
+ def f(n):
+ assert n >= 0
+ assert isconstant(n) is False
+ l = []
+ l.append(n)
+ return len(l)
+ res = self.interpret(f, [234])
+ assert res == 1
+
class TestJITLLtype(BaseTestJIT, LLRtypeMixin):
pass
diff --git a/pypy/rlib/test/test_objectmodel.py b/pypy/rlib/test/test_objectmodel.py
--- a/pypy/rlib/test/test_objectmodel.py
+++ b/pypy/rlib/test/test_objectmodel.py
@@ -339,6 +339,19 @@
res = self.interpret(f, [42])
assert res == 84
+ def test_isconstant(self):
+ from pypy.rlib.objectmodel import is_annotation_constant, specialize
+
+ @specialize.arg_or_var(0)
+ def f(arg):
+ if is_annotation_constant(arg):
+ return 1
+ return 10
+
+ def fn(arg):
+ return f(arg) + f(3)
+
+ assert self.interpret(fn, [15]) == 11
class TestLLtype(BaseTestObjectModel, LLRtypeMixin):
@@ -451,5 +464,4 @@
if llop.opname == 'malloc_varsize':
break
assert llop.args[2] is graph.startblock.inputargs[0]
-
diff --git a/pypy/rlib/test/test_rstacklet.py b/pypy/rlib/test/test_rstacklet.py
--- a/pypy/rlib/test/test_rstacklet.py
+++ b/pypy/rlib/test/test_rstacklet.py
@@ -264,6 +264,10 @@
gcrootfinder = 'shadowstack'
+def test_dont_keep_debug_to_true():
+ assert not rstacklet.DEBUG
+
+
def target(*args):
return entry_point, None
diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py
--- a/pypy/rpython/lltypesystem/ll2ctypes.py
+++ b/pypy/rpython/lltypesystem/ll2ctypes.py
@@ -140,7 +140,8 @@
if isinstance(FIELDTYPE, lltype.Ptr):
cls = get_ctypes_type(FIELDTYPE, delayed_builders)
else:
- cls = get_ctypes_type(FIELDTYPE)
+ cls = get_ctypes_type(FIELDTYPE, delayed_builders,
+ cannot_delay=True)
fields.append((fieldname, cls))
CStruct._fields_ = fields
@@ -169,7 +170,7 @@
CStruct._normalized_ctype = get_ctypes_type(S)
builder() # no need to be lazy here
else:
- delayed_builders.append(builder)
+ delayed_builders.append((S, builder))
return CStruct
def build_ctypes_array(A, delayed_builders, max_n=0):
@@ -252,11 +253,19 @@
else:
return get_ctypes_type(FIELDTYPE)
-def get_ctypes_type(T, delayed_builders=None):
+def get_ctypes_type(T, delayed_builders=None, cannot_delay=False):
+ # Check delayed builders
+ if cannot_delay and delayed_builders:
+ for T2, builder in delayed_builders:
+ if T2 is T:
+ builder()
+ delayed_builders.remove((T2, builder))
+ return _ctypes_cache[T]
+
try:
return _ctypes_cache[T]
except KeyError:
- toplevel = delayed_builders is None
+ toplevel = cannot_delay or delayed_builders is None
if toplevel:
delayed_builders = []
cls = build_new_ctypes_type(T, delayed_builders)
@@ -306,9 +315,11 @@
def complete_builders(delayed_builders):
while delayed_builders:
- delayed_builders.pop()()
+ T, builder = delayed_builders[0]
+ builder()
+ delayed_builders.pop(0)
-def convert_struct(container, cstruct=None):
+def convert_struct(container, cstruct=None, delayed_converters=None):
STRUCT = container._TYPE
if cstruct is None:
# if 'container' is an inlined substructure, convert the whole
@@ -325,23 +336,38 @@
n = None
cstruct = cls._malloc(n)
add_storage(container, _struct_mixin, ctypes.pointer(cstruct))
+
+ if delayed_converters is None:
+ delayed_converters_was_None = True
+ delayed_converters = []
+ else:
+ delayed_converters_was_None = False
for field_name in STRUCT._names:
FIELDTYPE = getattr(STRUCT, field_name)
field_value = getattr(container, field_name)
if not isinstance(FIELDTYPE, lltype.ContainerType):
# regular field
if FIELDTYPE != lltype.Void:
- setattr(cstruct, field_name, lltype2ctypes(field_value))
+ def convert(field_name=field_name, field_value=field_value):
+ setattr(cstruct, field_name, lltype2ctypes(field_value))
+ if isinstance(FIELDTYPE, lltype.Ptr):
+ delayed_converters.append(convert)
+ else:
+ convert()
else:
# inlined substructure/subarray
if isinstance(FIELDTYPE, lltype.Struct):
csubstruct = getattr(cstruct, field_name)
- convert_struct(field_value, csubstruct)
+ convert_struct(field_value, csubstruct,
+ delayed_converters=delayed_converters)
elif field_name == STRUCT._arrayfld: # inlined var-sized part
csubarray = getattr(cstruct, field_name)
convert_array(field_value, csubarray)
else:
raise NotImplementedError('inlined field', FIELDTYPE)
+ if delayed_converters_was_None:
+ for converter in delayed_converters:
+ converter()
remove_regular_struct_content(container)
def remove_regular_struct_content(container):
@@ -358,7 +384,8 @@
# bigger structure at once
parent, parentindex = lltype.parentlink(container)
if parent is not None:
- convert_struct(parent)
+ if not isinstance(parent, _parentable_mixin):
+ convert_struct(parent)
return
# regular case: allocate a new ctypes array of the proper type
cls = get_ctypes_type(ARRAY)
diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py
--- a/pypy/rpython/lltypesystem/opimpl.py
+++ b/pypy/rpython/lltypesystem/opimpl.py
@@ -357,7 +357,7 @@
def op_cast_float_to_uint(f):
assert type(f) is float
- return r_uint(int(f))
+ return r_uint(long(f))
def op_cast_float_to_longlong(f):
assert type(f) is float
@@ -369,7 +369,7 @@
def op_cast_float_to_ulonglong(f):
assert type(f) is float
- return r_ulonglong(r_longlong(f))
+ return r_ulonglong(long(f))
def op_cast_char_to_int(b):
assert type(b) is str and len(b) == 1
diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py
--- a/pypy/rpython/lltypesystem/rlist.py
+++ b/pypy/rpython/lltypesystem/rlist.py
@@ -1,15 +1,15 @@
+from pypy.rlib import rgc, jit
+from pypy.rlib.debug import ll_assert
+from pypy.rlib.objectmodel import enforceargs
+from pypy.rpython.lltypesystem import rstr
+from pypy.rpython.lltypesystem.lltype import (GcForwardReference, Ptr, GcArray,
+ GcStruct, Void, Signed, malloc, typeOf, nullptr, typeMethod)
+from pypy.rpython.rlist import (AbstractBaseListRepr, AbstractListRepr,
+ AbstractFixedSizeListRepr, AbstractListIteratorRepr, ll_setitem_nonneg,
+ ADTIList, ADTIFixedList, dum_nocheck)
+from pypy.rpython.rmodel import Repr, inputconst, externalvsinternal
from pypy.tool.pairtype import pairtype, pair
-from pypy.rpython.rmodel import Repr, inputconst
-from pypy.rpython.rmodel import externalvsinternal
-from pypy.rpython.rlist import AbstractBaseListRepr, AbstractListRepr, \
- AbstractFixedSizeListRepr, AbstractListIteratorRepr, \
- ll_setitem_nonneg, ADTIList, ADTIFixedList
-from pypy.rpython.rlist import dum_nocheck
-from pypy.rpython.lltypesystem.lltype import GcForwardReference, Ptr, GcArray,\
- GcStruct, Void, Signed, malloc, typeOf, nullptr, typeMethod
-from pypy.rpython.lltypesystem import rstr
-from pypy.rlib.debug import ll_assert
-from pypy.rlib import rgc, jit
+
# ____________________________________________________________
#
@@ -171,6 +171,7 @@
# adapted C code
+ at enforceargs(None, int)
def _ll_list_resize_really(l, newsize):
"""
Ensure l.items has room for at least newsize elements, and set
@@ -210,7 +211,6 @@
rgc.ll_arraycopy(items, newitems, 0, 0, p)
l.length = newsize
l.items = newitems
-_ll_list_resize_really._annenforceargs_ = (None, int)
# this common case was factored out of _ll_list_resize
# to see if inlining it gives some speed-up.
diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py
--- a/pypy/rpython/lltypesystem/rstr.py
+++ b/pypy/rpython/lltypesystem/rstr.py
@@ -694,8 +694,8 @@
return -1
return count
+ @enforceargs(int, None)
@jit.look_inside_iff(lambda length, items: jit.isconstant(length) and length <= 2)
- @enforceargs(int, None)
def ll_join_strs(length, items):
# Special case for length 1 items, helps both the JIT and other code
if length == 1:
diff --git a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py
--- a/pypy/rpython/lltypesystem/test/test_ll2ctypes.py
+++ b/pypy/rpython/lltypesystem/test/test_ll2ctypes.py
@@ -82,7 +82,6 @@
assert not ALLOCATED # detects memory leaks in the test
def test_get_pointer(self):
- py.test.skip("FIXME")
# Equivalent of the C code::
# struct S1 { struct S2 *ptr; struct S2 buf; };
# struct S1 s1;
diff --git a/pypy/rpython/lltypesystem/test/test_lloperation.py b/pypy/rpython/lltypesystem/test/test_lloperation.py
--- a/pypy/rpython/lltypesystem/test/test_lloperation.py
+++ b/pypy/rpython/lltypesystem/test/test_lloperation.py
@@ -5,6 +5,7 @@
from pypy.rpython.llinterp import LLFrame
from pypy.rpython.test.test_llinterp import interpret
from pypy.rpython import rclass
+from pypy.rlib.rarithmetic import LONGLONG_MASK, r_longlong, r_ulonglong
LL_INTERP_OPERATIONS = [name[3:] for name in LLFrame.__dict__.keys()
if name.startswith('op_')]
@@ -133,6 +134,14 @@
py.test.raises(TypeError, llop.getinteriorfield,
lltype.Signed, s3, 'y')
+def test_cast_float_to_ulonglong():
+ f = 12350000000000000000.0
+ py.test.raises(OverflowError, r_longlong, f)
+ r_longlong(f / 2) # does not raise OverflowError
+ #
+ x = llop.cast_float_to_ulonglong(lltype.UnsignedLongLong, f)
+ assert x == r_ulonglong(f)
+
# ___________________________________________________________________________
# This tests that the LLInterpreter and the LL_OPERATIONS tables are in sync.
diff --git a/pypy/rpython/module/ll_os_stat.py b/pypy/rpython/module/ll_os_stat.py
--- a/pypy/rpython/module/ll_os_stat.py
+++ b/pypy/rpython/module/ll_os_stat.py
@@ -173,7 +173,8 @@
_compilation_info_ = compilation_info
STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS)
try:
- config = platform.configure(CConfig)
+ config = platform.configure(CConfig, ignore_errors=
+ try_to_add is not None)
except platform.CompilationError:
if try_to_add:
return # failed to add this field, give up
diff --git a/pypy/rpython/rlist.py b/pypy/rpython/rlist.py
--- a/pypy/rpython/rlist.py
+++ b/pypy/rpython/rlist.py
@@ -11,7 +11,7 @@
from pypy.rlib.debug import ll_assert
from pypy.rlib.rarithmetic import ovfcheck, widen, r_uint, intmask
from pypy.rpython.annlowlevel import ADTInterface
-from pypy.rlib import rgc
+from pypy.rlib import rgc, jit
ADTIFixedList = ADTInterface(None, {
'll_newlist': (['SELF', Signed ], 'self'),
@@ -912,6 +912,8 @@
return l
# no oopspec -- the function is inlined by the JIT
+ at jit.look_inside_iff(lambda l, start: jit.isconstant(start) and jit.isvirtual(l))
+ at jit.oopspec('list.delslice_startonly(l, start)')
def ll_listdelslice_startonly(l, start):
ll_assert(start >= 0, "del l[start:] with unexpectedly negative start")
ll_assert(start <= l.ll_length(), "del l[start:] with start > len(l)")
@@ -923,7 +925,6 @@
l.ll_setitem_fast(j, null)
j -= 1
l._ll_resize_le(newlength)
-ll_listdelslice_startonly.oopspec = 'list.delslice_startonly(l, start)'
def ll_listdelslice_startstop(l, start, stop):
length = l.ll_length()
diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py
--- a/pypy/rpython/tool/rffi_platform.py
+++ b/pypy/rpython/tool/rffi_platform.py
@@ -171,7 +171,7 @@
eci = self.config._compilation_info_
try_compile_cache([self.path], eci)
-def configure(CConfig):
+def configure(CConfig, ignore_errors=False):
"""Examine the local system by running the C compiler.
The CConfig class contains CConfigEntry attribues that describe
what should be inspected; configure() returns a dict mapping
@@ -199,7 +199,8 @@
writer.close()
eci = CConfig._compilation_info_
- infolist = list(run_example_code(writer.path, eci))
+ infolist = list(run_example_code(writer.path, eci,
+ ignore_errors=ignore_errors))
assert len(infolist) == len(entries)
resultinfo = {}
@@ -680,10 +681,10 @@
}
"""
-def run_example_code(filepath, eci):
+def run_example_code(filepath, eci, ignore_errors=False):
eci = eci.convert_sources_to_files(being_main=True)
files = [filepath]
- output = build_executable_cache(files, eci)
+ output = build_executable_cache(files, eci, ignore_errors=ignore_errors)
section = None
for line in output.splitlines():
line = line.strip()
diff --git a/pypy/tool/gcc_cache.py b/pypy/tool/gcc_cache.py
--- a/pypy/tool/gcc_cache.py
+++ b/pypy/tool/gcc_cache.py
@@ -16,7 +16,7 @@
hash = md5(key).hexdigest()
return cache_dir.join(hash)
-def build_executable_cache(c_files, eci):
+def build_executable_cache(c_files, eci, ignore_errors=False):
"Builds and run a program; caches the result"
# Import 'platform' every time, the compiler may have been changed
from pypy.translator.platform import platform
@@ -24,7 +24,18 @@
try:
return path.read()
except py.error.Error:
- result = platform.execute(platform.compile(c_files, eci))
+ _previous = platform.log_errors
+ try:
+ if ignore_errors:
+ platform.log_errors = False
+ result = platform.execute(platform.compile(c_files, eci))
+ finally:
+ if ignore_errors:
+ del platform.log_errors
+ # ^^^remove from the instance --- needed so that it can
+ # compare equal to another instance without it
+ if platform.log_errors != _previous:
+ platform.log_errors = _previous
path.write(result.out)
return result.out
diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py
--- a/pypy/tool/pytest/appsupport.py
+++ b/pypy/tool/pytest/appsupport.py
@@ -72,7 +72,9 @@
space = self.space
retval = []
for arg in self.code.getargs():
- w_val = space.getitem(self.w_locals, space.wrap(arg))
+ w_val = space.finditem(self.w_locals, space.wrap(arg))
+ if w_val is None:
+ w_val = space.wrap('<no value found>')
retval.append((arg, w_val))
return retval
diff --git a/pypy/tool/test/test_gcc_cache.py b/pypy/tool/test/test_gcc_cache.py
--- a/pypy/tool/test/test_gcc_cache.py
+++ b/pypy/tool/test/test_gcc_cache.py
@@ -77,3 +77,17 @@
finally:
sys.stderr = oldstderr
assert 'ERROR' not in capture.getvalue().upper()
+
+def test_execute_code_ignore_errors():
+ f = localudir.join('z.c')
+ f.write("""this file is not valid C code\n""")
+ eci = ExternalCompilationInfo()
+ oldstderr = sys.stderr
+ try:
+ sys.stderr = capture = cStringIO.StringIO()
+ py.test.raises(CompilationError, build_executable_cache,
+ [f], eci, True)
+ finally:
+ sys.stderr = oldstderr
+ assert 'ERROR' not in capture.getvalue().upper()
+
diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py
--- a/pypy/translator/c/gcc/trackgcroot.py
+++ b/pypy/translator/c/gcc/trackgcroot.py
@@ -486,6 +486,8 @@
'paddq', 'pinsr',
# zero-extending moves should not produce GC pointers
'movz',
+ # locked operations should not move GC pointers, at least so far
+ 'lock',
])
# a partial list is hopefully good enough for now; it's all to support
diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py
--- a/pypy/translator/c/genc.py
+++ b/pypy/translator/c/genc.py
@@ -563,7 +563,10 @@
else:
mk.definition('PYPY_MAIN_FUNCTION', "main")
- if sys.platform == 'win32':
+ if (py.path.local.sysfind('python') or
+ py.path.local.sysfind('python.exe')):
+ python = 'python '
+ elif sys.platform == 'win32':
python = sys.executable.replace('\\', '/') + ' '
else:
python = sys.executable + ' '
diff --git a/pypy/translator/c/src/allocator.h b/pypy/translator/c/src/allocator.h
--- a/pypy/translator/c/src/allocator.h
+++ b/pypy/translator/c/src/allocator.h
@@ -6,11 +6,6 @@
#ifndef PYPY_NOT_MAIN_FILE
-#ifdef AVR
- #ifndef NO_OBMALLOC
- #define NO_OBMALLOC
- #endif
-#endif
#if defined(TRIVIAL_MALLOC_DEBUG)
void *PyObject_Malloc(size_t n) { return malloc(n); }
diff --git a/pypy/translator/c/src/g_include.h b/pypy/translator/c/src/g_include.h
--- a/pypy/translator/c/src/g_include.h
+++ b/pypy/translator/c/src/g_include.h
@@ -31,9 +31,7 @@
#include "src/char.h"
#include "src/float.h"
#include "src/address.h"
-#ifndef AVR
#include "src/unichar.h"
-#endif
#include "src/llgroup.h"
#include "src/instrument.h"
@@ -48,11 +46,9 @@
# include "src/rtyper.h"
# include "src/debug_traceback.h"
# include "src/debug_alloc.h"
-#ifndef AVR
# include "src/ll_os.h"
# include "src/ll_strtod.h"
#endif
-#endif
#ifdef PYPY_STANDALONE
# include "src/allocator.h"
diff --git a/pypy/translator/c/src/g_prerequisite.h b/pypy/translator/c/src/g_prerequisite.h
--- a/pypy/translator/c/src/g_prerequisite.h
+++ b/pypy/translator/c/src/g_prerequisite.h
@@ -13,10 +13,8 @@
# include <io.h> /* needed, otherwise _lseeki64 truncates to 32-bits (??) */
#endif
-#ifndef AVR
#include "thread.h" /* needs to be included early to define the
struct RPyOpaque_ThreadLock */
-#endif
#include <stddef.h>
diff --git a/pypy/translator/c/src/main.h b/pypy/translator/c/src/main.h
--- a/pypy/translator/c/src/main.h
+++ b/pypy/translator/c/src/main.h
@@ -75,9 +75,7 @@
memory_out:
errmsg = "out of memory";
error:
-#ifndef AVR
fprintf(stderr, "Fatal error during initialization: %s\n", errmsg);
-#endif
abort();
return 1;
}
diff --git a/pypy/translator/c/src/thread.h b/pypy/translator/c/src/thread.h
--- a/pypy/translator/c/src/thread.h
+++ b/pypy/translator/c/src/thread.h
@@ -37,14 +37,9 @@
#endif
-/* common helper: this does nothing, but is called with the GIL released.
- This gives other threads a chance to grab the GIL and run. */
-void RPyThreadYield(void);
-
-#ifndef PYPY_NOT_MAIN_FILE
-void RPyThreadYield(void)
-{
-}
-#endif
+long RPyGilAllocate(void);
+long RPyGilYieldThread(void);
+void RPyGilRelease(void);
+void RPyGilAcquire(void);
#endif
diff --git a/pypy/translator/c/src/thread_nt.h b/pypy/translator/c/src/thread_nt.h
--- a/pypy/translator/c/src/thread_nt.h
+++ b/pypy/translator/c/src/thread_nt.h
@@ -221,4 +221,57 @@
#define RPyThreadTLS_Set(key, value) TlsSetValue(key, value)
+/************************************************************/
+/* GIL code */
+/************************************************************/
+
+static volatile LONG pending_acquires = -1;
+static CRITICAL_SECTION mutex_gil;
+static HANDLE cond_gil;
+
+/* Set up the GIL for the win32 backend and immediately acquire it in
+   the calling thread.  Always returns 1 (success). */
+long RPyGilAllocate(void)
+{
+ pending_acquires = 0;
+ InitializeCriticalSection(&mutex_gil);
+ /* the allocating thread starts out as the GIL owner */
+ EnterCriticalSection(&mutex_gil);
+ /* auto-reset event, initially non-signalled: stands in for a
+    condition variable, waking at most one waiting thread per pulse */
+ cond_gil = CreateEvent (NULL, FALSE, FALSE, NULL);
+ return 1;
+}
+
+/* Briefly give up the GIL so another thread waiting in RPyGilAcquire()
+   can run.  Returns 1 if we yielded, 0 if no thread was waiting.
+   Can be called even before RPyGilAllocate(), but in this case,
+   pending_acquires will be -1. */
+long RPyGilYieldThread(void)
+{
+ if (pending_acquires <= 0)
+ return 0;
+ InterlockedIncrement(&pending_acquires);
+ /* bug fix: cond_gil is already a HANDLE (an opaque pointer value);
+    passing &cond_gil hands the Win32 API the address of the variable,
+    which is not a valid handle, so PulseEvent and WaitForSingleObject
+    silently fail.  Pass the handle itself. */
+ PulseEvent(cond_gil);
+
+ /* hack: the three following lines do a pthread_cond_wait(), and
+    normally specifying a timeout of INFINITE would be fine.  But the
+    first and second operations are not done atomically, so there is a
+    (small) risk that PulseEvent misses the WaitForSingleObject().
+    In this case the process will just sleep a few milliseconds. */
+ LeaveCriticalSection(&mutex_gil);
+ WaitForSingleObject(cond_gil, 15);
+ EnterCriticalSection(&mutex_gil);
+
+ InterlockedDecrement(&pending_acquires);
+ return 1;
+}
+
+/* Release the GIL and wake up one thread blocked waiting for it. */
+void RPyGilRelease(void)
+{
+ LeaveCriticalSection(&mutex_gil);
+ /* bug fix: cond_gil is already a HANDLE; &cond_gil would pass the
+    address of the variable instead, an invalid handle value, making
+    PulseEvent silently fail and leaving waiters asleep until their
+    15ms timeout.  Pass the handle itself. */
+ PulseEvent(cond_gil);
+}
+
+/* Block until the GIL is ours.  pending_acquires is kept incremented
+   while we wait, so that RPyGilYieldThread() in the owning thread can
+   see that somebody wants the lock and yield it. */
+void RPyGilAcquire(void)
+{
+ InterlockedIncrement(&pending_acquires);
+ EnterCriticalSection(&mutex_gil);
+ InterlockedDecrement(&pending_acquires);
+}
+
+
#endif /* PYPY_NOT_MAIN_FILE */
diff --git a/pypy/translator/c/src/thread_pthread.h b/pypy/translator/c/src/thread_pthread.h
--- a/pypy/translator/c/src/thread_pthread.h
+++ b/pypy/translator/c/src/thread_pthread.h
@@ -12,6 +12,7 @@
#include <signal.h>
#include <stdio.h>
#include <errno.h>
+#include <assert.h>
/* The following is hopefully equivalent to what CPython does
(which is trying to compile a snippet of code using it) */
@@ -459,4 +460,113 @@
#define RPyThreadTLS_Set(key, value) pthread_setspecific(key, value)
+/************************************************************/
+/* GIL code */
+/************************************************************/
+
+#ifdef __llvm__
+# define HAS_ATOMIC_ADD
+#endif
+
+#ifdef __GNUC__
+# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
+# define HAS_ATOMIC_ADD
+# endif
+#endif
+
+#ifdef HAS_ATOMIC_ADD
+# define atomic_add __sync_fetch_and_add
+#else
+# if defined(__amd64__)
+# define atomic_add(ptr, value) asm volatile ("lock addq %0, %1" \
+ : : "ri"(value), "m"(*(ptr)) : "memory")
+# elif defined(__i386__)
+# define atomic_add(ptr, value) asm volatile ("lock addl %0, %1" \
+ : : "ri"(value), "m"(*(ptr)) : "memory")
+# else
+# error "Please use gcc >= 4.1 or write a custom 'asm' for your CPU."
+# endif
+#endif
+
+#define ASSERT_STATUS(call) \
+ if (call != 0) { \
+ fprintf(stderr, "Fatal error: " #call "\n"); \
+ abort(); \
+ }
+
+/* Debug tracing helper, compiled out by default (the '#if 0').  When
+   enabled, each thread prints in its own ANSI color derived from its
+   pthread id, so interleaved GIL traces can be told apart. */
+static void _debug_print(const char *msg)
+{
+#if 0
+ int col = (int)pthread_self();
+ col = 31 + ((col / 8) % 8);
+ fprintf(stderr, "\033[%dm%s\033[0m", col, msg);
+#endif
+}
+
+static volatile long pending_acquires = -1;
+static pthread_mutex_t mutex_gil = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t cond_gil = PTHREAD_COND_INITIALIZER;
+
+/* Debug-only sanity check (no-op unless RPY_ASSERT is defined): the
+   GIL mutex must already be locked, so trylock must fail (non-zero),
+   and the GIL must have been allocated (pending_acquires >= 0).
+   NOTE(review): if trylock unexpectedly succeeded, the mutex would be
+   left locked here — harmless only because the assert then aborts. */
+static void assert_has_the_gil(void)
+{
+#ifdef RPY_ASSERT
+ assert(pthread_mutex_trylock(&mutex_gil) != 0);
+ assert(pending_acquires >= 0);
+#endif
+}
+
+/* Set up the GIL for the pthread backend and acquire it in the calling
+   thread.  Always returns 1 (success). */
+long RPyGilAllocate(void)
+{
+ _debug_print("RPyGilAllocate\n");
+ pending_acquires = 0;
+ /* the mutex is statically initialized; trylock grabs it for the
+    calling thread, which becomes the initial GIL owner */
+ pthread_mutex_trylock(&mutex_gil);
+ assert_has_the_gil();
+ return 1;
+}
+
+/* Briefly give up the GIL so another thread waiting in RPyGilAcquire()
+   can run.  Returns 1 if we yielded, 0 if no thread was waiting. */
+long RPyGilYieldThread(void)
+{
+ /* can be called even before RPyGilAllocate(), but in this case,
+ pending_acquires will be -1 */
+#ifdef RPY_ASSERT
+ if (pending_acquires >= 0)
+ assert_has_the_gil();
+#endif
+ if (pending_acquires <= 0)
+ return 0;
+ /* count ourselves as a pending acquirer while we sleep, then wake
+    one waiter and wait on the condition (which releases mutex_gil
+    atomically, per pthread_cond_wait semantics) */
+ atomic_add(&pending_acquires, 1L);
+ _debug_print("{");
+ ASSERT_STATUS(pthread_cond_signal(&cond_gil));
+ ASSERT_STATUS(pthread_cond_wait(&cond_gil, &mutex_gil));
+ _debug_print("}");
+ atomic_add(&pending_acquires, -1L);
+ /* pthread_cond_wait re-acquired mutex_gil before returning */
+ assert_has_the_gil();
+ return 1;
+}
+
+/* Release the GIL and signal one thread blocked waiting for it. */
+void RPyGilRelease(void)
+{
+ _debug_print("RPyGilRelease\n");
+#ifdef RPY_ASSERT
+ assert(pending_acquires >= 0);
+#endif
+ assert_has_the_gil();
+ /* unlock first, then signal: a waiter woken by the signal can then
+    immediately re-acquire mutex_gil */
+ ASSERT_STATUS(pthread_mutex_unlock(&mutex_gil));
+ ASSERT_STATUS(pthread_cond_signal(&cond_gil));
+}
+
+/* Block until the GIL is ours.  pending_acquires is kept incremented
+   while we block, so the owning thread's RPyGilYieldThread() can see
+   that somebody is waiting and give up the lock. */
+void RPyGilAcquire(void)
+{
+ _debug_print("about to RPyGilAcquire...\n");
+#ifdef RPY_ASSERT
+ assert(pending_acquires >= 0);
+#endif
+ atomic_add(&pending_acquires, 1L);
+ ASSERT_STATUS(pthread_mutex_lock(&mutex_gil));
+ atomic_add(&pending_acquires, -1L);
+ assert_has_the_gil();
+ _debug_print("RPyGilAcquire\n");
+}
+
+
#endif /* PYPY_NOT_MAIN_FILE */
More information about the pypy-commit
mailing list