[pypy-commit] pypy default: merge
cfbolz
noreply at buildbot.pypy.org
Fri Nov 4 10:50:28 CET 2011
Author: Carl Friedrich Bolz <cfbolz at gmx.de>
Branch:
Changeset: r48736:a2743a169e88
Date: 2011-11-04 09:48 +0100
http://bitbucket.org/pypy/pypy/changeset/a2743a169e88/
Log: merge
diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py
--- a/pypy/jit/backend/x86/regloc.py
+++ b/pypy/jit/backend/x86/regloc.py
@@ -17,7 +17,7 @@
class AssemblerLocation(object):
# XXX: Is adding "width" here correct?
- __slots__ = ('value', 'width')
+ _attrs_ = ('value', 'width', '_location_code')
_immutable_ = True
def _getregkey(self):
return self.value
@@ -25,6 +25,9 @@
def is_memory_reference(self):
return self.location_code() in ('b', 's', 'j', 'a', 'm')
+ def location_code(self):
+ return self._location_code
+
def value_r(self): return self.value
def value_b(self): return self.value
def value_s(self): return self.value
@@ -38,6 +41,8 @@
class StackLoc(AssemblerLocation):
_immutable_ = True
+ _location_code = 'b'
+
def __init__(self, position, ebp_offset, num_words, type):
assert ebp_offset < 0 # so no confusion with RegLoc.value
self.position = position
@@ -49,9 +54,6 @@
def __repr__(self):
return '%d(%%ebp)' % (self.value,)
- def location_code(self):
- return 'b'
-
def assembler(self):
return repr(self)
@@ -63,8 +65,10 @@
self.is_xmm = is_xmm
if self.is_xmm:
self.width = 8
+ self._location_code = 'x'
else:
self.width = WORD
+ self._location_code = 'r'
def __repr__(self):
if self.is_xmm:
return rx86.R.xmmnames[self.value]
@@ -79,12 +83,6 @@
assert not self.is_xmm
return RegLoc(rx86.high_byte(self.value), False)
- def location_code(self):
- if self.is_xmm:
- return 'x'
- else:
- return 'r'
-
def assembler(self):
return '%' + repr(self)
@@ -97,14 +95,13 @@
class ImmedLoc(AssemblerLocation):
_immutable_ = True
width = WORD
+ _location_code = 'i'
+
def __init__(self, value):
from pypy.rpython.lltypesystem import rffi, lltype
# force as a real int
self.value = rffi.cast(lltype.Signed, value)
- def location_code(self):
- return 'i'
-
def getint(self):
return self.value
@@ -149,9 +146,6 @@
info = getattr(self, attr, '?')
return '<AddressLoc %r: %s>' % (self._location_code, info)
- def location_code(self):
- return self._location_code
-
def value_a(self):
return self.loc_a
@@ -191,6 +185,7 @@
# we want a width of 8 (... I think. Check this!)
_immutable_ = True
width = 8
+ _location_code = 'j'
def __init__(self, address):
self.value = address
@@ -198,9 +193,6 @@
def __repr__(self):
return '<ConstFloatLoc @%s>' % (self.value,)
- def location_code(self):
- return 'j'
-
if IS_X86_32:
class FloatImmedLoc(AssemblerLocation):
# This stands for an immediate float. It cannot be directly used in
@@ -209,6 +201,7 @@
# instead; see below.
_immutable_ = True
width = 8
+ _location_code = '#' # don't use me
def __init__(self, floatstorage):
self.aslonglong = floatstorage
@@ -229,9 +222,6 @@
floatvalue = longlong.getrealfloat(self.aslonglong)
return '<FloatImmedLoc(%s)>' % (floatvalue,)
- def location_code(self):
- raise NotImplementedError
-
if IS_X86_64:
def FloatImmedLoc(floatstorage):
from pypy.rlib.longlong2float import float2longlong
@@ -270,6 +260,11 @@
else:
raise AssertionError(methname + " undefined")
+def _missing_binary_insn(name, code1, code2):
+ raise AssertionError(name + "_" + code1 + code2 + " missing")
+_missing_binary_insn._dont_inline_ = True
+
+
class LocationCodeBuilder(object):
_mixin_ = True
@@ -303,6 +298,8 @@
else:
# For this case, we should not need the scratch register more than here.
self._load_scratch(val2)
+ if name == 'MOV' and loc1 is X86_64_SCRATCH_REG:
+ return # don't need a dummy "MOV r11, r11"
INSN(self, loc1, X86_64_SCRATCH_REG)
def invoke(self, codes, val1, val2):
@@ -310,6 +307,23 @@
_rx86_getattr(self, methname)(val1, val2)
invoke._annspecialcase_ = 'specialize:arg(1)'
+ def has_implementation_for(loc1, loc2):
+ # A memo function that returns True if there is any NAME_xy that could match.
+ # If it returns False we know the whole subcase can be omitted from translated
+ # code. Without this hack, the size of most _binaryop INSN functions ends up
+ # quite large in C code.
+ if loc1 == '?':
+ return any([has_implementation_for(loc1, loc2)
+ for loc1 in unrolling_location_codes])
+ methname = name + "_" + loc1 + loc2
+ if not hasattr(rx86.AbstractX86CodeBuilder, methname):
+ return False
+ # any NAME_j should have a NAME_m as a fallback, too. Check it
+ if loc1 == 'j': assert has_implementation_for('m', loc2), methname
+ if loc2 == 'j': assert has_implementation_for(loc1, 'm'), methname
+ return True
+ has_implementation_for._annspecialcase_ = 'specialize:memo'
+
def INSN(self, loc1, loc2):
code1 = loc1.location_code()
code2 = loc2.location_code()
@@ -325,6 +339,8 @@
assert code2 not in ('j', 'i')
for possible_code2 in unrolling_location_codes:
+ if not has_implementation_for('?', possible_code2):
+ continue
if code2 == possible_code2:
val2 = getattr(loc2, "value_" + possible_code2)()
#
@@ -335,28 +351,32 @@
#
# Regular case
for possible_code1 in unrolling_location_codes:
+ if not has_implementation_for(possible_code1,
+ possible_code2):
+ continue
if code1 == possible_code1:
val1 = getattr(loc1, "value_" + possible_code1)()
# More faking out of certain operations for x86_64
- if possible_code1 == 'j' and not rx86.fits_in_32bits(val1):
+ fits32 = rx86.fits_in_32bits
+ if possible_code1 == 'j' and not fits32(val1):
val1 = self._addr_as_reg_offset(val1)
invoke(self, "m" + possible_code2, val1, val2)
- elif possible_code2 == 'j' and not rx86.fits_in_32bits(val2):
+ return
+ if possible_code2 == 'j' and not fits32(val2):
val2 = self._addr_as_reg_offset(val2)
invoke(self, possible_code1 + "m", val1, val2)
- elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]):
+ return
+ if possible_code1 == 'm' and not fits32(val1[1]):
val1 = self._fix_static_offset_64_m(val1)
- invoke(self, "a" + possible_code2, val1, val2)
- elif possible_code2 == 'm' and not rx86.fits_in_32bits(val2[1]):
+ if possible_code2 == 'm' and not fits32(val2[1]):
val2 = self._fix_static_offset_64_m(val2)
- invoke(self, possible_code1 + "a", val1, val2)
- else:
- if possible_code1 == 'a' and not rx86.fits_in_32bits(val1[3]):
- val1 = self._fix_static_offset_64_a(val1)
- if possible_code2 == 'a' and not rx86.fits_in_32bits(val2[3]):
- val2 = self._fix_static_offset_64_a(val2)
- invoke(self, possible_code1 + possible_code2, val1, val2)
+ if possible_code1 == 'a' and not fits32(val1[3]):
+ val1 = self._fix_static_offset_64_a(val1)
+ if possible_code2 == 'a' and not fits32(val2[3]):
+ val2 = self._fix_static_offset_64_a(val2)
+ invoke(self, possible_code1 + possible_code2, val1, val2)
return
+ _missing_binary_insn(name, code1, code2)
return func_with_new_name(INSN, "INSN_" + name)
@@ -431,12 +451,14 @@
def _fix_static_offset_64_m(self, (basereg, static_offset)):
# For cases where an AddressLoc has the location_code 'm', but
# where the static offset does not fit in 32-bits. We have to fall
- # back to the X86_64_SCRATCH_REG. Note that this returns a location
- # encoded as mode 'a'. These are all possibly rare cases; don't try
+ # back to the X86_64_SCRATCH_REG. Returns a new location encoded
+ # as mode 'm' too. These are all possibly rare cases; don't try
# to reuse a past value of the scratch register at all.
self._scratch_register_known = False
self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset)
- return (basereg, X86_64_SCRATCH_REG.value, 0, 0)
+ self.LEA_ra(X86_64_SCRATCH_REG.value,
+ (basereg, X86_64_SCRATCH_REG.value, 0, 0))
+ return (X86_64_SCRATCH_REG.value, 0)
def _fix_static_offset_64_a(self, (basereg, scalereg,
scale, static_offset)):
diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py
--- a/pypy/jit/backend/x86/rx86.py
+++ b/pypy/jit/backend/x86/rx86.py
@@ -745,6 +745,7 @@
assert insnname_template.count('*') == 1
add_insn('x', register(2), '\xC0')
add_insn('j', abs_, immediate(2))
+ add_insn('m', mem_reg_plus_const(2))
define_pxmm_insn('PADDQ_x*', '\xD4')
define_pxmm_insn('PSUBQ_x*', '\xFB')
diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py
--- a/pypy/jit/backend/x86/test/test_regloc.py
+++ b/pypy/jit/backend/x86/test/test_regloc.py
@@ -146,8 +146,10 @@
expected_instructions = (
# mov r11, 0xFEDCBA9876543210
'\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
- # mov rcx, [rdx+r11]
- '\x4A\x8B\x0C\x1A'
+ # lea r11, [rdx+r11]
+ '\x4E\x8D\x1C\x1A'
+ # mov rcx, [r11]
+ '\x49\x8B\x0B'
)
assert cb.getvalue() == expected_instructions
@@ -174,6 +176,30 @@
# ------------------------------------------------------------
+ def test_MOV_64bit_constant_into_r11(self):
+ base_constant = 0xFEDCBA9876543210
+ cb = LocationCodeBuilder64()
+ cb.MOV(r11, imm(base_constant))
+
+ expected_instructions = (
+ # mov r11, 0xFEDCBA9876543210
+ '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+ )
+ assert cb.getvalue() == expected_instructions
+
+ def test_MOV_64bit_address_into_r11(self):
+ base_addr = 0xFEDCBA9876543210
+ cb = LocationCodeBuilder64()
+ cb.MOV(r11, heap(base_addr))
+
+ expected_instructions = (
+ # mov r11, 0xFEDCBA9876543210
+ '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' +
+ # mov r11, [r11]
+ '\x4D\x8B\x1B'
+ )
+ assert cb.getvalue() == expected_instructions
+
def test_MOV_immed32_into_64bit_address_1(self):
immed = -0x01234567
base_addr = 0xFEDCBA9876543210
@@ -217,8 +243,10 @@
expected_instructions = (
# mov r11, 0xFEDCBA9876543210
'\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
- # mov [rdx+r11], -0x01234567
- '\x4A\xC7\x04\x1A\x99\xBA\xDC\xFE'
+ # lea r11, [rdx+r11]
+ '\x4E\x8D\x1C\x1A'
+ # mov [r11], -0x01234567
+ '\x49\xC7\x03\x99\xBA\xDC\xFE'
)
assert cb.getvalue() == expected_instructions
@@ -300,8 +328,10 @@
'\x48\xBA\xEF\xCD\xAB\x89\x67\x45\x23\x01'
# mov r11, 0xFEDCBA9876543210
'\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
- # mov [rax+r11], rdx
- '\x4A\x89\x14\x18'
+ # lea r11, [rax+r11]
+ '\x4E\x8D\x1C\x18'
+ # mov [r11], rdx
+ '\x49\x89\x13'
# pop rdx
'\x5A'
)
diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
--- a/pypy/jit/metainterp/optimizeopt/heap.py
+++ b/pypy/jit/metainterp/optimizeopt/heap.py
@@ -43,7 +43,7 @@
optheap.optimizer.ensure_imported(cached_fieldvalue)
cached_fieldvalue = self._cached_fields.get(structvalue, None)
- if cached_fieldvalue is not fieldvalue:
+ if not fieldvalue.same_value(cached_fieldvalue):
# common case: store the 'op' as lazy_setfield, and register
# myself in the optheap's _lazy_setfields_and_arrayitems list
self._lazy_setfield = op
@@ -140,6 +140,15 @@
getop = ResOperation(rop.GETFIELD_GC, [op.getarg(0)],
result, op.getdescr())
shortboxes.add_potential(getop, synthetic=True)
+ if op.getopnum() == rop.SETARRAYITEM_GC:
+ result = op.getarg(2)
+ if isinstance(result, Const):
+ newresult = result.clonebox()
+ optimizer.make_constant(newresult, result)
+ result = newresult
+ getop = ResOperation(rop.GETARRAYITEM_GC, [op.getarg(0), op.getarg(1)],
+ result, op.getdescr())
+ shortboxes.add_potential(getop, synthetic=True)
elif op.result is not None:
shortboxes.add_potential(op)
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py
+++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
@@ -1,6 +1,6 @@
from pypy.jit.metainterp import jitprof, resume, compile
from pypy.jit.metainterp.executor import execute_nonspec
-from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF
+from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF, INT
from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \
ImmutableIntUnbounded, \
IntLowerBound, MININT, MAXINT
@@ -95,6 +95,10 @@
return guards
def import_from(self, other, optimizer):
+ if self.level == LEVEL_CONSTANT:
+ assert other.level == LEVEL_CONSTANT
+ assert other.box.same_constant(self.box)
+ return
assert self.level <= LEVEL_NONNULL
if other.level == LEVEL_CONSTANT:
self.make_constant(other.get_key_box())
@@ -141,6 +145,13 @@
return not box.nonnull()
return False
+ def same_value(self, other):
+ if not other:
+ return False
+ if self.is_constant() and other.is_constant():
+ return self.box.same_constant(other.box)
+ return self is other
+
def make_constant(self, constbox):
"""Replace 'self.box' with a Const box."""
assert isinstance(constbox, Const)
@@ -326,6 +337,7 @@
self.bridge = bridge
self.values = {}
self.interned_refs = self.cpu.ts.new_ref_dict()
+ self.interned_ints = {}
self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd)
self.bool_boxes = {}
self.producer = {}
@@ -398,6 +410,9 @@
if not value:
return box
return self.interned_refs.setdefault(value, box)
+ #elif constbox.type == INT:
+ # value = constbox.getint()
+ # return self.interned_ints.setdefault(value, box)
else:
return box
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -4225,6 +4225,27 @@
"""
self.optimize_strunicode_loop(ops, expected)
+ def test_str_slice_plain_virtual(self):
+ ops = """
+ []
+ p0 = newstr(11)
+ copystrcontent(s"hello world", p0, 0, 0, 11)
+ p1 = call(0, p0, 0, 5, descr=strslicedescr)
+ finish(p1)
+ """
+ expected = """
+ []
+ p0 = newstr(11)
+ copystrcontent(s"hello world", p0, 0, 0, 11)
+ # Eventually this should just return s"hello", but ATM this test is
+ # just verifying that it doesn't return "\0\0\0\0\0", so being
+ # slightly underoptimized is ok.
+ p1 = newstr(5)
+ copystrcontent(p0, p1, 0, 0, 5)
+ finish(p1)
+ """
+ self.optimize_strunicode_loop(ops, expected)
+
# ----------
def optimize_strunicode_loop_extradescrs(self, ops, optops):
class FakeCallInfoCollection:
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -7355,6 +7355,150 @@
"""
self.optimize_loop(ops, expected)
+ def test_repeated_constant_setfield_mixed_with_guard(self):
+ ops = """
+ [p22, p18]
+ setfield_gc(p22, 2, descr=valuedescr)
+ guard_nonnull_class(p18, ConstClass(node_vtable)) []
+ setfield_gc(p22, 2, descr=valuedescr)
+ jump(p22, p18)
+ """
+ preamble = """
+ [p22, p18]
+ setfield_gc(p22, 2, descr=valuedescr)
+ guard_nonnull_class(p18, ConstClass(node_vtable)) []
+ jump(p22, p18)
+ """
+ short = """
+ [p22, p18]
+ i1 = getfield_gc(p22, descr=valuedescr)
+ guard_value(i1, 2) []
+ jump(p22, p18)
+ """
+ expected = """
+ [p22, p18]
+ jump(p22, p18)
+ """
+ self.optimize_loop(ops, expected, preamble, expected_short=short)
+
+ def test_repeated_setfield_mixed_with_guard(self):
+ ops = """
+ [p22, p18, i1]
+ i2 = getfield_gc(p22, descr=valuedescr)
+ call(i2, descr=nonwritedescr)
+ setfield_gc(p22, i1, descr=valuedescr)
+ guard_nonnull_class(p18, ConstClass(node_vtable)) []
+ setfield_gc(p22, i1, descr=valuedescr)
+ jump(p22, p18, i1)
+ """
+ preamble = """
+ [p22, p18, i1]
+ i2 = getfield_gc(p22, descr=valuedescr)
+ call(i2, descr=nonwritedescr)
+ setfield_gc(p22, i1, descr=valuedescr)
+ guard_nonnull_class(p18, ConstClass(node_vtable)) []
+ jump(p22, p18, i1, i1)
+ """
+ short = """
+ [p22, p18, i1]
+ i2 = getfield_gc(p22, descr=valuedescr)
+ jump(p22, p18, i1, i2)
+ """
+ expected = """
+ [p22, p18, i1, i2]
+ call(i2, descr=nonwritedescr)
+ setfield_gc(p22, i1, descr=valuedescr)
+ jump(p22, p18, i1, i1)
+ """
+ self.optimize_loop(ops, expected, preamble, expected_short=short)
+
+ def test_cache_setfield_across_loop_boundaries(self):
+ ops = """
+ [p1]
+ p2 = getfield_gc(p1, descr=valuedescr)
+ guard_nonnull_class(p2, ConstClass(node_vtable)) []
+ call(p2, descr=nonwritedescr)
+ p3 = new_with_vtable(ConstClass(node_vtable))
+ setfield_gc(p1, p3, descr=valuedescr)
+ jump(p1)
+ """
+ expected = """
+ [p1, p2]
+ call(p2, descr=nonwritedescr)
+ p3 = new_with_vtable(ConstClass(node_vtable))
+ setfield_gc(p1, p3, descr=valuedescr)
+ jump(p1, p3)
+ """
+ self.optimize_loop(ops, expected)
+
+ def test_cache_setarrayitem_across_loop_boundaries(self):
+ ops = """
+ [p1]
+ p2 = getarrayitem_gc(p1, 3, descr=arraydescr)
+ guard_nonnull_class(p2, ConstClass(node_vtable)) []
+ call(p2, descr=nonwritedescr)
+ p3 = new_with_vtable(ConstClass(node_vtable))
+ setarrayitem_gc(p1, 3, p3, descr=arraydescr)
+ jump(p1)
+ """
+ expected = """
+ [p1, p2]
+ call(p2, descr=nonwritedescr)
+ p3 = new_with_vtable(ConstClass(node_vtable))
+ setarrayitem_gc(p1, 3, p3, descr=arraydescr)
+ jump(p1, p3)
+ """
+ self.optimize_loop(ops, expected)
+
+ def test_setarrayitem_p0_p0(self):
+ ops = """
+ [i0, i1]
+ p0 = escape()
+ setarrayitem_gc(p0, 2, p0, descr=arraydescr)
+ jump(i0, i1)
+ """
+ expected = """
+ [i0, i1]
+ p0 = escape()
+ setarrayitem_gc(p0, 2, p0, descr=arraydescr)
+ jump(i0, i1)
+ """
+ self.optimize_loop(ops, expected)
+
+ def test_setfield_p0_p0(self):
+ ops = """
+ [i0, i1]
+ p0 = escape()
+ setfield_gc(p0, p0, descr=arraydescr)
+ jump(i0, i1)
+ """
+ expected = """
+ [i0, i1]
+ p0 = escape()
+ setfield_gc(p0, p0, descr=arraydescr)
+ jump(i0, i1)
+ """
+ self.optimize_loop(ops, expected)
+
+ def test_setfield_p0_p1_p0(self):
+ ops = """
+ [i0, i1]
+ p0 = escape()
+ p1 = escape()
+ setfield_gc(p0, p1, descr=adescr)
+ setfield_gc(p1, p0, descr=bdescr)
+ jump(i0, i1)
+ """
+ expected = """
+ [i0, i1]
+ p0 = escape()
+ p1 = escape()
+ setfield_gc(p0, p1, descr=adescr)
+ setfield_gc(p1, p0, descr=bdescr)
+ jump(i0, i1)
+ """
+ self.optimize_loop(ops, expected)
+
class TestLLtype(OptimizeOptTest, LLtypeMixin):
pass
diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py
--- a/pypy/jit/metainterp/optimizeopt/virtualstate.py
+++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py
@@ -551,6 +551,7 @@
optimizer.produce_potential_short_preamble_ops(self)
self.short_boxes = {}
+ self.short_boxes_in_production = {}
for box in self.potential_ops.keys():
try:
@@ -606,6 +607,10 @@
return
if isinstance(box, Const):
return
+ if box in self.short_boxes_in_production:
+ raise BoxNotProducable
+ self.short_boxes_in_production[box] = True
+
if box in self.potential_ops:
ops = self.prioritized_alternatives(box)
produced_one = False
diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py
--- a/pypy/jit/metainterp/optimizeopt/vstring.py
+++ b/pypy/jit/metainterp/optimizeopt/vstring.py
@@ -505,11 +505,17 @@
#
if (isinstance(vstr, VStringPlainValue) and vstart.is_constant()
and vstop.is_constant()):
- # slicing with constant bounds of a VStringPlainValue
- value = self.make_vstring_plain(op.result, op, mode)
- value.setup_slice(vstr._chars, vstart.box.getint(),
- vstop.box.getint())
- return True
+ # slicing with constant bounds of a VStringPlainValue, if any of
+ # the characters is unitialized we don't do this special slice, we
+ # do the regular copy contents.
+ for i in range(vstart.box.getint(), vstop.box.getint()):
+ if vstr.getitem(i) is optimizer.CVAL_UNINITIALIZED_ZERO:
+ break
+ else:
+ value = self.make_vstring_plain(op.result, op, mode)
+ value.setup_slice(vstr._chars, vstart.box.getint(),
+ vstop.box.getint())
+ return True
#
vstr.ensure_nonnull()
lengthbox = _int_sub(self, vstop.force_box(self),
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -312,10 +312,11 @@
class W_XRange(Wrappable):
- def __init__(self, space, start, len, step):
+ def __init__(self, space, start, stop, step):
self.space = space
self.start = start
- self.len = len
+ self.stop = stop
+ self.len = get_len_of_range(space, start, stop, step)
self.step = step
def descr_new(space, w_subtype, w_start, w_stop=None, w_step=1):
@@ -325,9 +326,8 @@
start, stop = 0, start
else:
stop = _toint(space, w_stop)
- howmany = get_len_of_range(space, start, stop, step)
obj = space.allocate_instance(W_XRange, w_subtype)
- W_XRange.__init__(obj, space, start, howmany, step)
+ W_XRange.__init__(obj, space, start, stop, step)
return space.wrap(obj)
def descr_repr(self):
@@ -357,12 +357,12 @@
def descr_iter(self):
return self.space.wrap(W_XRangeIterator(self.space, self.start,
- self.len, self.step))
+ self.stop, self.step))
def descr_reversed(self):
lastitem = self.start + (self.len-1) * self.step
return self.space.wrap(W_XRangeIterator(self.space, lastitem,
- self.len, -self.step))
+ self.start, -self.step, True))
def descr_reduce(self):
space = self.space
@@ -389,25 +389,29 @@
)
class W_XRangeIterator(Wrappable):
- def __init__(self, space, current, remaining, step):
+ def __init__(self, space, start, stop, step, inclusive=False):
self.space = space
- self.current = current
- self.remaining = remaining
+ self.current = start
+ self.stop = stop
self.step = step
+ self.inclusive = inclusive
def descr_iter(self):
return self.space.wrap(self)
def descr_next(self):
- if self.remaining > 0:
- item = self.current
- self.current = item + self.step
- self.remaining -= 1
- return self.space.wrap(item)
- raise OperationError(self.space.w_StopIteration, self.space.w_None)
+ if self.inclusive:
+ if not ((self.step > 0 and self.current <= self.stop) or (self.step < 0 and self.current >= self.stop)):
+ raise OperationError(self.space.w_StopIteration, self.space.w_None)
+ else:
+ if not ((self.step > 0 and self.current < self.stop) or (self.step < 0 and self.current > self.stop)):
+ raise OperationError(self.space.w_StopIteration, self.space.w_None)
+ item = self.current
+ self.current = item + self.step
+ return self.space.wrap(item)
- def descr_len(self):
- return self.space.wrap(self.remaining)
+ #def descr_len(self):
+ # return self.space.wrap(self.remaining)
def descr_reduce(self):
from pypy.interpreter.mixedmodule import MixedModule
@@ -418,7 +422,7 @@
w = space.wrap
nt = space.newtuple
- tup = [w(self.current), w(self.remaining), w(self.step)]
+ tup = [w(self.current), w(self.stop), w(self.step)]
return nt([new_inst, nt(tup)])
W_XRangeIterator.typedef = TypeDef("rangeiterator",
diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py
--- a/pypy/module/__builtin__/test/test_functional.py
+++ b/pypy/module/__builtin__/test/test_functional.py
@@ -157,7 +157,8 @@
raises(OverflowError, xrange, a)
raises(OverflowError, xrange, 0, a)
raises(OverflowError, xrange, 0, 1, a)
-
+ assert list(reversed(xrange(-sys.maxint-1, -sys.maxint-1, -2))) == []
+
def test_xrange_reduce(self):
x = xrange(2, 9, 3)
callable, args = x.__reduce__()
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -4,32 +4,44 @@
from pypy.interpreter.error import OperationError
from pypy.tool.sourcetools import func_renamer
from pypy.interpreter.baseobjspace import Wrappable
-from pypy.rpython.lltypesystem import lltype, rffi
+from pypy.rpython.lltypesystem import lltype, llmemory, rffi
+from pypy.rlib import rgc, ropenssl
from pypy.rlib.objectmodel import keepalive_until_here
-from pypy.rlib import ropenssl
from pypy.rlib.rstring import StringBuilder
from pypy.module.thread.os_lock import Lock
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
+# HASH_MALLOC_SIZE is the size of EVP_MD, EVP_MD_CTX plus their pointers
+# Used for adding memory pressure. Last number is an (under?)estimate of
+# EVP_PKEY_CTX's size.
+# XXX: Make a better estimate here
+HASH_MALLOC_SIZE = ropenssl.EVP_MD_SIZE + ropenssl.EVP_MD_CTX_SIZE \
+ + rffi.sizeof(ropenssl.EVP_MD) * 2 + 208
+
class W_Hash(Wrappable):
ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO)
+ _block_size = -1
def __init__(self, space, name):
self.name = name
+ self.digest_size = self.compute_digest_size()
# Allocate a lock for each HASH object.
# An optimization would be to not release the GIL on small requests,
# and use a custom lock only when needed.
self.lock = Lock(space)
+ ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw')
+ rgc.add_memory_pressure(HASH_MALLOC_SIZE + self.digest_size)
+ self.ctx = ctx
+
+ def initdigest(self, space, name):
digest = ropenssl.EVP_get_digestbyname(name)
if not digest:
raise OperationError(space.w_ValueError,
space.wrap("unknown hash function"))
- ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO, flavor='raw')
- ropenssl.EVP_DigestInit(ctx, digest)
- self.ctx = ctx
+ ropenssl.EVP_DigestInit(self.ctx, digest)
def __del__(self):
# self.lock.free()
@@ -65,33 +77,29 @@
"Return the digest value as a string of hexadecimal digits."
digest = self._digest(space)
hexdigits = '0123456789abcdef'
- result = StringBuilder(self._digest_size() * 2)
+ result = StringBuilder(self.digest_size * 2)
for c in digest:
result.append(hexdigits[(ord(c) >> 4) & 0xf])
result.append(hexdigits[ ord(c) & 0xf])
return space.wrap(result.build())
def get_digest_size(self, space):
- return space.wrap(self._digest_size())
+ return space.wrap(self.digest_size)
def get_block_size(self, space):
- return space.wrap(self._block_size())
+ return space.wrap(self.compute_block_size())
def _digest(self, space):
- copy = self.copy(space)
- ctx = copy.ctx
- digest_size = self._digest_size()
- digest = lltype.malloc(rffi.CCHARP.TO, digest_size, flavor='raw')
+ with lltype.scoped_alloc(ropenssl.EVP_MD_CTX.TO) as ctx:
+ with self.lock:
+ ropenssl.EVP_MD_CTX_copy(ctx, self.ctx)
+ digest_size = self.digest_size
+ with lltype.scoped_alloc(rffi.CCHARP.TO, digest_size) as digest:
+ ropenssl.EVP_DigestFinal(ctx, digest, None)
+ ropenssl.EVP_MD_CTX_cleanup(ctx)
+ return rffi.charpsize2str(digest, digest_size)
- try:
- ropenssl.EVP_DigestFinal(ctx, digest, None)
- return rffi.charpsize2str(digest, digest_size)
- finally:
- keepalive_until_here(copy)
- lltype.free(digest, flavor='raw')
-
-
- def _digest_size(self):
+ def compute_digest_size(self):
# XXX This isn't the nicest way, but the EVP_MD_size OpenSSL
# XXX function is defined as a C macro on OS X and would be
# XXX significantly harder to implement in another way.
@@ -105,12 +113,14 @@
'sha512': 64, 'SHA512': 64,
}.get(self.name, 0)
- def _block_size(self):
+ def compute_block_size(self):
+ if self._block_size != -1:
+ return self._block_size
# XXX This isn't the nicest way, but the EVP_MD_CTX_block_size
# XXX OpenSSL function is defined as a C macro on some systems
# XXX and would be significantly harder to implement in
# XXX another way.
- return {
+ self._block_size = {
'md5': 64, 'MD5': 64,
'sha1': 64, 'SHA1': 64,
'sha224': 64, 'SHA224': 64,
@@ -118,6 +128,7 @@
'sha384': 128, 'SHA384': 128,
'sha512': 128, 'SHA512': 128,
}.get(self.name, 0)
+ return self._block_size
W_Hash.typedef = TypeDef(
'HASH',
@@ -135,6 +146,7 @@
@unwrap_spec(name=str, string='bufferstr')
def new(space, name, string=''):
w_hash = W_Hash(space, name)
+ w_hash.initdigest(space, name)
w_hash.update(space, string)
return space.wrap(w_hash)
diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py
--- a/pypy/module/_multiprocessing/interp_semaphore.py
+++ b/pypy/module/_multiprocessing/interp_semaphore.py
@@ -4,6 +4,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.error import wrap_oserror, OperationError
from pypy.rpython.lltypesystem import rffi, lltype
+from pypy.rlib import rgc
from pypy.rlib.rarithmetic import r_uint
from pypy.translator.tool.cbuild import ExternalCompilationInfo
from pypy.rpython.tool import rffi_platform as platform
@@ -23,6 +24,8 @@
_CreateSemaphore = rwin32.winexternal(
'CreateSemaphoreA', [rffi.VOIDP, rffi.LONG, rffi.LONG, rwin32.LPCSTR],
rwin32.HANDLE)
+ _CloseHandle = rwin32.winexternal('CloseHandle', [rwin32.HANDLE],
+ rwin32.BOOL, threadsafe=False)
_ReleaseSemaphore = rwin32.winexternal(
'ReleaseSemaphore', [rwin32.HANDLE, rffi.LONG, rffi.LONGP],
rwin32.BOOL)
@@ -51,6 +54,7 @@
SEM_FAILED = platform.ConstantInteger('SEM_FAILED')
SEM_VALUE_MAX = platform.ConstantInteger('SEM_VALUE_MAX')
SEM_TIMED_WAIT = platform.Has('sem_timedwait')
+ SEM_T_SIZE = platform.SizeOf('sem_t')
config = platform.configure(CConfig)
TIMEVAL = config['TIMEVAL']
@@ -61,18 +65,21 @@
SEM_FAILED = config['SEM_FAILED'] # rffi.cast(SEM_T, config['SEM_FAILED'])
SEM_VALUE_MAX = config['SEM_VALUE_MAX']
SEM_TIMED_WAIT = config['SEM_TIMED_WAIT']
+ SEM_T_SIZE = config['SEM_T_SIZE']
if sys.platform == 'darwin':
HAVE_BROKEN_SEM_GETVALUE = True
else:
HAVE_BROKEN_SEM_GETVALUE = False
- def external(name, args, result):
+ def external(name, args, result, **kwargs):
return rffi.llexternal(name, args, result,
- compilation_info=eci)
+ compilation_info=eci, **kwargs)
_sem_open = external('sem_open',
[rffi.CCHARP, rffi.INT, rffi.INT, rffi.UINT],
SEM_T)
+ # treat sem_close as not threadsafe for now to be able to use the __del__
+ _sem_close = external('sem_close', [SEM_T], rffi.INT, threadsafe=False)
_sem_unlink = external('sem_unlink', [rffi.CCHARP], rffi.INT)
_sem_wait = external('sem_wait', [SEM_T], rffi.INT)
_sem_trywait = external('sem_trywait', [SEM_T], rffi.INT)
@@ -90,6 +97,11 @@
raise OSError(rposix.get_errno(), "sem_open failed")
return res
+ def sem_close(handle):
+ res = _sem_close(handle)
+ if res < 0:
+ raise OSError(rposix.get_errno(), "sem_close failed")
+
def sem_unlink(name):
res = _sem_unlink(name)
if res < 0:
@@ -205,6 +217,11 @@
raise WindowsError(err, "CreateSemaphore")
return handle
+ def delete_semaphore(handle):
+ if not _CloseHandle(handle):
+ err = rwin32.GetLastError()
+ raise WindowsError(err, "CloseHandle")
+
def semlock_acquire(self, space, block, w_timeout):
if not block:
full_msecs = 0
@@ -291,8 +308,13 @@
sem_unlink(name)
except OSError:
pass
+ else:
+ rgc.add_memory_pressure(SEM_T_SIZE)
return sem
+ def delete_semaphore(handle):
+ sem_close(handle)
+
def semlock_acquire(self, space, block, w_timeout):
if not block:
deadline = lltype.nullptr(TIMESPECP.TO)
@@ -483,6 +505,9 @@
def exit(self, space, __args__):
self.release(space)
+ def __del__(self):
+ delete_semaphore(self.handle)
+
@unwrap_spec(kind=int, value=int, maxvalue=int)
def descr_new(space, w_subtype, kind, value, maxvalue):
if kind != RECURSIVE_MUTEX and kind != SEMAPHORE:
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py
--- a/pypy/module/_pickle_support/maker.py
+++ b/pypy/module/_pickle_support/maker.py
@@ -66,10 +66,10 @@
new_generator.running = running
return space.wrap(new_generator)
- at unwrap_spec(current=int, remaining=int, step=int)
-def xrangeiter_new(space, current, remaining, step):
+ at unwrap_spec(current=int, stop=int, step=int)
+def xrangeiter_new(space, current, stop, step):
from pypy.module.__builtin__.functional import W_XRangeIterator
- new_iter = W_XRangeIterator(space, current, remaining, step)
+ new_iter = W_XRangeIterator(space, current, stop, step)
return space.wrap(new_iter)
@unwrap_spec(identifier=str)
diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py
--- a/pypy/module/pyexpat/interp_pyexpat.py
+++ b/pypy/module/pyexpat/interp_pyexpat.py
@@ -4,9 +4,9 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.error import OperationError
from pypy.objspace.descroperation import object_setattr
+from pypy.rlib import rgc
+from pypy.rlib.unroll import unrolling_iterable
from pypy.rpython.lltypesystem import rffi, lltype
-from pypy.rlib.unroll import unrolling_iterable
-
from pypy.rpython.tool import rffi_platform
from pypy.translator.tool.cbuild import ExternalCompilationInfo
from pypy.translator.platform import platform
@@ -118,6 +118,19 @@
locals()[name] = rffi_platform.ConstantInteger(name)
for name in xml_model_list:
locals()[name] = rffi_platform.ConstantInteger(name)
+ for name in xml_model_list:
+ locals()[name] = rffi_platform.ConstantInteger(name)
+ for name in xml_model_list:
+ locals()[name] = rffi_platform.ConstantInteger(name)
+ for name in xml_model_list:
+ locals()[name] = rffi_platform.ConstantInteger(name)
+ for name in xml_model_list:
+ locals()[name] = rffi_platform.ConstantInteger(name)
+ for name in xml_model_list:
+ locals()[name] = rffi_platform.ConstantInteger(name)
+ for name in xml_model_list:
+ locals()[name] = rffi_platform.ConstantInteger(name)
+ XML_Parser_SIZE = rffi_platform.SizeOf("XML_Parser")
for k, v in rffi_platform.configure(CConfigure).items():
globals()[k] = v
@@ -793,7 +806,10 @@
rffi.cast(rffi.CHAR, namespace_separator))
else:
xmlparser = XML_ParserCreate(encoding)
-
+ # Currently this is just the size of the pointer and some estimated bytes.
+ # The struct isn't actually defined in expat.h - it is in xmlparse.c
+ # XXX: find a good estimate of the XML_ParserStruct
+ rgc.add_memory_pressure(XML_Parser_SIZE + 300)
if not xmlparser:
raise OperationError(space.w_RuntimeError,
space.wrap('XML_ParserCreate failed'))
diff --git a/pypy/module/thread/ll_thread.py b/pypy/module/thread/ll_thread.py
--- a/pypy/module/thread/ll_thread.py
+++ b/pypy/module/thread/ll_thread.py
@@ -2,10 +2,11 @@
from pypy.rpython.lltypesystem import rffi, lltype, llmemory
from pypy.translator.tool.cbuild import ExternalCompilationInfo
import py
-from pypy.rlib import jit
+from pypy.rlib import jit, rgc
from pypy.rlib.debug import ll_assert
from pypy.rlib.objectmodel import we_are_translated
from pypy.rpython.lltypesystem.lloperation import llop
+from pypy.rpython.tool import rffi_platform
from pypy.tool import autopath
class error(Exception):
@@ -49,7 +50,7 @@
TLOCKP = rffi.COpaquePtr('struct RPyOpaque_ThreadLock',
compilation_info=eci)
-
+TLOCKP_SIZE = rffi_platform.sizeof('struct RPyOpaque_ThreadLock', eci)
c_thread_lock_init = llexternal('RPyThreadLockInit', [TLOCKP], rffi.INT,
threadsafe=False) # may add in a global list
c_thread_lock_dealloc_NOAUTO = llexternal('RPyOpaqueDealloc_ThreadLock',
@@ -164,6 +165,9 @@
if rffi.cast(lltype.Signed, res) <= 0:
lltype.free(ll_lock, flavor='raw', track_allocation=False)
raise error("out of resources")
+ # Add some memory pressure for the size of the lock because it is an
+ # Opaque object
+ rgc.add_memory_pressure(TLOCKP_SIZE)
return ll_lock
def free_ll_lock(ll_lock):
diff --git a/pypy/rlib/rgc.py b/pypy/rlib/rgc.py
--- a/pypy/rlib/rgc.py
+++ b/pypy/rlib/rgc.py
@@ -259,6 +259,24 @@
except Exception:
return False # don't keep objects whose _freeze_() method explodes
+def add_memory_pressure(estimate):
+ """Add memory pressure for OpaquePtrs."""
+ pass
+
+class AddMemoryPressureEntry(ExtRegistryEntry):
+ _about_ = add_memory_pressure
+
+ def compute_result_annotation(self, s_nbytes):
+ from pypy.annotation import model as annmodel
+ return annmodel.s_None
+
+ def specialize_call(self, hop):
+ [v_size] = hop.inputargs(lltype.Signed)
+ hop.exception_cannot_occur()
+ return hop.genop('gc_add_memory_pressure', [v_size],
+ resulttype=lltype.Void)
+
+
def get_rpy_memory_usage(gcref):
"NOT_RPYTHON"
# approximate implementation using CPython's type info
diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py
--- a/pypy/rlib/ropenssl.py
+++ b/pypy/rlib/ropenssl.py
@@ -25,6 +25,7 @@
'openssl/err.h',
'openssl/rand.h',
'openssl/evp.h',
+ 'openssl/ossl_typ.h',
'openssl/x509v3.h']
eci = ExternalCompilationInfo(
@@ -108,7 +109,9 @@
GENERAL_NAME_st = rffi_platform.Struct(
'struct GENERAL_NAME_st',
[('type', rffi.INT),
- ])
+ ])
+ EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD')
+ EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX')
for k, v in rffi_platform.configure(CConfig).items():
@@ -154,7 +157,7 @@
ssl_external('CRYPTO_set_id_callback',
[lltype.Ptr(lltype.FuncType([], rffi.LONG))],
lltype.Void)
-
+
if HAVE_OPENSSL_RAND:
ssl_external('RAND_add', [rffi.CCHARP, rffi.INT, rffi.DOUBLE], lltype.Void)
ssl_external('RAND_status', [], rffi.INT)
@@ -255,7 +258,7 @@
[BIO, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], X509)
EVP_MD_CTX = rffi.COpaquePtr('EVP_MD_CTX', compilation_info=eci)
-EVP_MD = rffi.COpaquePtr('EVP_MD')
+EVP_MD = rffi.COpaquePtr('EVP_MD', compilation_info=eci)
OpenSSL_add_all_digests = external(
'OpenSSL_add_all_digests', [], lltype.Void)
diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py
--- a/pypy/rpython/llinterp.py
+++ b/pypy/rpython/llinterp.py
@@ -172,7 +172,7 @@
def checkadr(addr):
assert lltype.typeOf(addr) is llmemory.Address
-
+
def is_inst(inst):
return isinstance(lltype.typeOf(inst), (ootype.Instance, ootype.BuiltinType, ootype.StaticMethod))
@@ -657,7 +657,7 @@
raise TypeError("graph with %r args called with wrong func ptr type: %r" %
(tuple([v.concretetype for v in args_v]), ARGS))
frame = self.newsubframe(graph, args)
- return frame.eval()
+ return frame.eval()
def op_direct_call(self, f, *args):
FTYPE = self.llinterpreter.typer.type_system.derefType(lltype.typeOf(f))
@@ -698,13 +698,13 @@
return ptr
except MemoryError:
self.make_llexception()
-
+
def op_malloc_nonmovable(self, TYPE, flags):
flavor = flags['flavor']
assert flavor == 'gc'
zero = flags.get('zero', False)
return self.heap.malloc_nonmovable(TYPE, zero=zero)
-
+
def op_malloc_nonmovable_varsize(self, TYPE, flags, size):
flavor = flags['flavor']
assert flavor == 'gc'
@@ -716,6 +716,9 @@
track_allocation = flags.get('track_allocation', True)
self.heap.free(obj, flavor='raw', track_allocation=track_allocation)
+ def op_gc_add_memory_pressure(self, size):
+ self.heap.add_memory_pressure(size)
+
def op_shrink_array(self, obj, smallersize):
return self.heap.shrink_array(obj, smallersize)
@@ -1318,7 +1321,7 @@
func_graph = fn.graph
else:
# obj is an instance, we want to call 'method_name' on it
- assert fn is None
+ assert fn is None
self_arg = [obj]
func_graph = obj._TYPE._methods[method_name._str].graph
diff --git a/pypy/rpython/lltypesystem/llheap.py b/pypy/rpython/lltypesystem/llheap.py
--- a/pypy/rpython/lltypesystem/llheap.py
+++ b/pypy/rpython/lltypesystem/llheap.py
@@ -5,8 +5,7 @@
setfield = setattr
from operator import setitem as setarrayitem
-from pypy.rlib.rgc import collect
-from pypy.rlib.rgc import can_move
+from pypy.rlib.rgc import can_move, collect, add_memory_pressure
def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue,
offsets=None):
diff --git a/pypy/rpython/lltypesystem/lloperation.py b/pypy/rpython/lltypesystem/lloperation.py
--- a/pypy/rpython/lltypesystem/lloperation.py
+++ b/pypy/rpython/lltypesystem/lloperation.py
@@ -473,6 +473,7 @@
'gc_is_rpy_instance' : LLOp(),
'gc_dump_rpy_heap' : LLOp(),
'gc_typeids_z' : LLOp(),
+ 'gc_add_memory_pressure': LLOp(),
# ------- JIT & GC interaction, only for some GCs ----------
diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py
--- a/pypy/rpython/lltypesystem/lltype.py
+++ b/pypy/rpython/lltypesystem/lltype.py
@@ -48,7 +48,7 @@
self.TYPE = TYPE
def __repr__(self):
return '<Uninitialized %r>'%(self.TYPE,)
-
+
def saferecursive(func, defl, TLS=TLS):
def safe(*args):
@@ -537,9 +537,9 @@
return "Func ( %s ) -> %s" % (args, self.RESULT)
__str__ = saferecursive(__str__, '...')
- def _short_name(self):
+ def _short_name(self):
args = ', '.join([ARG._short_name() for ARG in self.ARGS])
- return "Func(%s)->%s" % (args, self.RESULT._short_name())
+ return "Func(%s)->%s" % (args, self.RESULT._short_name())
_short_name = saferecursive(_short_name, '...')
def _container_example(self):
@@ -553,7 +553,7 @@
class OpaqueType(ContainerType):
_gckind = 'raw'
-
+
def __init__(self, tag, hints={}):
""" if hints['render_structure'] is set, the type is internal and not considered
to come from somewhere else (it should be rendered as a structure) """
@@ -723,10 +723,10 @@
def __str__(self):
return '* %s' % (self.TO, )
-
+
def _short_name(self):
return 'Ptr %s' % (self.TO._short_name(), )
-
+
def _is_atomic(self):
return self.TO._gckind == 'raw'
diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py
--- a/pypy/rpython/memory/gctransform/framework.py
+++ b/pypy/rpython/memory/gctransform/framework.py
@@ -377,17 +377,24 @@
self.malloc_varsize_nonmovable_ptr = None
if getattr(GCClass, 'raw_malloc_memory_pressure', False):
- def raw_malloc_memory_pressure(length, itemsize):
+ def raw_malloc_memory_pressure_varsize(length, itemsize):
totalmem = length * itemsize
if totalmem > 0:
gcdata.gc.raw_malloc_memory_pressure(totalmem)
#else: probably an overflow -- the following rawmalloc
# will fail then
+ def raw_malloc_memory_pressure(sizehint):
+ gcdata.gc.raw_malloc_memory_pressure(sizehint)
+ self.raw_malloc_memory_pressure_varsize_ptr = getfn(
+ raw_malloc_memory_pressure_varsize,
+ [annmodel.SomeInteger(), annmodel.SomeInteger()],
+ annmodel.s_None, minimal_transform = False)
self.raw_malloc_memory_pressure_ptr = getfn(
raw_malloc_memory_pressure,
- [annmodel.SomeInteger(), annmodel.SomeInteger()],
+ [annmodel.SomeInteger()],
annmodel.s_None, minimal_transform = False)
+
self.identityhash_ptr = getfn(GCClass.identityhash.im_func,
[s_gc, s_gcref],
annmodel.SomeInteger(),
diff --git a/pypy/rpython/memory/gctransform/transform.py b/pypy/rpython/memory/gctransform/transform.py
--- a/pypy/rpython/memory/gctransform/transform.py
+++ b/pypy/rpython/memory/gctransform/transform.py
@@ -63,7 +63,7 @@
gct.push_alive(v_result, self.llops)
elif opname not in ('direct_call', 'indirect_call'):
gct.push_alive(v_result, self.llops)
-
+
def rename(self, newopname):
@@ -118,7 +118,7 @@
self.minimalgctransformer = self.MinimalGCTransformer(self)
else:
self.minimalgctransformer = None
-
+
def get_lltype_of_exception_value(self):
if self.translator is not None:
exceptiondata = self.translator.rtyper.getexceptiondata()
@@ -399,7 +399,7 @@
def gct_gc_heap_stats(self, hop):
from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP
-
+
return hop.cast_result(rmodel.inputconst(lltype.Ptr(ARRAY_TYPEID_MAP),
lltype.nullptr(ARRAY_TYPEID_MAP)))
@@ -427,7 +427,7 @@
assert flavor == 'raw'
assert not flags.get('zero')
return self.parenttransformer.gct_malloc_varsize(hop)
-
+
def gct_free(self, hop):
flags = hop.spaceop.args[1].value
flavor = flags['flavor']
@@ -502,7 +502,7 @@
stack_mh = mallocHelpers()
stack_mh.allocate = lambda size: llop.stack_malloc(llmemory.Address, size)
ll_stack_malloc_fixedsize = stack_mh._ll_malloc_fixedsize
-
+
if self.translator:
self.raw_malloc_fixedsize_ptr = self.inittime_helper(
ll_raw_malloc_fixedsize, [lltype.Signed], llmemory.Address)
@@ -541,7 +541,7 @@
resulttype=llmemory.Address)
if flags.get('zero'):
hop.genop("raw_memclear", [v_raw, c_size])
- return v_raw
+ return v_raw
def gct_malloc_varsize(self, hop, add_flags=None):
flags = hop.spaceop.args[1].value
@@ -559,6 +559,14 @@
def gct_malloc_nonmovable_varsize(self, *args, **kwds):
return self.gct_malloc_varsize(*args, **kwds)
+ def gct_gc_add_memory_pressure(self, hop):
+ if hasattr(self, 'raw_malloc_memory_pressure_ptr'):
+ op = hop.spaceop
+ size = op.args[0]
+ return hop.genop("direct_call",
+ [self.raw_malloc_memory_pressure_ptr,
+ size])
+
def varsize_malloc_helper(self, hop, flags, meth, extraargs):
def intconst(c): return rmodel.inputconst(lltype.Signed, c)
op = hop.spaceop
@@ -590,9 +598,9 @@
def gct_fv_raw_malloc_varsize(self, hop, flags, TYPE, v_length, c_const_size, c_item_size,
c_offset_to_length):
if flags.get('add_memory_pressure', False):
- if hasattr(self, 'raw_malloc_memory_pressure_ptr'):
+ if hasattr(self, 'raw_malloc_memory_pressure_varsize_ptr'):
hop.genop("direct_call",
- [self.raw_malloc_memory_pressure_ptr,
+ [self.raw_malloc_memory_pressure_varsize_ptr,
v_length, c_item_size])
if c_offset_to_length is None:
if flags.get('zero'):
@@ -625,7 +633,7 @@
hop.genop("track_alloc_stop", [v])
hop.genop('raw_free', [v])
else:
- assert False, "%s has no support for free with flavor %r" % (self, flavor)
+ assert False, "%s has no support for free with flavor %r" % (self, flavor)
def gct_gc_can_move(self, hop):
return hop.cast_result(rmodel.inputconst(lltype.Bool, False))
diff --git a/pypy/rpython/memory/gcwrapper.py b/pypy/rpython/memory/gcwrapper.py
--- a/pypy/rpython/memory/gcwrapper.py
+++ b/pypy/rpython/memory/gcwrapper.py
@@ -66,6 +66,10 @@
gctypelayout.zero_gc_pointers(result)
return result
+ def add_memory_pressure(self, size):
+ if hasattr(self.gc, 'raw_malloc_memory_pressure'):
+ self.gc.raw_malloc_memory_pressure(size)
+
def shrink_array(self, p, smallersize):
if hasattr(self.gc, 'shrink_array'):
addr = llmemory.cast_ptr_to_adr(p)
diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py
--- a/pypy/rpython/memory/test/test_gc.py
+++ b/pypy/rpython/memory/test/test_gc.py
@@ -592,7 +592,7 @@
return rgc.can_move(lltype.malloc(TP, 1))
assert self.interpret(func, []) == self.GC_CAN_MOVE
-
+
def test_malloc_nonmovable(self):
TP = lltype.GcArray(lltype.Char)
def func():
diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py
--- a/pypy/rpython/memory/test/test_transformed_gc.py
+++ b/pypy/rpython/memory/test/test_transformed_gc.py
@@ -27,7 +27,7 @@
t.config.set(**extraconfigopts)
ann = t.buildannotator(policy=annpolicy.StrictAnnotatorPolicy())
ann.build_types(func, inputtypes)
-
+
if specialize:
t.buildrtyper().specialize()
if backendopt:
@@ -44,7 +44,7 @@
GC_CAN_MOVE = False
GC_CAN_MALLOC_NONMOVABLE = True
taggedpointers = False
-
+
def setup_class(cls):
funcs0 = []
funcs2 = []
@@ -155,7 +155,7 @@
return run, gct
else:
return run
-
+
class GenericGCTests(GCTest):
GC_CAN_SHRINK_ARRAY = False
@@ -190,7 +190,7 @@
j += 1
return 0
return malloc_a_lot
-
+
def test_instances(self):
run, statistics = self.runner("instances", statistics=True)
run([])
@@ -276,7 +276,7 @@
for i in range(1, 5):
res = run([i, i - 1])
assert res == i - 1 # crashes if constants are not considered roots
-
+
def define_string_concatenation(cls):
def concat(j, dummy):
lst = []
@@ -656,7 +656,7 @@
# return 2
return func
-
+
def test_malloc_nonmovable(self):
run = self.runner("malloc_nonmovable")
assert int(self.GC_CAN_MALLOC_NONMOVABLE) == run([])
@@ -676,7 +676,7 @@
return 2
return func
-
+
def test_malloc_nonmovable_fixsize(self):
run = self.runner("malloc_nonmovable_fixsize")
assert run([]) == int(self.GC_CAN_MALLOC_NONMOVABLE)
@@ -757,7 +757,7 @@
lltype.free(idarray, flavor='raw')
return 0
return f
-
+
def test_many_ids(self):
if not self.GC_CAN_TEST_ID:
py.test.skip("fails for bad reasons in lltype.py :-(")
@@ -813,7 +813,7 @@
else:
assert 0, "oups, not found"
return f, None, fix_graph_of_g
-
+
def test_do_malloc_operations(self):
run = self.runner("do_malloc_operations")
run([])
@@ -850,7 +850,7 @@
else:
assert 0, "oups, not found"
return f, None, fix_graph_of_g
-
+
def test_do_malloc_operations_in_call(self):
run = self.runner("do_malloc_operations_in_call")
run([])
@@ -861,7 +861,7 @@
l2 = []
l3 = []
l4 = []
-
+
def f():
for i in range(10):
s = lltype.malloc(S)
@@ -1026,7 +1026,7 @@
llop.gc__collect(lltype.Void)
return static.p.x + i
def cleanup():
- static.p = lltype.nullptr(T1)
+ static.p = lltype.nullptr(T1)
return f, cleanup, None
def test_nongc_static_root_minor_collect(self):
@@ -1081,7 +1081,7 @@
return 0
return f
-
+
def test_many_weakrefs(self):
run = self.runner("many_weakrefs")
run([])
@@ -1131,7 +1131,7 @@
def define_adr_of_nursery(cls):
class A(object):
pass
-
+
def f():
# we need at least 1 obj to allocate a nursery
a = A()
@@ -1147,9 +1147,9 @@
assert nt1 > nf1
assert nt1 == nt0
return 0
-
+
return f
-
+
def test_adr_of_nursery(self):
run = self.runner("adr_of_nursery")
res = run([])
@@ -1175,7 +1175,7 @@
def _teardown(self):
self.__ready = False # collecting here is expected
GenerationGC._teardown(self)
-
+
GC_PARAMS = {'space_size': 512*WORD,
'nursery_size': 128*WORD,
'translated_to_c': False}
diff --git a/pypy/translator/c/test/test_newgc.py b/pypy/translator/c/test/test_newgc.py
--- a/pypy/translator/c/test/test_newgc.py
+++ b/pypy/translator/c/test/test_newgc.py
@@ -37,7 +37,7 @@
else:
print res
return 0
-
+
t = Translation(main, standalone=True, gc=cls.gcpolicy,
policy=annpolicy.StrictAnnotatorPolicy(),
taggedpointers=cls.taggedpointers,
@@ -128,10 +128,10 @@
if not args:
args = (-1, )
res = self.allfuncs(name, *args)
- num = self.name_to_func[name]
+ num = self.name_to_func[name]
if self.funcsstr[num]:
return res
- return int(res)
+ return int(res)
def define_empty_collect(cls):
def f():
@@ -228,7 +228,7 @@
T = lltype.GcStruct("T", ('y', lltype.Signed),
('s', lltype.Ptr(S)))
ARRAY_Ts = lltype.GcArray(lltype.Ptr(T))
-
+
def f():
r = 0
for i in range(30):
@@ -250,7 +250,7 @@
def test_framework_varsized(self):
res = self.run('framework_varsized')
assert res == self.run_orig('framework_varsized')
-
+
def define_framework_using_lists(cls):
class A(object):
pass
@@ -271,7 +271,7 @@
N = 1000
res = self.run('framework_using_lists')
assert res == N*(N - 1)/2
-
+
def define_framework_static_roots(cls):
class A(object):
def __init__(self, y):
@@ -318,8 +318,8 @@
def test_framework_void_array(self):
res = self.run('framework_void_array')
assert res == 44
-
-
+
+
def define_framework_malloc_failure(cls):
def f():
a = [1] * (sys.maxint//2)
@@ -342,7 +342,7 @@
def test_framework_array_of_void(self):
res = self.run('framework_array_of_void')
assert res == 43 + 1000000
-
+
def define_framework_opaque(cls):
A = lltype.GcStruct('A', ('value', lltype.Signed))
O = lltype.GcOpaqueType('test.framework')
@@ -437,7 +437,7 @@
b = B()
return 0
return func
-
+
def test_del_raises(self):
self.run('del_raises') # does not raise
@@ -712,7 +712,7 @@
def test_callback_with_collect(self):
assert self.run('callback_with_collect')
-
+
def define_can_move(cls):
class A:
pass
@@ -1255,7 +1255,7 @@
l1 = []
l2 = []
l3 = []
-
+
def f():
for i in range(10):
s = lltype.malloc(S)
@@ -1298,7 +1298,7 @@
def test_string_builder(self):
res = self.run('string_builder')
assert res == "aabcbdddd"
-
+
def definestr_string_builder_over_allocation(cls):
import gc
def fn(_):
@@ -1458,6 +1458,37 @@
res = self.run("nongc_attached_to_gc")
assert res == -99997
+ def define_nongc_opaque_attached_to_gc(cls):
+ from pypy.module._hashlib.interp_hashlib import HASH_MALLOC_SIZE
+ from pypy.rlib import rgc, ropenssl
+ from pypy.rpython.lltypesystem import rffi
+
+ class A:
+ def __init__(self):
+ self.ctx = lltype.malloc(ropenssl.EVP_MD_CTX.TO,
+ flavor='raw')
+ digest = ropenssl.EVP_get_digestbyname('sha1')
+ ropenssl.EVP_DigestInit(self.ctx, digest)
+ rgc.add_memory_pressure(HASH_MALLOC_SIZE + 64)
+
+ def __del__(self):
+ ropenssl.EVP_MD_CTX_cleanup(self.ctx)
+ lltype.free(self.ctx, flavor='raw')
+ A()
+ def f():
+ am1 = am2 = am3 = None
+ for i in range(100000):
+ am3 = am2
+ am2 = am1
+ am1 = A()
+ # what can we use for the res?
+ return 0
+ return f
+
+ def test_nongc_opaque_attached_to_gc(self):
+ res = self.run("nongc_opaque_attached_to_gc")
+ assert res == 0
+
# ____________________________________________________________________
class TaggedPointersTest(object):
More information about the pypy-commit
mailing list